/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/prism/plugins/command-line/prism-command-line.js
(function() {
if (typeof self === 'undefined' || !self.Prism || !self.document) {
return;
}
Prism.hooks.add('complete', function (env) {
if (!env.code) {
return;
}
// Works only for <code> wrapped inside <pre> (not inline).
var pre = env.element.parentNode;
var clsReg = /\s*\bcommand-line\b\s*/;
if (
!pre || !/pre/i.test(pre.nodeName) ||
// Abort only if neither the <pre> nor the <code> have the class
(!clsReg.test(pre.className) && !clsReg.test(env.element.className))
) {
return;
}
if (env.element.querySelector('.command-line-prompt')) {
// Abort if prompt already exists.
return;
}
if (clsReg.test(env.element.className)) {
// Remove the class "command-line" from the <code>
env.element.className = env.element.className.replace(clsReg, '');
}
if (!clsReg.test(pre.className)) {
// Add the class "command-line" to the <pre>
pre.className += ' command-line';
}
var getAttribute = function(key, defaultValue) {
return (pre.getAttribute(key) || defaultValue).replace(/"/g, '&quot;');
};
// Create the "rows" that will become the command-line prompts. -- cwells
var lines = new Array(1 + env.code.split('\n').length);
var promptText = getAttribute('data-prompt', '');
if (promptText !== '') {
lines = lines.join('<span data-prompt="' + promptText + '"></span>');
} else {
var user = getAttribute('data-user', 'user');
var host = getAttribute('data-host', 'localhost');
lines = lines.join('<span data-user="' + user + '" data-host="' + host + '"></span>');
}
// Create the wrapper element. -- cwells
var prompt = document.createElement('span');
prompt.className = 'command-line-prompt';
prompt.innerHTML = lines;
// Mark the output lines so they can be styled differently (no prompt). -- cwells
var outputSections = pre.getAttribute('data-output') || '';
outputSections = outputSections.split(',');
for (var i = 0; i < outputSections.length; i++) {
var outputRange = outputSections[i].split('-');
var outputStart = parseInt(outputRange[0]);
var outputEnd = outputStart; // Default: end at the first line when it's not an actual range. -- cwells
if (outputRange.length === 2) {
outputEnd = parseInt(outputRange[1]);
}
if (!isNaN(outputStart) && !isNaN(outputEnd)) {
for (var j = outputStart; j <= outputEnd && j <= prompt.children.length; j++) {
var node = prompt.children[j - 1];
node.removeAttribute('data-user');
node.removeAttribute('data-host');
node.removeAttribute('data-prompt');
}
}
}
env.element.innerHTML = prompt.outerHTML + env.element.innerHTML;
});
}());
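/*
 * Usage sketch (assumption: a standard Prism setup with this plugin and a
 * language component loaded). The class and data-* attributes below are the
 * ones the hook above reads:
 *
 *   <pre class="command-line" data-user="chris" data-host="localhost" data-output="2, 4-6">
 *     <code class="language-bash">...</code>
 *   </pre>
 */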
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas/numerical/descent/descent_numpy.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy import array
from numpy import eye
from numpy import finfo
from numpy import float64
from numpy import maximum
from numpy import mean
from numpy import newaxis
from numpy import ones
from numpy import reshape
from numpy import sqrt
from numpy import sum
from numpy import zeros
eps = finfo(float64).eps
e = sqrt(eps)
__all__ = ["descent_numpy"]
def descent_numpy(x0, fn, iterations=1000, gtol=10 ** (-6), bounds=None, limit=0, args=()):
"""A gradient descent optimisation solver.
Parameters
----------
x0 : array-like
n x 1 starting guess of x.
fn : callable
The objective function to minimize.
iterations : int
Maximum number of iterations.
gtol : float
Mean residual of the gradient for convergence.
bounds : list
List of lower and upper bound pairs [[lb, ub], ...], None=unconstrained.
limit : float
Value of the objective function for which to terminate optimisation.
args : tuple
Additional parameters needed for fn.
Returns
-------
float
Final value of the objective function.
array
Values of x at the found local minimum.
"""
r = 0.5
c = 0.0001
n = len(x0)
x0 = reshape(array(x0), (n, 1))
if bounds:
bounds = array(bounds)
lb = bounds[:, 0][:, newaxis]
ub = bounds[:, 1][:, newaxis]
else:
lb = ones((n, 1)) * -(10**20)
ub = ones((n, 1)) * +(10**20)
zn = zeros((n, 1))
g = zeros((n, 1))
v = eye(n) * e
def phi(x, mu, *args):
p = mu * (sum(maximum(lb - x, zn)) + sum(maximum(x - ub, zn))) ** 2
return fn(x, *args) + p
i = 0
mu = 1
while i < iterations:
p0 = phi(x0, mu, *args)
for j in range(n):
vj = v[:, j][:, newaxis]
g[j, 0] = (phi(x0 + vj, mu, *args) - p0) / e
D = sum(-g * g)
a = 1
x1 = x0 - a * g
while phi(x1, mu, *args) > p0 + c * a * D:
a *= r
x1 = x0 - a * g
x0 -= a * g
mu *= 10
res = mean(abs(g))
i += 1
f1 = phi(x0, mu, *args)
if f1 < limit:
break
if res < gtol:
break
print("Iteration: {0} fopt: {1:.3g} gres: {2:.3g} step: {3}".format(i, f1, res, a))
return f1, x0
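# Usage sketch (not part of the original module): minimise a simple
# quadratic with the solver above. Any callable taking (x, *args) and
# returning a scalar works as the objective.
if __name__ == "__main__":
    fopt, xopt = descent_numpy([10.0, -10.0], lambda x: sum((x - 3.0) ** 2), iterations=100)
    print(fopt, xopt)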
/LabJackPython-9-20-2010.tar.gz/LabJackPython-9-20-2010/src/u6.py
from LabJackPython import *
import struct, collections, ConfigParser  # collections is used by processStreamData()
def openAllU6():
"""
A helpful function which will open all the connected U6s. Returns a
dictionary where the keys are the serialNumber, and the value is the device
object.
"""
returnDict = dict()
for i in range(deviceCount(6)):
d = U6(firstFound = False, devNumber = i+1)
returnDict[str(d.serialNumber)] = d
return returnDict
def dumpPacket(buffer):
"""
Name: dumpPacket(buffer)
Args: byte array
Desc: Returns hex value of all bytes in the buffer
"""
return repr([ hex(x) for x in buffer ])
def getBit(n, bit):
"""
Name: getBit(n, bit)
Args: n, the original integer you want the bit of
bit, the index of the bit you want
Desc: Returns the bit at position "bit" of integer "n"
>>> n = 5
>>> bit = 2
>>> getBit(n, bit)
1
>>> bit = 0
>>> getBit(n, bit)
1
"""
return int(bool((int(n) & (1 << bit)) >> bit))
def toBitList(inbyte):
"""
Name: toBitList(inbyte)
Args: a byte
Desc: Converts a byte into list for access to individual bits
>>> inbyte = 5
>>> toBitList(inbyte)
[1, 0, 1, 0, 0, 0, 0, 0]
"""
return [ getBit(inbyte, b) for b in range(8) ]
def dictAsString(d):
"""Helper function that returns a string representation of a dictionary"""
s = "{"
for key, val in sorted(d.items()):
s += "%s: %s, " % (key, val)
s = s.rstrip(", ") # Nuke the trailing comma
s += "}"
return s
class CalibrationInfo(object):
""" A class to hold the calibration info for a U6 """
def __init__(self):
# A flag to tell difference between nominal and actual values.
self.nominal = True
# Positive Channel calibration
self.ain10vSlope = 3.1580578 * (10 ** -4)
self.ain10vOffset = -10.5869565220
self.ain1vSlope = 3.1580578 * (10 ** -5)
self.ain1vOffset = -1.05869565220
self.ain100mvSlope = 3.1580578 * (10 ** -6)
self.ain100mvOffset = -0.105869565220
self.ain10mvSlope = 3.1580578 * (10 ** -7)
self.ain10mvOffset = -0.0105869565220
self.ainSlope = [self.ain10vSlope, self.ain1vSlope, self.ain100mvSlope, self.ain10mvSlope]
self.ainOffset = [ self.ain10vOffset, self.ain1vOffset, self.ain100mvOffset, self.ain10mvOffset ]
# Negative Channel calibration
self.ain10vNegSlope = -3.15805800 * (10 ** -4)
self.ain10vCenter = 33523.0
self.ain1vNegSlope = -3.15805800 * (10 ** -5)
self.ain1vCenter = 33523.0
self.ain100mvNegSlope = -3.15805800 * (10 ** -6)
self.ain100mvCenter = 33523.0
self.ain10mvNegSlope = -3.15805800 * (10 ** -7)
self.ain10mvCenter = 33523.0
self.ainNegSlope = [ self.ain10vNegSlope, self.ain1vNegSlope, self.ain100mvNegSlope, self.ain10mvNegSlope ]
self.ainCenter = [ self.ain10vCenter, self.ain1vCenter, self.ain100mvCenter, self.ain10mvCenter ]
# Miscellaneous
self.dac0Slope = 13200.0
self.dac0Offset = 0
self.dac1Slope = 13200.0
self.dac1Offset = 0
self.currentOutput0 = 0.0000100000
self.currentOutput1 = 0.0002000000
self.temperatureSlope = -92.379
self.temperatureOffset = 465.129
# Hi-Res ADC stuff
# Positive Channel calibration
self.proAin10vSlope = 3.1580578 * (10 ** -4)
self.proAin10vOffset = -10.5869565220
self.proAin1vSlope = 3.1580578 * (10 ** -5)
self.proAin1vOffset = -1.05869565220
self.proAin100mvSlope = 3.1580578 * (10 ** -6)
self.proAin100mvOffset = -0.105869565220
self.proAin10mvSlope = 3.1580578 * (10 ** -7)
self.proAin10mvOffset = -0.0105869565220
# Negative Channel calibration
self.proAin10vNegSlope = -3.15805800 * (10 ** -4)
self.proAin10vCenter = 33523.0
self.proAin1vNegSlope = -3.15805800 * (10 ** -5)
self.proAin1vCenter = 33523.0
self.proAin100mvNegSlope = -3.15805800 * (10 ** -6)
self.proAin100mvCenter = 33523.0
self.proAin10mvNegSlope = -3.15805800 * (10 ** -7)
self.proAin10mvCenter = 33523.0
def __str__(self):
return str(self.__dict__)
class U6(Device):
"""
U6 Class for all U6 specific low-level commands.
Example:
>>> import u6
>>> d = u6.U6()
>>> print d.configU6()
{'SerialNumber': 320032102, ... , 'FirmwareVersion': '1.26'}
"""
def __init__(self, debug = False, autoOpen = True, **kargs):
"""
Name: U6.__init__(self, debug = False, autoOpen = True, **kargs)
Args: debug, Do you want debug information?
autoOpen, If true, then the constructor will call open for you
**kargs, The arguments to be passed to open.
Desc: Your basic constructor.
"""
Device.__init__(self, None, devType = 6)
self.firmwareVersion = 0
self.bootloaderVersion = 0
self.hardwareVersion = 0
self.productId = 0
self.fioDirection = [None] * 8
self.fioState = [None] * 8
self.eioDirection = [None] * 8
self.eioState = [None] * 8
self.cioDirection = [None] * 8
self.cioState = [None] * 8
self.dac1Enable = 0
self.dac0 = 0
self.dac1 = 0
self.calInfo = CalibrationInfo()
self.productName = "U6"
self.debug = debug
if autoOpen:
self.open(**kargs)
def open(self, localId = None, firstFound = True, serial = None, devNumber = None, handleOnly = False, LJSocket = None):
"""
Name: U6.open(localId = None, firstFound = True, serial = None,
devNumber = None, handleOnly = False, LJSocket = None)
Args: firstFound, If True, use the first found U6
serial, open a U6 with the given serial number
localId, open a U6 with the given local id.
devNumber, open a U6 with the given devNumber
handleOnly, if True, LabJackPython will only open a handle
LJSocket, set to "<ip>:<port>" to connect to LJSocket
Desc: Opens a U6 for reading and writing.
>>> myU6 = u6.U6(autoOpen = False)
>>> myU6.open()
"""
Device.open(self, 6, firstFound = firstFound, serial = serial, localId = localId, devNumber = devNumber, handleOnly = handleOnly, LJSocket = LJSocket )
def configU6(self, LocalID = None):
"""
Name: U6.configU6(LocalID = None)
Args: LocalID, if set, will write the new value to U6
Desc: Writes the Local ID, and reads some hardware information.
>>> myU6 = u6.U6()
>>> myU6.configU6()
{'BootloaderVersion': '6.15',
'FirmwareVersion': '0.88',
'HardwareVersion': '2.0',
'LocalID': 1,
'ProductID': 6,
'SerialNumber': 360005087,
'VersionInfo': 4}
"""
command = [ 0 ] * 26
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x0A
command[3] = 0x08
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if LocalID != None:
command[6] = (1 << 3)
command[8] = LocalID
#command[7] = Reserved
#command[9-25] = Reserved
try:
result = self._writeRead(command, 38, [0xF8, 0x10, 0x08])
except LabJackException, e:
if e.errorCode == 4:
print "NOTE: ConfigU6 returned an error of 4. This probably means you are using U6 with a *really old* firmware. Please upgrade your U6's firmware as soon as possible."
result = self._writeRead(command, 38, [0xF8, 0x10, 0x08], checkBytes = False)
else:
raise e
self.firmwareVersion = "%s.%02d" % (result[10], result[9])
self.bootloaderVersion = "%s.%02d" % (result[12], result[11])
self.hardwareVersion = "%s.%02d" % (result[14], result[13])
self.serialNumber = struct.unpack("<I", struct.pack(">BBBB", *result[15:19]))[0]
self.productId = struct.unpack("<H", struct.pack(">BB", *result[19:21]))[0]
self.localId = result[21]
self.versionInfo = result[37]
self.deviceName = 'U6'
if self.versionInfo == 12:
self.deviceName = 'U6-Pro'
return { 'FirmwareVersion' : self.firmwareVersion, 'BootloaderVersion' : self.bootloaderVersion, 'HardwareVersion' : self.hardwareVersion, 'SerialNumber' : self.serialNumber, 'ProductID' : self.productId, 'LocalID' : self.localId, 'VersionInfo' : self.versionInfo, 'DeviceName' : self.deviceName }
def configIO(self, NumberTimersEnabled = None, EnableCounter1 = None, EnableCounter0 = None, TimerCounterPinOffset = None, EnableUART = None):
"""
Name: U6.configIO(NumberTimersEnabled = None, EnableCounter1 = None, EnableCounter0 = None, TimerCounterPinOffset = None)
Args: NumberTimersEnabled, Number of timers to enable
EnableCounter1, Set to True to enable counter 1, False to disable
EnableCounter0, Set to True to enable counter 0, False to disable
TimerCounterPinOffset, where should the timers/counters start
if all args are None, command just reads.
Desc: Writes and reads the current IO configuration.
>>> myU6 = u6.U6()
>>> myU6.configIO()
{'Counter0Enabled': False,
'Counter1Enabled': False,
'NumberTimersEnabled': 0,
'TimerCounterPinOffset': 0}
"""
command = [ 0 ] * 16
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x05
command[3] = 0x0B
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if NumberTimersEnabled != None:
command[6] = 1
command[7] = NumberTimersEnabled
if EnableCounter0 != None:
command[6] = 1
if EnableCounter0:
command[8] = 1
if EnableCounter1 != None:
command[6] = 1
if EnableCounter1:
command[8] |= (1 << 1)
if TimerCounterPinOffset != None:
command[6] = 1
command[9] = TimerCounterPinOffset
if EnableUART is not None:
command[6] |= 1
command[6] |= (1 << 5)
result = self._writeRead(command, 16, [0xf8, 0x05, 0x0B])
return { 'NumberTimersEnabled' : result[8], 'Counter0Enabled' : bool(result[9] & 1), 'Counter1Enabled' : bool( (result[9] >> 1) & 1), 'TimerCounterPinOffset' : result[10] }
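# Usage sketch (hypothetical values): enable two timers and both counters
# starting at FIO2, e.g.
# d.configIO(NumberTimersEnabled = 2, EnableCounter0 = True, EnableCounter1 = True, TimerCounterPinOffset = 2)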
def configTimerClock(self, TimerClockBase = None, TimerClockDivisor = None):
"""
Name: U6.configTimerClock(TimerClockBase = None, TimerClockDivisor = None)
Args: TimerClockBase, which timer base to use
TimerClockDivisor, set the divisor
if all args are None, command just reads.
Note that you cannot set the divisor without setting the base.
Desc: Writes and read the timer clock configuration.
>>> myU6 = u6.U6()
>>> myU6.configTimerClock()
{'TimerClockDivisor': 256, 'TimerClockBase': 2}
"""
command = [ 0 ] * 10
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x02
command[3] = 0x0A
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
#command[6] = Reserved
#command[7] = Reserved
if TimerClockBase != None:
command[8] = (1 << 7)
command[8] |= TimerClockBase & 7
if TimerClockDivisor != None:
command[9] = TimerClockDivisor
result = self._writeRead(command, 10, [0xF8, 0x2, 0x0A])
divisor = result[9]
if divisor == 0:
divisor = 256
return { 'TimerClockBase' : (result[8] & 7), 'TimerClockDivisor' : divisor }
def _buildBuffer(self, sendBuffer, readLen, commandlist):
for cmd in commandlist:
if isinstance(cmd, FeedbackCommand):
sendBuffer += cmd.cmdBytes
readLen += cmd.readLen
elif isinstance(cmd, list):
sendBuffer, readLen = self._buildBuffer(sendBuffer, readLen, cmd)
return (sendBuffer, readLen)
def _buildFeedbackResults(self, rcvBuffer, commandlist, results, i):
for cmd in commandlist:
if isinstance(cmd, FeedbackCommand):
results.append(cmd.handle(rcvBuffer[i:i+cmd.readLen]))
i += cmd.readLen
elif isinstance(cmd, list):
self._buildFeedbackResults(rcvBuffer, cmd, results, i)
return results
def getFeedback(self, *commandlist):
"""
Name: getFeedback(commandlist)
Args: the FeedbackCommands to run
Desc: Forms the commandlist into a packet, sends it to the U6, and reads the response.
>>> myU6 = U6()
>>> ledCommand = u6.LED(False)
>>> internalTempCommand = u6.AIN(30, 31, True)
>>> myU6.getFeedback(ledCommand, internalTempCommand)
[None, 23200]
OR if you like the list version better:
>>> myU6 = U6()
>>> ledCommand = u6.LED(False)
>>> internalTempCommand = u6.AIN(30, 31, True)
>>> commandList = [ ledCommand, internalTempCommand ]
>>> myU6.getFeedback(commandList)
[None, 23200]
"""
sendBuffer = [0] * 7
sendBuffer[1] = 0xF8
readLen = 9
sendBuffer, readLen = self._buildBuffer(sendBuffer, readLen, commandlist)
if len(sendBuffer) % 2:
sendBuffer += [0]
sendBuffer[2] = len(sendBuffer) / 2 - 3
if readLen % 2:
readLen += 1
if len(sendBuffer) > MAX_USB_PACKET_LENGTH:
raise LabJackException("ERROR: The feedback command you are attempting to send is bigger than 64 bytes ( %s bytes ). Break your commands up into separate calls to getFeedback()." % len(sendBuffer))
if readLen > MAX_USB_PACKET_LENGTH:
raise LabJackException("ERROR: The feedback command you are attempting to send would yield a response that is greater than 64 bytes ( %s bytes ). Break your commands up into separate calls to getFeedback()." % readLen)
rcvBuffer = self._writeRead(sendBuffer, readLen, [], checkBytes = False, stream = False, checksum = True)
# Check the response for errors
try:
self._checkCommandBytes(rcvBuffer, [0xF8])
if rcvBuffer[3] != 0x00:
raise LabJackException("Got incorrect command bytes")
except LowlevelErrorException, e:
if isinstance(commandlist[0], list):
culprit = commandlist[0][ (rcvBuffer[7] -1) ]
else:
culprit = commandlist[ (rcvBuffer[7] -1) ]
raise LowlevelErrorException("\nThis Command\n %s\nreturned an error:\n %s" % ( culprit, lowlevelErrorToString(rcvBuffer[6]) ) )
results = []
i = 9
return self._buildFeedbackResults(rcvBuffer, commandlist, results, i)
def readMem(self, BlockNum, ReadCal=False):
"""
Name: U6.readMem(BlockNum, ReadCal=False)
Args: BlockNum, which block to read
ReadCal, set to True to read the calibration data
Desc: Reads 1 block (32 bytes) from the non-volatile user or
calibration memory. Please read section 5.2.6 of the user's
guide before you do something you may regret.
>>> myU6 = U6()
>>> myU6.readMem(0)
[ < userdata stored in block 0 > ]
NOTE: Do not call this function while streaming.
"""
command = [ 0 ] * 8
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x01
command[3] = 0x2A
if ReadCal:
command[3] = 0x2D
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = 0x00
command[7] = BlockNum
result = self._writeRead(command, 40, [ 0xF8, 0x11, command[3] ])
return result[8:]
def readCal(self, BlockNum):
return self.readMem(BlockNum, ReadCal = True)
def writeMem(self, BlockNum, Data, WriteCal=False):
"""
Name: U6.writeMem(BlockNum, Data, WriteCal=False)
Args: BlockNum, which block to write
Data, a list of bytes to write
WriteCal, set to True to write calibration.
Desc: Writes 1 block (32 bytes) from the non-volatile user or
calibration memory. Please read section 5.2.7 of the user's
guide before you do something you may regret.
>>> myU6 = U6()
>>> myU6.writeMem(0, [ < userdata to be stored in block 0 > ])
NOTE: Do not call this function while streaming.
"""
if not isinstance(Data, list):
raise LabJackException("Data must be a list of bytes")
command = [ 0 ] * 40
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x11
command[3] = 0x28
if WriteCal:
command[3] = 0x2B
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = 0x00
command[7] = BlockNum
command[8:] = Data
self._writeRead(command, 8, [0xF8, 0x11, command[3]])
def writeCal(self, BlockNum, Data):
return self.writeMem(BlockNum, Data, WriteCal = True)
def eraseMem(self, EraseCal=False):
"""
Name: U6.eraseMem(EraseCal=False)
Args: EraseCal, set to True to erase the calibration memory.
Desc: The U6 uses flash memory that must be erased before writing.
Please read section 5.2.8 of the user's guide before you do
something you may regret.
>>> myU6 = U6()
>>> myU6.eraseMem()
NOTE: Do not call this function while streaming.
"""
if EraseCal:
command = [ 0 ] * 8
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x01
command[3] = 0x2C
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = 0x4C
command[7] = 0x6C
else:
command = [ 0 ] * 6
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x00
command[3] = 0x29
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
self._writeRead(command, 8, [0xF8, 0x01, command[3]])
def eraseCal(self):
return self.eraseMem(EraseCal=True)
def streamConfig(self, NumChannels = 1, ResolutionIndex = 0, SamplesPerPacket = 25, SettlingFactor = 0, InternalStreamClockFrequency = 0, DivideClockBy256 = False, ScanInterval = 1, ChannelNumbers = [0], ChannelOptions = [0], SampleFrequency = None):
"""
Name: U6.streamConfig(
NumChannels = 1, ResolutionIndex = 0,
SamplesPerPacket = 25, SettlingFactor = 0,
InternalStreamClockFrequency = 0, DivideClockBy256 = False,
ScanInterval = 1, ChannelNumbers = [0],
ChannelOptions = [0], SampleFrequency = None )
Args: NumChannels, the number of channels to stream
ResolutionIndex, the resolution of the samples
SettlingFactor, the settling factor to be used
ChannelNumbers, a list of channel numbers to stream
ChannelOptions, a list of channel options bytes
Set Either:
SampleFrequency, the frequency in Hz to sample
-- OR --
SamplesPerPacket, how many samples make one packet
InternalStreamClockFrequency, 0 = 4 MHz, 1 = 48 MHz
DivideClockBy256, True = divide the clock by 256
ScanInterval, clock/ScanInterval = frequency.
Desc: Configures streaming on the U6. On a decent machine, you can
expect to stream a range of 0.238 Hz to 15 Hz. Without the
conversion, you can get up to 55 Hz.
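Example (a sketch, assuming a connected U6):
>>> myU6 = u6.U6()
>>> myU6.streamConfig(NumChannels = 2, ChannelNumbers = [0, 1],
ChannelOptions = [0, 0], SampleFrequency = 5000)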
"""
if NumChannels != len(ChannelNumbers) or NumChannels != len(ChannelOptions):
raise LabJackException("NumChannels must match length of ChannelNumbers and ChannelOptions")
if len(ChannelNumbers) != len(ChannelOptions):
raise LabJackException("len(ChannelNumbers) doesn't match len(ChannelOptions)")
if SampleFrequency != None:
if SampleFrequency < 1000:
if SampleFrequency < 25:
SamplesPerPacket = SampleFrequency
DivideClockBy256 = True
ScanInterval = 15625/SampleFrequency
else:
DivideClockBy256 = False
ScanInterval = 4000000/SampleFrequency
# Force Scan Interval into correct range
ScanInterval = min( ScanInterval, 65535 )
ScanInterval = int( ScanInterval )
ScanInterval = max( ScanInterval, 1 )
# Same with Samples per packet
SamplesPerPacket = max( SamplesPerPacket, 1)
SamplesPerPacket = int( SamplesPerPacket )
SamplesPerPacket = min ( SamplesPerPacket, 25)
command = [ 0 ] * (14 + NumChannels*2)
#command[0] = Checksum8
command[1] = 0xF8
command[2] = NumChannels+4
command[3] = 0x11
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = NumChannels
command[7] = ResolutionIndex
command[8] = SamplesPerPacket
#command[9] = Reserved
command[10] = SettlingFactor
command[11] = (InternalStreamClockFrequency & 1) << 3
if DivideClockBy256:
command[11] |= 1 << 1
t = struct.pack("<H", ScanInterval)
command[12] = ord(t[0])
command[13] = ord(t[1])
for i in range(NumChannels):
command[14+(i*2)] = ChannelNumbers[i]
command[15+(i*2)] = ChannelOptions[i]
self._writeRead(command, 8, [0xF8, 0x01, 0x11])
# Set up the variables for future use.
self.streamSamplesPerPacket = SamplesPerPacket
self.streamChannelNumbers = ChannelNumbers
self.streamChannelOptions = ChannelOptions
self.streamConfiged = True
if InternalStreamClockFrequency == 1:
freq = float(48000000)
else:
freq = float(4000000)
if DivideClockBy256:
freq /= 256
freq = freq/ScanInterval
self.packetsPerRequest = max(1, int(freq/SamplesPerPacket))
self.packetsPerRequest = min(self.packetsPerRequest, 48)
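# Typical streaming workflow (a sketch; streamStart, streamData and
# streamStop come from the Device base class in LabJackPython):
# d.streamConfig(NumChannels = 1, ChannelNumbers = [0], ChannelOptions = [0], SampleFrequency = 5000)
# d.streamStart()
# for r in d.streamData():
#     if r is not None:
#         print r['AIN0']
# d.streamStop()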
def processStreamData(self, result, numBytes = None):
"""
Name: U6.processStreamData(result, numBytes = None)
Args: result, the string returned from streamData()
numBytes, the number of bytes per packet
Desc: Breaks stream data into individual channels and applies
calibrations.
>>> reading = d.streamData(convert = False)
>>> print d.processStreamData(reading['result'])
defaultdict(<type 'list'>, {'AIN0' : [3.123, 3.231, 3.232, ...]})
"""
if numBytes is None:
numBytes = 14 + (self.streamSamplesPerPacket * 2)
returnDict = collections.defaultdict(list)
j = self.streamPacketOffset
for packet in self.breakupPackets(result, numBytes):
for sample in self.samplesFromPacket(packet):
if j >= len(self.streamChannelNumbers):
j = 0
if self.streamChannelNumbers[j] == 193:
value = struct.unpack('<BB', sample )
else:
if (self.streamChannelOptions[j] >> 7) == 1:
# do signed
value = struct.unpack('<H', sample )[0]
else:
# do unsigned
value = struct.unpack('<H', sample )[0]
gainIndex = (self.streamChannelOptions[j] >> 4) & 0x3
value = self.binaryToCalibratedAnalogVoltage(gainIndex, value, is16Bits=True)
returnDict["AIN%s" % self.streamChannelNumbers[j]].append(value)
j += 1
self.streamPacketOffset = j
return returnDict
def watchdog(self, Write = False, ResetOnTimeout = False, SetDIOStateOnTimeout = False, TimeoutPeriod = 60, DIOState = 0, DIONumber = 0):
"""
Name: U6.watchdog(Write = False, ResetOnTimeout = False, SetDIOStateOnTimeout = False, TimeoutPeriod = 60, DIOState = 0, DIONumber = 0)
Args: Write, Set to True to write new values to the watchdog.
ResetOnTimeout, True means reset the device on timeout
SetDIOStateOnTimeout, True means set the state of a DIO on timeout
TimeoutPeriod, Time, in seconds, to wait before timing out.
DIOState, 1 = High, 0 = Low
DIONumber, which DIO to set.
Desc: Controls a firmware based watchdog timer.
"""
command = [ 0 ] * 16
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x05
command[3] = 0x09
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if Write:
command[6] = 1
if ResetOnTimeout:
command[7] = (1 << 5)
if SetDIOStateOnTimeout:
command[7] |= (1 << 4)
t = struct.pack("<H", TimeoutPeriod)
command[8] = ord(t[0])
command[9] = ord(t[1])
command[10] = ((DIOState & 1 ) << 7)
command[10] |= (DIONumber & 0xf)
result = self._writeRead(command, 16, [ 0xF8, 0x05, 0x09])
watchdogStatus = {}
if result[7] == 0:
watchdogStatus['WatchDogEnabled'] = False
watchdogStatus['ResetOnTimeout'] = False
watchdogStatus['SetDIOStateOnTimeout'] = False
else:
watchdogStatus['WatchDogEnabled'] = True
if (( result[7] >> 5 ) & 1):
watchdogStatus['ResetOnTimeout'] = True
else:
watchdogStatus['ResetOnTimeout'] = False
if (( result[7] >> 4 ) & 1):
watchdogStatus['SetDIOStateOnTimeout'] = True
else:
watchdogStatus['SetDIOStateOnTimeout'] = False
watchdogStatus['TimeoutPeriod'] = struct.unpack('<H', struct.pack("BB", *result[8:10]))
if (( result[10] >> 7 ) & 1):
watchdogStatus['DIOState'] = 1
else:
watchdogStatus['DIOState'] = 0
watchdogStatus['DIONumber'] = ( result[10] & 15 )
return watchdogStatus
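# Usage sketch: arm the watchdog to reset the device after 60 seconds of
# no host communication, e.g.
# d.watchdog(Write = True, ResetOnTimeout = True, TimeoutPeriod = 60)
# Calling d.watchdog() with no arguments just reads back the current settings.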
SPIModes = { 'A' : 0, 'B' : 1, 'C' : 2, 'D' : 3 }
def spi(self, SPIBytes, AutoCS=True, DisableDirConfig = False, SPIMode = 'A', SPIClockFactor = 0, CSPINNum = 0, CLKPinNum = 1, MISOPinNum = 2, MOSIPinNum = 3):
"""
Name: U6.spi(SPIBytes, AutoCS=True, DisableDirConfig = False,
SPIMode = 'A', SPIClockFactor = 0, CSPINNum = 0,
CLKPinNum = 1, MISOPinNum = 2, MOSIPinNum = 3)
Args: SPIBytes, A list of bytes to send.
AutoCS, If True, the CS line is automatically driven low
during the SPI communication and brought back high
when done.
DisableDirConfig, If True, function does not set the direction
of the line.
SPIMode, 'A', 'B', 'C', or 'D'.
SPIClockFactor, Sets the frequency of the SPI clock.
CSPINNum, which pin is CS
CLKPinNum, which pin is CLK
MISOPinNum, which pin is MISO
MOSIPinNum, which pin is MOSI
Desc: Sends and receives serial data using SPI synchronous
communication. See Section 5.2.17 of the user's guide.
"""
if not isinstance(SPIBytes, list):
raise LabJackException("SPIBytes MUST be a list of bytes")
numSPIBytes = len(SPIBytes)
oddPacket = False
if numSPIBytes%2 != 0:
SPIBytes.append(0)
numSPIBytes = numSPIBytes + 1
oddPacket = True
command = [ 0 ] * (13 + numSPIBytes)
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 4 + (numSPIBytes/2)
command[3] = 0x3A
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if AutoCS:
command[6] |= (1 << 7)
if DisableDirConfig:
command[6] |= (1 << 6)
command[6] |= ( self.SPIModes[SPIMode] & 3 )
command[7] = SPIClockFactor
#command[8] = Reserved
command[9] = CSPINNum
command[10] = CLKPinNum
command[11] = MISOPinNum
command[12] = MOSIPinNum
command[13] = numSPIBytes
if oddPacket:
command[13] = numSPIBytes - 1
command[14:] = SPIBytes
result = self._writeRead(command, 8+numSPIBytes, [ 0xF8, 1+(numSPIBytes/2), 0x3A ])
return { 'NumSPIBytesTransferred' : result[7], 'SPIBytes' : result[8:] }
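# Usage sketch: shift three bytes out on MOSI while reading MISO, using the
# default pins (CS=FIO0, CLK=FIO1, MISO=FIO2, MOSI=FIO3):
# d.spi([0xAA, 0xBB, 0xCC])
# => { 'NumSPIBytesTransferred' : 3, 'SPIBytes' : [ <3 bytes read> ] }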
def asynchConfig(self, Update = True, UARTEnable = True, DesiredBaud = None, BaudFactor = 63036):
"""
Name: U6.asynchConfig(Update = True, UARTEnable = True,
DesiredBaud = None, BaudFactor = 63036)
Args: Update, If True, new values are written.
UARTEnable, If True, UART will be enabled.
DesiredBaud, If set, the formula below is applied to
calculate BaudFactor.
BaudFactor, = 2^16 - 48000000/(2 * Desired Baud). Ignored
if DesiredBaud is set.
Desc: Configures the U6 UART for asynchronous communication. See
section 5.2.18 of the User's Guide.
"""
if UARTEnable:
self.configIO(EnableUART = True)
command = [ 0 ] * 10
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x02
command[3] = 0x14
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
#command[6] = 0x00
if Update:
command[7] = (1 << 7)
if UARTEnable:
command[7] |= (1 << 6)
if DesiredBaud != None:
BaudFactor = (2**16) - 48000000/(2 * DesiredBaud)
t = struct.pack("<H", BaudFactor)
command[8] = ord(t[0])
command[9] = ord(t[1])
results = self._writeRead(command, 10, [0xF8, 0x02, 0x14])
if command[8] != results[8] or command[9] != results[9]:
raise LabJackException("BaudFactor didn't stick.")
def asynchTX(self, AsynchBytes):
"""
Name: U6.asynchTX(AsynchBytes)
Args: AsynchBytes, List of bytes to send
Desc: Sends bytes to the U6 UART which will be sent asynchronously
on the transmit line. Section 5.2.19 of the User's Guide.
"""
numBytes = len(AsynchBytes)
oddPacket = False
if numBytes%2 != 0:
oddPacket = True
AsynchBytes.append(0)
numBytes = numBytes + 1
command = [ 0 ] * (8+numBytes)
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 1 + (numBytes/2)
command[3] = 0x15
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
#command[6] = 0x00
command[7] = numBytes
if oddPacket:
command[7] = numBytes-1
command[8:] = AsynchBytes
result = self._writeRead(command, 10, [ 0xF8, 0x02, 0x15])
return { 'NumAsynchBytesSent' : result[7], 'NumAsynchBytesInRXBuffer' : result[8] }
def asynchRX(self, Flush = False):
"""
Name: U6.asynchRX(Flush = False)
Args: Flush, If True, empties the entire 256-byte RX buffer.
Desc: Reads the oldest 32 bytes from the U6 UART RX buffer.
Section 5.2.20 of the User's Guide.
"""
command = [ 0, 0xF8, 0x01, 0x16, 0, 0, 0, int(Flush)]
result = self._writeRead(command, 40, [ 0xF8, 0x11, 0x16 ])
return { 'NumAsynchBytesInRXBuffer' : result[7], 'AsynchBytes' : result[8:] }
def i2c(self, Address, I2CBytes, EnableClockStretching = False, NoStopWhenRestarting = False, ResetAtStart = False, SpeedAdjust = 0, SDAPinNum = 0, SCLPinNum = 1, NumI2CBytesToReceive = 0, AddressByte = None):
"""
Name: U6.i2c(Address, I2CBytes, EnableClockStretching = False, NoStopWhenRestarting = False, ResetAtStart = False, SpeedAdjust = 0, SDAPinNum = 0, SCLPinNum = 1, NumI2CBytesToReceive = 0, AddressByte = None)
Args: Address, the address (Not shifted over)
I2CBytes, a list of bytes to send
EnableClockStretching, True enables clock stretching
NoStopWhenRestarting, True means no stop sent when restarting
ResetAtStart, if True, an I2C bus reset will be done
before communicating.
SpeedAdjust, Allows the communication frequency to be reduced.
SDAPinNum, Which pin will be data
SCLPinNum, Which pin is clock
NumI2CBytesToReceive, Number of I2C bytes to expect back.
AddressByte, The address as you would put it in the lowlevel
packet. Overrides Address. Optional.
Desc: Sends and receives serial data using I2C synchronous
communication. Section 5.2.21 of the User's Guide.
"""
numBytes = len(I2CBytes)
oddPacket = False
if numBytes%2 != 0:
oddPacket = True
I2CBytes.append(0)
numBytes = numBytes+1
command = [ 0 ] * (14+numBytes)
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 4 + (numBytes/2)
command[3] = 0x3B
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if EnableClockStretching:
command[6] |= (1 << 3)
if NoStopWhenRestarting:
command[6] |= (1 << 2)
if ResetAtStart:
command[6] |= (1 << 1)
command[7] = SpeedAdjust
command[8] = SDAPinNum
command[9] = SCLPinNum
if AddressByte != None:
command[10] = AddressByte
else:
command[10] = Address << 1
#command[11] = Reserved
command[12] = numBytes
if oddPacket:
command[12] = numBytes-1
command[13] = NumI2CBytesToReceive
command[14:] = I2CBytes
oddResponse = False
if NumI2CBytesToReceive%2 != 0:
NumI2CBytesToReceive = NumI2CBytesToReceive+1
oddResponse = True
result = self._writeRead(command, (12+NumI2CBytesToReceive), [0xF8, (3+(NumI2CBytesToReceive/2)), 0x3B])
if NumI2CBytesToReceive != 0:
return { 'AckArray' : result[8:12], 'I2CBytes' : result[12:] }
else:
return { 'AckArray' : result[8:12] }
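# Usage sketch (hypothetical device at 7-bit address 0x48 on the default
# SDA/SCL pins): write one register byte, then read two bytes back:
# d.i2c(0x48, [0x00], NumI2CBytesToReceive = 2)
# => { 'AckArray' : [...], 'I2CBytes' : [ <2 bytes read> ] }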
def sht1x(self, DataPinNum = 0, ClockPinNum = 1, SHTOptions = 0xc0):
"""
Name: U6.sht1x(DataPinNum = 0, ClockPinNum = 1, SHTOptions = 0xc0)
Args: DataPinNum, Which pin is the Data line
ClockPinNum, Which line is the Clock line
SHTOptions (and proof people read documentation):
bit 7 = Read Temperature
bit 6 = Read Relative Humidity
bit 2 = Heater. 1 = on, 0 = off
bit 1 = Reserved at 0
bit 0 = Resolution. 1 = 8 bit RH, 12 bit T; 0 = 12 bit RH, 14 bit T
Desc: Reads temperature and humidity from a Sensirion SHT1X sensor.
Section 5.2.22 of the User's Guide.
"""
command = [ 0 ] * 10
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x02
command[3] = 0x39
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = DataPinNum
command[7] = ClockPinNum
#command[8] = Reserved
command[9] = SHTOptions
result = self._writeRead(command, 16, [ 0xF8, 0x05, 0x39])
val = (result[11]*256) + result[10]
temp = -39.60 + 0.01*val
val = (result[14]*256) + result[13]
humid = -4 + 0.0405*val + -.0000028*(val*val)
humid = (temp - 25)*(0.01 + 0.00008*val) + humid
return { 'StatusReg' : result[8], 'StatusCRC' : result[9], 'Temperature' : temp, 'TemperatureCRC' : result[12], 'Humidity' : humid, 'HumidityCRC' : result[15] }
# --------------------------- Old U6 code -------------------------------
def _readCalDataBlock(self, n):
"""
Internal routine to read the specified calibration block (0-2)
"""
sendBuffer = [0] * 8
sendBuffer[1] = 0xF8 # command byte
sendBuffer[2] = 0x01 # number of data words
sendBuffer[3] = 0x2D # extended command number
sendBuffer[6] = 0x00
sendBuffer[7] = n # Blocknum = 0
self.write(sendBuffer)
buff = self.read(40)
return buff[8:]
def getCalibrationData(self):
"""
Name: getCalibrationData(self)
Args: None
Desc: Gets the slopes and offsets for AIN and DACs,
as well as other calibration data
>>> myU6 = U6()
>>> myU6.getCalibrationData()
>>> myU6.calInfo
<ainDiffOffset: -2.46886488446,...>
"""
if self.debug is True:
print "Calibration data retrieval"
self.calInfo.nominal = False
#reading block 0 from memory
rcvBuffer = self._readCalDataBlock(0)
# Positive Channel calibration
self.calInfo.ain10vSlope = toDouble(rcvBuffer[:8])
self.calInfo.ain10vOffset = toDouble(rcvBuffer[8:16])
self.calInfo.ain1vSlope = toDouble(rcvBuffer[16:24])
self.calInfo.ain1vOffset = toDouble(rcvBuffer[24:])
#reading block 1 from memory
rcvBuffer = self._readCalDataBlock(1)
self.calInfo.ain100mvSlope = toDouble(rcvBuffer[:8])
self.calInfo.ain100mvOffset = toDouble(rcvBuffer[8:16])
self.calInfo.ain10mvSlope = toDouble(rcvBuffer[16:24])
self.calInfo.ain10mvOffset = toDouble(rcvBuffer[24:])
self.calInfo.ainSlope = [self.calInfo.ain10vSlope, self.calInfo.ain1vSlope, self.calInfo.ain100mvSlope, self.calInfo.ain10mvSlope]
self.calInfo.ainOffset = [ self.calInfo.ain10vOffset, self.calInfo.ain1vOffset, self.calInfo.ain100mvOffset, self.calInfo.ain10mvOffset ]
#reading block 2 from memory
rcvBuffer = self._readCalDataBlock(2)
# Negative Channel calibration
self.calInfo.ain10vNegSlope = toDouble(rcvBuffer[:8])
self.calInfo.ain10vCenter = toDouble(rcvBuffer[8:16])
self.calInfo.ain1vNegSlope = toDouble(rcvBuffer[16:24])
self.calInfo.ain1vCenter = toDouble(rcvBuffer[24:])
#reading block 3 from memory
rcvBuffer = self._readCalDataBlock(3)
self.calInfo.ain100mvNegSlope = toDouble(rcvBuffer[:8])
self.calInfo.ain100mvCenter = toDouble(rcvBuffer[8:16])
self.calInfo.ain10mvNegSlope = toDouble(rcvBuffer[16:24])
self.calInfo.ain10mvCenter = toDouble(rcvBuffer[24:])
self.calInfo.ainNegSlope = [ self.calInfo.ain10vNegSlope, self.calInfo.ain1vNegSlope, self.calInfo.ain100mvNegSlope, self.calInfo.ain10mvNegSlope ]
self.calInfo.ainCenter = [ self.calInfo.ain10vCenter, self.calInfo.ain1vCenter, self.calInfo.ain100mvCenter, self.calInfo.ain10mvCenter ]
#reading block 4 from memory
rcvBuffer = self._readCalDataBlock(4)
# Miscellaneous
self.calInfo.dac0Slope = toDouble(rcvBuffer[:8])
self.calInfo.dac0Offset = toDouble(rcvBuffer[8:16])
self.calInfo.dac1Slope = toDouble(rcvBuffer[16:24])
self.calInfo.dac1Offset = toDouble(rcvBuffer[24:])
#reading block 5 from memory
rcvBuffer = self._readCalDataBlock(5)
self.calInfo.currentOutput0 = toDouble(rcvBuffer[:8])
self.calInfo.currentOutput1 = toDouble(rcvBuffer[8:16])
self.calInfo.temperatureSlope = toDouble(rcvBuffer[16:24])
self.calInfo.temperatureOffset = toDouble(rcvBuffer[24:])
if self.productName == "U6-Pro":
# Hi-Res ADC stuff
#reading block 6 from memory
rcvBuffer = self._readCalDataBlock(6)
# Positive Channel calibration
self.calInfo.proAin10vSlope = toDouble(rcvBuffer[:8])
self.calInfo.proAin10vOffset = toDouble(rcvBuffer[8:16])
self.calInfo.proAin1vSlope = toDouble(rcvBuffer[16:24])
self.calInfo.proAin1vOffset = toDouble(rcvBuffer[24:])
#reading block 7 from memory
rcvBuffer = self._readCalDataBlock(7)
self.calInfo.proAin100mvSlope = toDouble(rcvBuffer[:8])
self.calInfo.proAin100mvOffset = toDouble(rcvBuffer[8:16])
self.calInfo.proAin10mvSlope = toDouble(rcvBuffer[16:24])
self.calInfo.proAin10mvOffset = toDouble(rcvBuffer[24:])
self.calInfo.proAinSlope = [self.calInfo.proAin10vSlope, self.calInfo.proAin1vSlope, self.calInfo.proAin100mvSlope, self.calInfo.proAin10mvSlope]
self.calInfo.proAinOffset = [ self.calInfo.proAin10vOffset, self.calInfo.proAin1vOffset, self.calInfo.proAin100mvOffset, self.calInfo.proAin10mvOffset ]
#reading block 8 from memory
rcvBuffer = self._readCalDataBlock(8)
# Negative Channel calibration
self.calInfo.proAin10vNegSlope = toDouble(rcvBuffer[:8])
self.calInfo.proAin10vCenter = toDouble(rcvBuffer[8:16])
self.calInfo.proAin1vNegSlope = toDouble(rcvBuffer[16:24])
self.calInfo.proAin1vCenter = toDouble(rcvBuffer[24:])
#reading block 9 from memory
rcvBuffer = self._readCalDataBlock(9)
self.calInfo.proAin100mvNegSlope = toDouble(rcvBuffer[:8])
self.calInfo.proAin100mvCenter = toDouble(rcvBuffer[8:16])
self.calInfo.proAin10mvNegSlope = toDouble(rcvBuffer[16:24])
self.calInfo.proAin10mvCenter = toDouble(rcvBuffer[24:])
self.calInfo.proAinNegSlope = [ self.calInfo.proAin10vNegSlope, self.calInfo.proAin1vNegSlope, self.calInfo.proAin100mvNegSlope, self.calInfo.proAin10mvNegSlope ]
self.calInfo.proAinCenter = [ self.calInfo.proAin10vCenter, self.calInfo.proAin1vCenter, self.calInfo.proAin100mvCenter, self.calInfo.proAin10mvCenter ]
def binaryToCalibratedAnalogVoltage(self, gainIndex, bytesVoltage, is16Bits=False):
"""
Name: binaryToCalibratedAnalogVoltage(gainIndex, bytesVoltage, is16Bits = False)
Args: gainIndex, which gain did you use?
bytesVoltage, bytes returned from the U6
is16Bits, set to True if bytesVoltage is 16 bits (not 24)
Desc: Converts binary voltage to an analog value.
"""
if not is16Bits:
bits = float(bytesVoltage)/256
else:
bits = float(bytesVoltage)
center = self.calInfo.ainCenter[gainIndex]
negSlope = self.calInfo.ainNegSlope[gainIndex]
posSlope = self.calInfo.ainSlope[gainIndex]
if self.productName == "U6-Pro":
center = self.calInfo.proAinCenter[gainIndex]
negSlope = self.calInfo.proAinNegSlope[gainIndex]
posSlope = self.calInfo.proAinSlope[gainIndex]
if bits < center:
return (center - bits) * negSlope
else:
return (bits - center) * posSlope
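# Worked example with the nominal gainIndex 0 (+/-10 V) constants from
# CalibrationInfo above: a 16-bit reading of 65535 gives
# (65535 - 33523) * 3.1580578e-4 ~= +10.11 V, while a reading of 0 gives
# (33523 - 0) * -3.15805800e-4 ~= -10.59 V.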
def binaryToCalibratedAnalogTemperature(self, bytesTemperature):
voltage = self.binaryToCalibratedAnalogVoltage(0, bytesTemperature)
return self.calInfo.temperatureSlope * float(voltage) + self.calInfo.temperatureOffset
def softReset(self):
"""
Name: softReset
Args: none
Desc: Send a soft reset.
>>> myU6 = U6()
>>> myU6.softReset()
"""
command = [ 0x00, 0x99, 0x01, 0x00 ]
command = setChecksum8(command, 4)
self.write(command, False, False)
results = self.read(4)
if results[3] != 0:
raise LowlevelErrorException(results[3], "The softReset command returned an error:\n %s" % lowlevelErrorToString(results[3]))
def hardReset(self):
"""
Name: hardReset
Args: none
Desc: Send a hard reset.
>>> myU6 = U6()
>>> myU6.hardReset()
"""
command = [ 0x00, 0x99, 0x02, 0x00 ]
command = setChecksum8(command, 4)
self.write(command, False, False)
results = self.read(4)
if results[3] != 0:
raise LowlevelErrorException(results[3], "The softHard command returned an error:\n %s" % lowlevelErrorToString(results[3]))
self.close()
def setLED(self, state):
"""
Name: setLED(self, state)
Args: state: 1 = On, 0 = Off
Desc: Sets the state of the LED. (5.2.5.4 of user's guide)
>>> myU6 = U6()
>>> myU6.setLED(0)
... (LED turns off) ...
"""
self.getFeedback(LED(state))
def getTemperature(self):
"""
Name: getTemperature
Args: none
Desc: Reads the U6's internal temperature sensor in Kelvin.
See Section 2.6.4 of the U6 User's Guide.
>>> myU6.getTemperature()
299.87723471224308
"""
if self.calInfo.nominal:
# Read the actual calibration constants if we haven't already.
self.getCalibrationData()
result = self.getFeedback(AIN24AR(14))
return self.binaryToCalibratedAnalogTemperature(result[0]['AIN'])
def getAIN(self, positiveChannel, resolutionIndex = 0, gainIndex = 0, settlingFactor = 0, differential = False):
"""
Name: getAIN
Args: positiveChannel, resolutionIndex = 0, gainIndex = 0, settlingFactor = 0, differential = False
Desc: Reads an AIN and applies the calibration constants to it.
>>> myU6.getAIN(14)
299.87723471224308
"""
result = self.getFeedback(AIN24AR(positiveChannel, resolutionIndex, gainIndex, settlingFactor, differential))
return self.binaryToCalibratedAnalogVoltage(result[0]['GainIndex'], result[0]['AIN'])
def readDefaultsConfig(self):
"""
Name: U6.readDefaultsConfig( )
Args: None
Desc: Reads the power-up defaults stored in flash.
"""
results = dict()
defaults = self.readDefaults(0)
results['FIODirection'] = defaults[4]
results['FIOState'] = defaults[5]
results['EIODirection'] = defaults[8]
results['EIOState'] = defaults[9]
results['CIODirection'] = defaults[12]
results['CIOState'] = defaults[13]
results['ConfigWriteMask'] = defaults[16]
results['NumOfTimersEnable'] = defaults[17]
results['CounterMask'] = defaults[18]
results['PinOffset'] = defaults[19]
defaults = self.readDefaults(1)
results['ClockSource'] = defaults[0]
results['Divisor'] = defaults[1]
results['TMR0Mode'] = defaults[16]
results['TMR0ValueL'] = defaults[17]
results['TMR0ValueH'] = defaults[18]
results['TMR1Mode'] = defaults[20]
results['TMR1ValueL'] = defaults[21]
results['TMR1ValueH'] = defaults[22]
results['TMR2Mode'] = defaults[24]
results['TMR2ValueL'] = defaults[25]
results['TMR2ValueH'] = defaults[26]
results['TMR3Mode'] = defaults[28]
results['TMR3ValueL'] = defaults[29]
results['TMR3ValueH'] = defaults[30]
defaults = self.readDefaults(2)
results['DAC0'] = struct.unpack( ">H", struct.pack("BB", *defaults[16:18]) )[0]
results['DAC1'] = struct.unpack( ">H", struct.pack("BB", *defaults[20:22]) )[0]
defaults = self.readDefaults(3)
for i in range(14):
results["AIN%sGainRes" % i] = defaults[i]
results["AIN%sOptions" % i] = defaults[i+16]
return results
def exportConfig(self):
"""
Name: U6.exportConfig( )
Args: None
Desc: Takes a configuration and puts it into a ConfigParser object.
"""
# Make a new configuration file
parser = ConfigParser.SafeConfigParser()
# Change optionxform so that options preserve their case.
parser.optionxform = str
# Local Id and name
section = "Identifiers"
parser.add_section(section)
parser.set(section, "Local ID", str(self.localId))
parser.set(section, "Name", str(self.getName()))
parser.set(section, "Device Type", str(self.devType))
# FIO Direction / State
section = "FIOs"
parser.add_section(section)
dirs, states = self.getFeedback( PortDirRead(), PortStateRead() )
for key, value in dirs.items():
parser.set(section, "%s Directions" % key, str(value))
for key, value in states.items():
parser.set(section, "%s States" % key, str(value))
# DACs
section = "DACs"
parser.add_section(section)
dac0 = self.readRegister(5000)
dac0 = max(dac0, 0)
dac0 = min(dac0, 5)
parser.set(section, "DAC0", "%0.2f" % dac0)
dac1 = self.readRegister(5002)
dac1 = max(dac1, 0)
dac1 = min(dac1, 5)
parser.set(section, "DAC1", "%0.2f" % dac1)
# Timer Clock Configuration
section = "Timer Clock Speed Configuration"
parser.add_section(section)
timerclockconfig = self.configTimerClock()
for key, value in timerclockconfig.items():
parser.set(section, key, str(value))
# Timers / Counters
section = "Timers And Counters"
parser.add_section(section)
ioconfig = self.configIO()
for key, value in ioconfig.items():
parser.set(section, key, str(value))
for i in range(ioconfig['NumberTimersEnabled']):
mode, value = self.readRegister(7100 + (2 * i), numReg = 2, format = ">HH")
parser.set(section, "Timer%s Mode" % i, str(mode))
parser.set(section, "Timer%s Value" % i, str(value))
return parser
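# Usage sketch: persist the current configuration with the standard
# ConfigParser file format:
# parser = d.exportConfig()
# with open("u6.cfg", "w") as f:
#     parser.write(f)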
def loadConfig(self, configParserObj):
"""
Name: U6.loadConfig( configParserObj )
Args: configParserObj, A Config Parser object to load in
Desc: Takes a configuration and updates the U6 to match it.
"""
parser = configParserObj
# Set Identifiers:
section = "Identifiers"
if parser.has_section(section):
if parser.has_option(section, "device type"):
if parser.getint(section, "device type") != self.devType:
raise Exception("Not a U6 Config file.")
if parser.has_option(section, "local id"):
self.configU6( LocalID = parser.getint(section, "local id"))
if parser.has_option(section, "name"):
self.setName( parser.get(section, "name") )
# Set FIOs:
section = "FIOs"
if parser.has_section(section):
fiodirs = 0
eiodirs = 0
ciodirs = 0
fiostates = 0
eiostates = 0
ciostates = 0
if parser.has_option(section, "fios directions"):
fiodirs = parser.getint(section, "fios directions")
if parser.has_option(section, "eios directions"):
eiodirs = parser.getint(section, "eios directions")
if parser.has_option(section, "cios directions"):
ciodirs = parser.getint(section, "cios directions")
if parser.has_option(section, "fios states"):
fiostates = parser.getint(section, "fios states")
if parser.has_option(section, "eios states"):
eiostates = parser.getint(section, "eios states")
if parser.has_option(section, "cios states"):
ciostates = parser.getint(section, "cios states")
self.getFeedback( PortStateWrite([fiostates, eiostates, ciostates]), PortDirWrite([fiodirs, eiodirs, ciodirs]) )
# Set DACs:
section = "DACs"
if parser.has_section(section):
if parser.has_option(section, "dac0"):
self.writeRegister(5000, parser.getfloat(section, "dac0"))
if parser.has_option(section, "dac1"):
self.writeRegister(5002, parser.getfloat(section, "dac1"))
# Set Timer Clock Configuration
section = "Timer Clock Speed Configuration"
if parser.has_section(section):
if parser.has_option(section, "timerclockbase") and parser.has_option(section, "timerclockdivisor"):
self.configTimerClock(TimerClockBase = parser.getint(section, "timerclockbase"), TimerClockDivisor = parser.getint(section, "timerclockdivisor"))
# Set Timers / Counters
section = "Timers And Counters"
if parser.has_section(section):
nte = None
c0e = None
c1e = None
cpo = None
if parser.has_option(section, "NumberTimersEnabled"):
nte = parser.getint(section, "NumberTimersEnabled")
if parser.has_option(section, "TimerCounterPinOffset"):
cpo = parser.getint(section, "TimerCounterPinOffset")
if parser.has_option(section, "Counter0Enabled"):
c0e = parser.getboolean(section, "Counter0Enabled")
if parser.has_option(section, "Counter1Enabled"):
c1e = parser.getboolean(section, "Counter1Enabled")
self.configIO(NumberTimersEnabled = nte, EnableCounter1 = c1e, EnableCounter0 = c0e, TimerCounterPinOffset = cpo)
mode = None
value = None
for i in range(4):
if parser.has_option(section, "timer%i mode" % i):
mode = parser.getint(section, "timer%i mode" % i)
if parser.has_option(section, "timer%i value" % i):
value = parser.getint(section, "timer%i value" % i)
self.getFeedback( TimerConfig(i, mode, value) )
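# Usage sketch: round-trip a configuration saved by exportConfig() back
# onto the device:
# parser = ConfigParser.SafeConfigParser()
# parser.read("u6.cfg")
# d.loadConfig(parser)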
class FeedbackCommand(object):
'''
The base FeedbackCommand class
Used to make Feedback easy. Make a list of these
and call getFeedback.
'''
readLen = 0
def handle(self, input):
return None
validChannels = range(144)
class AIN(FeedbackCommand):
'''
Analog Input Feedback command
AIN(PositiveChannel)
PositiveChannel : the positive channel to use
NOTE: This function kept for compatibility. Please use
the new AIN24 and AIN24AR.
returns 16-bit unsigned int sample
>>> d.getFeedback( u6.AIN( PositiveChannel ) )
[ 19238 ]
'''
def __init__(self, PositiveChannel):
if PositiveChannel not in validChannels:
raise LabJackException("Invalid Positive Channel specified")
self.positiveChannel = PositiveChannel
self.cmdBytes = [ 0x01, PositiveChannel, 0 ]
readLen = 2
def __repr__(self):
return "<u6.AIN( PositiveChannel = %s )>" % self.positiveChannel
def handle(self, input):
result = (input[1] << 8) + input[0]
return result
class AIN24(FeedbackCommand):
'''
Analog Input 24-bit Feedback command
ainCommand = AIN24(PositiveChannel, ResolutionIndex = 0, GainIndex = 0, SettlingFactor = 0, Differential = False)
See section 5.2.5.2 of the user's guide.
NOTE: If you use a gain index of 15 (autorange), you should be using
the AIN24AR command instead.
positiveChannel : The positive channel to use
resolutionIndex : 0=default, 1-8 for high-speed ADC,
9-12 for high-res ADC on U6-Pro.
gainIndex : 0=x1, 1=x10, 2=x100, 3=x1000, 15=autorange
settlingFactor : 0=5us, 1=10us, 2=100us, 3=1ms, 4=10ms
differential : If this bit is set, a differential reading is done where
the negative channel is positiveChannel+1
returns 24-bit unsigned int sample
>>> d.getFeedback( u6.AIN24(PositiveChannel, ResolutionIndex = 0,
GainIndex = 0, SettlingFactor = 0,
Differential = False ) )
[ 193847 ]
'''
def __init__(self, PositiveChannel, ResolutionIndex = 0, GainIndex = 0, SettlingFactor = 0, Differential = False):
if PositiveChannel not in validChannels:
raise LabJackException("Invalid Positive Channel specified")
self.positiveChannel = PositiveChannel
self.resolutionIndex = ResolutionIndex
self.gainIndex = GainIndex
self.settlingFactor = SettlingFactor
self.differential = Differential
byte2 = ( ResolutionIndex & 0xf )
byte2 = ( ( GainIndex & 0xf ) << 4 ) + byte2
byte3 = (int(Differential) << 7) + SettlingFactor
self.cmdBytes = [ 0x02, PositiveChannel, byte2, byte3 ]
def __repr__(self):
return "<u6.AIN24( PositiveChannel = %s, ResolutionIndex = %s, GainIndex = %s, SettlingFactor = %s, Differential = %s )>" % (self.positiveChannel, self.resolutionIndex, self.gainIndex, self.settlingFactor, self.differential)
readLen = 3
def handle(self, input):
#Put it all into an integer.
result = (input[2] << 16 ) + (input[1] << 8 ) + input[0]
return result
class AIN24AR(FeedbackCommand):
'''
Autorange Analog Input 24-bit Feedback command
ainARCommand = AIN24AR(0, ResolutionIndex = 0, GainIndex = 0, SettlingFactor = 0, Differential = False)
See section 5.2.5.3 of the user's guide
PositiveChannel : The positive channel to use
ResolutionIndex : 0=default, 1-8 for high-speed ADC,
9-13 for high-res ADC on U6-Pro.
GainIndex : 0=x1, 1=x10, 2=x100, 3=x1000, 15=autorange
SettlingFactor : 0=5us, 1=10us, 2=100us, 3=1ms, 4=10ms
Differential : If this bit is set, a differential reading is done where
the negative channel is positiveChannel+1
returns a dictionary:
{
'AIN' : < 24-bit binary reading >,
'ResolutionIndex' : < actual resolution setting used for the reading >,
'GainIndex' : < actual gain used for the reading >,
'Status' : < reserved for future use >
}
>>> d.getFeedback( u6.AIN24AR( PositiveChannel, ResolutionIndex = 0,
GainIndex = 0, SettlingFactor = 0,
Differential = False ) )
{ 'AIN' : 193847, 'ResolutionIndex' : 0, 'GainIndex' : 0, 'Status' : 0 }
'''
def __init__(self, PositiveChannel, ResolutionIndex = 0, GainIndex = 0, SettlingFactor = 0, Differential = False):
if PositiveChannel not in validChannels:
raise LabJackException("Invalid Positive Channel specified")
self.positiveChannel = PositiveChannel
self.resolutionIndex = ResolutionIndex
self.gainIndex = GainIndex
self.settlingFactor = SettlingFactor
self.differential = Differential
byte2 = ( ResolutionIndex & 0xf )
byte2 = ( ( GainIndex & 0xf ) << 4 ) + byte2
byte3 = (int(Differential) << 7) + SettlingFactor
self.cmdBytes = [ 0x03, PositiveChannel, byte2, byte3 ]
def __repr__(self):
return "<u6.AIN24AR( PositiveChannel = %s, ResolutionIndex = %s, GainIndex = %s, SettlingFactor = %s, Differential = %s )>" % (self.positiveChannel, self.resolutionIndex, self.gainIndex, self.settlingFactor, self.differential)
readLen = 5
def handle(self, input):
#Put it all into an integer.
result = (input[2] << 16 ) + (input[1] << 8 ) + input[0]
resolutionIndex = input[3] & 0xf
gainIndex = ( input[3] >> 4 ) & 0xf
status = input[4]
return { 'AIN' : result, 'ResolutionIndex' : resolutionIndex, 'GainIndex' : gainIndex, 'Status' : status }
class WaitShort(FeedbackCommand):
'''
WaitShort Feedback command
specify the number of 128us time increments to wait
>>> d.getFeedback( u6.WaitShort( Time ) )
[ None ]
'''
def __init__(self, Time):
self.time = Time % 256
self.cmdBytes = [ 5, Time % 256 ]
def __repr__(self):
return "<u6.WaitShort( Time = %s )>" % self.time
class WaitLong(FeedbackCommand):
'''
WaitLong Feedback command
specify the number of 32ms time increments to wait
>>> d.getFeedback( u6.WaitLong( Time ) )
[ None ]
'''
def __init__(self, Time):
self.time = Time
self.cmdBytes = [ 6, Time % 256 ]
def __repr__(self):
return "<u6.WaitLog( Time = %s )>" % self.time
class LED(FeedbackCommand):
'''
LED Toggle
specify whether the LED should be on or off by truth value
1 or True = On, 0 or False = Off
>>> d.getFeedback( u6.LED( State ) )
[ None ]
'''
def __init__(self, State):
self.state = State
self.cmdBytes = [ 9, int(bool(State)) ]
def __repr__(self):
return "<u6.LED( State = %s )>" % self.state
class BitStateRead(FeedbackCommand):
'''
BitStateRead Feedback command
read the state of a single bit of digital I/O. Only digital
lines return valid readings.
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
return 0 or 1
>>> d.getFeedback( u6.BitStateRead( IONumber ) )
[ 1 ]
'''
def __init__(self, IONumber):
self.ioNumber = IONumber
self.cmdBytes = [ 10, IONumber % 20 ]
def __repr__(self):
return "<u6.BitStateRead( IONumber = %s )>" % self.ioNumber
readLen = 1
def handle(self, input):
return int(bool(input[0]))
class BitStateWrite(FeedbackCommand):
'''
BitStateWrite Feedback command
write a single bit of digital I/O. The direction of the
specified line is forced to output.
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
State: 0 or 1
>>> d.getFeedback( u6.BitStateWrite( IONumber, State ) )
[ None ]
'''
def __init__(self, IONumber, State):
self.ioNumber = IONumber
self.state = State
self.cmdBytes = [ 11, (IONumber % 20) + (int(bool(State)) << 7) ]
def __repr__(self):
return "<u6.BitStateWrite( IONumber = %s, State = %s )>" % self.ioNumber
class BitDirRead(FeedbackCommand):
'''
Read the digital direction of one I/O
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
returns 1 = Output, 0 = Input
>>> d.getFeedback( u6.BitDirRead( IONumber ) )
[ 1 ]
'''
def __init__(self, IONumber):
self.ioNumber = IONumber
self.cmdBytes = [ 12, IONumber % 20 ]
def __repr__(self):
return "<u6.BitDirRead( IONumber = %s )>" % self.ioNumber
readLen = 1
def handle(self, input):
return int(bool(input[0]))
class BitDirWrite(FeedbackCommand):
'''
BitDirWrite Feedback command
Set the digital direction of one I/O
IONumber: 0-7=FIO, 8-15=EIO, 16-19=CIO
Direction: 1 = Output, 0 = Input
>>> d.getFeedback( u6.BitDirWrite( IONumber, Direction ) )
[ None ]
'''
def __init__(self, IONumber, Direction):
self.ioNumber = IONumber
self.direction = Direction
self.cmdBytes = [ 13, (IONumber % 20) + (int(bool(Direction)) << 7) ]
def __repr__(self):
return "<u6.BitDirWrite( IONumber = %s, Direction = %s )>" % (self.ioNumber, self.direction)
class PortStateRead(FeedbackCommand):
"""
PortStateRead Feedback command
Reads the state of all digital I/O.
>>> d.getFeedback( u6.PortStateRead() )
[ { 'FIO' : 10, 'EIO' : 0, 'CIO' : 0 } ]
"""
def __init__(self):
self.cmdBytes = [ 26 ]
def __repr__(self):
return "<u6.PortStateRead()>"
readLen = 3
def handle(self, input):
return {'FIO' : input[0], 'EIO' : input[1], 'CIO' : input[2] }
class PortStateWrite(FeedbackCommand):
"""
PortStateWrite Feedback command
State: A list of 3 bytes representing FIO, EIO, CIO
WriteMask: A list of 3 bytes, representing which to update.
The Default is all ones.
>>> d.getFeedback( u6.PortStateWrite( State,
WriteMask = [ 0xff, 0xff, 0xff] ) )
[ None ]
"""
def __init__(self, State, WriteMask = [ 0xff, 0xff, 0xff]):
self.state = State
self.writeMask = WriteMask
self.cmdBytes = [ 27 ] + WriteMask + State
def __repr__(self):
return "<u6.PortStateWrite( State = %s, WriteMask = %s )>" % (self.state, self.writeMask)
class PortDirRead(FeedbackCommand):
"""
PortDirRead Feedback command
Reads the direction of all digital I/O.
>>> d.getFeedback( u6.PortDirRead() )
[ { 'FIO' : 10, 'EIO' : 0, 'CIO' : 0 } ]
"""
def __init__(self):
self.cmdBytes = [ 28 ]
def __repr__(self):
return "<u6.PortDirRead()>"
readLen = 3
def handle(self, input):
return {'FIO' : input[0], 'EIO' : input[1], 'CIO' : input[2] }
class PortDirWrite(FeedbackCommand):
"""
PortDirWrite Feedback command
Direction: A list of 3 bytes representing FIO, EIO, CIO
WriteMask: A list of 3 bytes, representing which to update. Default is all ones.
>>> d.getFeedback( u6.PortDirWrite( Direction,
WriteMask = [ 0xff, 0xff, 0xff] ) )
[ None ]
"""
def __init__(self, Direction, WriteMask = [ 0xff, 0xff, 0xff]):
self.direction = Direction
self.writeMask = WriteMask
self.cmdBytes = [ 29 ] + WriteMask + Direction
def __repr__(self):
return "<u6.PortDirWrite( Direction = %s, WriteMask = %s )>" % (self.direction, self.writeMask)
class DAC8(FeedbackCommand):
'''
8-bit DAC Feedback command
Controls a single analog output
Dac: 0 or 1
Value: 0-255
>>> d.getFeedback( u6.DAC8( Dac, Value ) )
[ None ]
'''
def __init__(self, Dac, Value):
self.dac = Dac
self.value = Value % 256
self.cmdBytes = [ 34 + (Dac % 2), Value % 256 ]
def __repr__(self):
return "<u6.DAC8( Dac = %s, Value = %s )>" % (self.dac, self.value)
class DAC0_8(DAC8):
"""
8-bit DAC Feedback command for DAC0
Controls DAC0 in 8-bit mode.
Value: 0-255
>>> d.getFeedback( u6.DAC0_8( Value ) )
[ None ]
"""
def __init__(self, Value):
DAC8.__init__(self, 0, Value)
def __repr__(self):
return "<u6.DAC0_8( Value = %s )>" % self.value
class DAC1_8(DAC8):
"""
8-bit DAC Feedback command for DAC1
Controls DAC1 in 8-bit mode.
Value: 0-255
>>> d.getFeedback( u6.DAC1_8( Value ) )
[ None ]
"""
def __init__(self, Value):
DAC8.__init__(self, 1, Value)
def __repr__(self):
return "<u6.DAC1_8( Value = %s )>" % self.value
class DAC16(FeedbackCommand):
'''
16-bit DAC Feedback command
Controls a single analog output
Dac: 0 or 1
Value: 0-65535
>>> d.getFeedback( u6.DAC16( Dac, Value ) )
[ None ]
'''
def __init__(self, Dac, Value):
self.dac = Dac
self.value = Value
self.cmdBytes = [ 38 + (Dac % 2), Value % 256, Value >> 8 ]
def __repr__(self):
return "<u6.DAC8( Dac = %s, Value = %s )>" % (self.dac, self.value)
class DAC0_16(DAC16):
"""
16-bit DAC Feedback command for DAC0
Controls DAC0 in 16-bit mode.
Value: 0-65535
>>> d.getFeedback( u6.DAC0_16( Value ) )
[ None ]
"""
def __init__(self, Value):
DAC16.__init__(self, 0, Value)
def __repr__(self):
return "<u6.DAC0_16( Value = %s )>" % self.value
class DAC1_16(DAC16):
"""
16-bit DAC Feedback command for DAC1
Controls DAC1 in 16-bit mode.
Value: 0-65535
>>> d.getFeedback( u6.DAC1_16( Value ) )
[ None ]
"""
def __init__(self, Value):
DAC16.__init__(self, 1, Value)
def __repr__(self):
return "<u6.DAC1_16( Value = %s )>" % self.value
class Timer(FeedbackCommand):
"""
For reading the value of the Timer. It provides the ability to update/reset
a given timer, and read the timer value.
( Section 5.2.5.17 of the User's Guide)
    timer: Either 0 or 1 for timer0 or timer1
UpdateReset: Set True if you want to update the value
Value: Only updated if the UpdateReset bit is 1. The meaning of this
parameter varies with the timer mode.
Mode: Set to the timer mode to handle any special processing. See classes
QuadratureInputTimer and TimerStopInput1.
Returns an unsigned integer of the timer value, unless Mode has been
specified and there are special return values. See Section 2.9.1 for
expected return values.
>>> d.getFeedback( u6.Timer( timer, UpdateReset = False, Value = 0 \
... , Mode = None ) )
[ 12314 ]
"""
def __init__(self, timer, UpdateReset = False, Value=0, Mode = None):
if timer != 0 and timer != 1:
raise LabJackException("Timer should be either 0 or 1.")
        if UpdateReset and Value is None:
raise LabJackException("UpdateReset set but no value.")
self.timer = timer
self.updateReset = UpdateReset
self.value = Value
self.mode = Mode
self.cmdBytes = [ (42 + (2*timer)), UpdateReset, Value % 256, Value >> 8 ]
readLen = 4
def __repr__(self):
return "<u6.Timer( timer = %s, UpdateReset = %s, Value = %s, Mode = %s )>" % (self.timer, self.updateReset, self.value, self.mode)
def handle(self, input):
inStr = struct.pack('B' * len(input), *input)
if self.mode == 8:
return struct.unpack('<i', inStr )[0]
elif self.mode == 9:
maxCount, current = struct.unpack('<HH', inStr )
return current, maxCount
else:
return struct.unpack('<I', inStr )[0]
class Timer0(Timer):
"""
For reading the value of the Timer0. It provides the ability to
update/reset Timer0, and read the timer value.
( Section 5.2.5.17 of the User's Guide)
UpdateReset: Set True if you want to update the value
Value: Only updated if the UpdateReset bit is 1. The meaning of this
parameter varies with the timer mode.
Mode: Set to the timer mode to handle any special processing. See classes
QuadratureInputTimer and TimerStopInput1.
>>> d.getFeedback( u6.Timer0( UpdateReset = False, Value = 0, \
... Mode = None ) )
[ 12314 ]
"""
def __init__(self, UpdateReset = False, Value = 0, Mode = None):
Timer.__init__(self, 0, UpdateReset, Value, Mode)
def __repr__(self):
return "<u6.Timer0( UpdateReset = %s, Value = %s, Mode = %s )>" % (self.updateReset, self.value, self.mode)
class Timer1(Timer):
"""
For reading the value of the Timer1. It provides the ability to
update/reset Timer1, and read the timer value.
( Section 5.2.5.17 of the User's Guide)
UpdateReset: Set True if you want to update the value
Value: Only updated if the UpdateReset bit is 1. The meaning of this
parameter varies with the timer mode.
Mode: Set to the timer mode to handle any special processing. See classes
QuadratureInputTimer and TimerStopInput1.
>>> d.getFeedback( u6.Timer1( UpdateReset = False, Value = 0, \
... Mode = None ) )
[ 12314 ]
"""
def __init__(self, UpdateReset = False, Value = 0, Mode = None):
Timer.__init__(self, 1, UpdateReset, Value, Mode)
def __repr__(self):
return "<u6.Timer1( UpdateReset = %s, Value = %s, Mode = %s )>" % (self.updateReset, self.value, self.mode)
class QuadratureInputTimer(Timer):
"""
For reading Quadrature input timers. They are special because their values
are signed.
( Section 2.9.1.8 of the User's Guide)
Args:
UpdateReset: Set True if you want to reset the counter.
Value: Set to 0, and UpdateReset to True to reset the counter.
Returns a signed integer.
>>> # Setup the two timers to be quadrature
>>> d.getFeedback( u6.Timer0Config( 8 ), u6.Timer1Config( 8 ) )
[None, None]
>>> # Read the value
>>> d.getFeedback( u6.QuadratureInputTimer() )
[-21]
"""
def __init__(self, UpdateReset = False, Value = 0):
Timer.__init__(self, 0, UpdateReset, Value, Mode = 8)
def __repr__(self):
return "<u6.QuadratureInputTimer( UpdateReset = %s, Value = %s )>" % (self.updateReset, self.value)
class TimerStopInput1(Timer1):
"""
For reading a stop input timer. They are special because the value returns
the current edge count and the stop value.
( Section 2.9.1.9 of the User's Guide)
Args:
UpdateReset: Set True if you want to update the value.
Value: The stop value. Only updated if the UpdateReset bit is 1.
Returns a tuple where the first value is current edge count, and the second
value is the stop value.
>>> # Setup the timer to be Stop Input
>>> d.getFeedback( u6.Timer0Config( 9, Value = 30 ) )
[None]
>>> # Read the timer
>>> d.getFeedback( u6.TimerStopInput1() )
[(0, 30)]
"""
def __init__(self, UpdateReset = False, Value = 0):
Timer.__init__(self, 1, UpdateReset, Value, Mode = 9)
def __repr__(self):
return "<u6.TimerStopInput1( UpdateReset = %s, Value = %s )>" % (self.updateReset, self.value)
class TimerConfig(FeedbackCommand):
"""
This IOType configures a particular timer.
timer = # of the timer to configure
TimerMode = See Section 2.9 for more information about the available modes.
Value = The meaning of this parameter varies with the timer mode.
>>> d.getFeedback( u6.TimerConfig( timer, TimerMode, Value = 0 ) )
[ None ]
"""
def __init__(self, timer, TimerMode, Value=0):
        '''Creates command bytes for configuring a Timer'''
#Conditions come from pages 33-34 of user's guide
if timer not in range(4):
raise LabJackException("Timer should be either 0-3.")
if TimerMode > 13 or TimerMode < 0:
raise LabJackException("Invalid Timer Mode.")
self.timer = timer
self.timerMode = TimerMode
self.value = Value
self.cmdBytes = [43 + (timer * 2), TimerMode, Value % 256, Value >> 8]
def __repr__(self):
return "<u6.TimerConfig( timer = %s, TimerMode = %s, Value = %s )>" % (self.timer, self.timerMode, self.value)
class Timer0Config(TimerConfig):
"""
This IOType configures Timer0.
TimerMode = See Section 2.9 for more information about the available modes.
Value = The meaning of this parameter varies with the timer mode.
>>> d.getFeedback( u6.Timer0Config( TimerMode, Value = 0 ) )
[ None ]
"""
def __init__(self, TimerMode, Value = 0):
TimerConfig.__init__(self, 0, TimerMode, Value)
def __repr__(self):
return "<u6.Timer0Config( TimerMode = %s, Value = %s )>" % (self.timerMode, self.value)
class Timer1Config(TimerConfig):
"""
This IOType configures Timer1.
TimerMode = See Section 2.9 for more information about the available modes.
Value = The meaning of this parameter varies with the timer mode.
>>> d.getFeedback( u6.Timer1Config( TimerMode, Value = 0 ) )
[ None ]
"""
def __init__(self, TimerMode, Value = 0):
TimerConfig.__init__(self, 1, TimerMode, Value)
def __repr__(self):
return "<u6.Timer1Config( TimerMode = %s, Value = %s )>" % (self.timerMode, self.value)
class Counter(FeedbackCommand):
'''
Counter Feedback command
Reads a hardware counter, optionally resetting it
counter: 0 or 1
Reset: True ( or 1 ) = Reset, False ( or 0 ) = Don't Reset
Returns the current count from the counter if enabled. If reset,
this is the value before the reset.
>>> d.getFeedback( u6.Counter( counter, Reset = False ) )
[ 2183 ]
'''
def __init__(self, counter, Reset):
self.counter = counter
self.reset = Reset
self.cmdBytes = [ 54 + (counter % 2), int(bool(Reset))]
def __repr__(self):
return "<u6.Counter( counter = %s, Reset = %s )>" % (self.counter, self.reset)
readLen = 4
def handle(self, input):
        inStr = struct.pack('B' * len(input), *input)  # pack to bytes so struct.unpack works on Python 2 and 3
        return struct.unpack('<I', inStr)[0]
class Counter0(Counter):
'''
Counter0 Feedback command
Reads hardware counter0, optionally resetting it
Reset: True ( or 1 ) = Reset, False ( or 0 ) = Don't Reset
Returns the current count from the counter if enabled. If reset,
this is the value before the reset.
>>> d.getFeedback( u6.Counter0( Reset = False ) )
[ 2183 ]
'''
def __init__(self, Reset = False):
Counter.__init__(self, 0, Reset)
def __repr__(self):
return "<u6.Counter0( Reset = %s )>" % self.reset
class Counter1(Counter):
'''
Counter1 Feedback command
Reads hardware counter1, optionally resetting it
Reset: True ( or 1 ) = Reset, False ( or 0 ) = Don't Reset
Returns the current count from the counter if enabled. If reset,
this is the value before the reset.
>>> d.getFeedback( u6.Counter1( Reset = False ) )
[ 2183 ]
'''
def __init__(self, Reset = False):
Counter.__init__(self, 1, Reset)
def __repr__(self):
return "<u6.Counter1( Reset = %s )>" % self.reset
class DSP(FeedbackCommand):
'''
DSP Feedback command
Acquires 1000 samples from the specified AIN at 50us intervals and performs
the specified analysis on the acquired data.
AcquireNewData: True, acquire new data; False, operate on existing data
DSPAnalysis: 1, True RMS; 2, DC Offset; 3, Peak To Peak; 4, Period (ms)
PLine: Positive Channel
Gain: The gain you would like to use
Resolution: The resolution index to use
SettlingFactor: The SettlingFactor to use
Differential: True, do differential readings; False, single-ended readings
    See section 5.2.5.20 of the U6 User's Guide
(http://labjack.com/support/u6/users-guide/5.2.5.20)
>>> d.getFeedback( u6.DSP( PLine, Resolution = 0, Gain = 0,
SettlingFactor = 0, Differential = False,
DSPAnalysis = 1, AcquireNewData = True) )
[ 2183 ]
'''
def __init__(self, PLine, Resolution = 0, Gain = 0, SettlingFactor = 0, Differential = False, DSPAnalysis = 1, AcquireNewData = True):
self.pline = PLine
self.resolution = Resolution
self.gain = Gain
self.settlingFactor = SettlingFactor
self.differential = Differential
self.dspAnalysis = DSPAnalysis
self.acquireNewData = AcquireNewData
byte1 = DSPAnalysis + ( int(AcquireNewData) << 7 )
byte4 = ( Gain << 4 ) + Resolution
byte5 = ( int(Differential) << 7 ) + SettlingFactor
self.cmdBytes = [ 62, byte1, PLine, 0, byte4, byte5, 0, 0 ]
def __repr__(self):
return "<u6.DSP( PLine = %s, Resolution = %s, Gain = %s, SettlingFactor = %s, Differential = %s, DSPAnalysis = %s, AcquireNewData = %s )>" % (self.pline, self.resolution, self.gain, self.settlingFactor, self.differential, self.dspAnalysis, self.acquireNewData)
readLen = 4
def handle(self, input):
        inStr = struct.pack('B' * len(input), *input)  # pack to bytes so struct.unpack works on Python 2 and 3
        return struct.unpack('<I', inStr)[0]
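# Combined-feedback usage sketch (hedged: assumes a device handle `d = u6.U6()`
# opened elsewhere; passing multiple commands per call mirrors the
# QuadratureInputTimer docstring example above):
# d.getFeedback(u6.BitDirWrite(4, 1),    # force FIO4 to output
#               u6.BitStateWrite(4, 1),  # drive FIO4 high
#               u6.PortStateRead())      # read back all digital I/O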
# docassemble_pattern/vector/stemmer.py
####################################################################################################
# The Porter2 stemming algorithm (or "Porter stemmer") is a process for removing the commoner
# morphological and inflexional endings from words in English.
# Its main use is as part of a term normalisation process that is usually done
# when setting up Information Retrieval systems.
# Reference:
# C.J. van Rijsbergen, S.E. Robertson and M.F. Porter, 1980.
# "New models in probabilistic information retrieval."
# London: British Library. (British Library Research and Development Report, no. 5587).
#
# http://tartarus.org/~martin/PorterStemmer/
# Comments throughout the source code were taken from:
# http://snowball.tartarus.org/algorithms/english/stemmer.html
from __future__ import unicode_literals
from __future__ import division
import re
from builtins import str, bytes, dict, int
from builtins import object, range
#---------------------------------------------------------------------------------------------------
# Note: this module is optimized for performance.
# There is little gain in using more regular expressions.
VOWELS = ["a", "e", "i", "o", "u", "y"]
DOUBLE = ["bb", "dd", "ff", "gg", "mm", "nn", "pp", "rr", "tt"]
VALID_LI = ["b", "c", "d", "e", "g", "h", "k", "m", "n", "r", "t"]
def is_vowel(s):
return s in VOWELS
def is_consonant(s):
return s not in VOWELS
def is_double_consonant(s):
return s in DOUBLE
def is_short_syllable(w, before=None):
""" A short syllable in a word is either:
- a vowel followed by a non-vowel other than w, x or Y and preceded by a non-vowel
- a vowel at the beginning of the word followed by a non-vowel.
Checks the three characters before the given index in the word (or entire word if None).
"""
if before is not None:
i = before < 0 and len(w) + before or before
return is_short_syllable(w[max(0, i - 3):i])
if len(w) == 3 and is_consonant(w[0]) and is_vowel(w[1]) and is_consonant(w[2]) and w[2] not in "wxY":
return True
if len(w) == 2 and is_vowel(w[0]) and is_consonant(w[1]):
return True
return False
def is_short(w):
""" A word is called short if it consists of a short syllable preceded by zero or more consonants.
"""
return is_short_syllable(w[-3:]) and len([ch for ch in w[:-3] if ch in VOWELS]) == 0
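# For example: is_short("hop") is True (cf. "hop => hope" in step 1b below),
# while is_short("beat") is False, because "eat" is not a short syllable.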
# A point made at least twice in the literature is that words beginning with gener-
# are overstemmed by the Porter stemmer:
# generate => gener, generically => gener
# Moving the region one vowel-consonant pair to the right fixes this:
# generate => generat, generically => generic
overstemmed = ("gener", "commun", "arsen")
RE_R1 = re.compile(r"[aeiouy][^aeiouy]")
def R1(w):
""" R1 is the region after the first non-vowel following a vowel,
or the end of the word if there is no such non-vowel.
"""
m = RE_R1.search(w)
if m:
return w[m.end():]
return ""
def R2(w):
""" R2 is the region after the first non-vowel following a vowel in R1,
or the end of the word if there is no such non-vowel.
"""
if w.startswith(tuple(overstemmed)):
return R1(R1(R1(w)))
return R1(R1(w))
def find_vowel(w):
""" Returns the index of the first vowel in the word.
When no vowel is found, returns len(word).
"""
for i, ch in enumerate(w):
if ch in VOWELS:
return i
return len(w)
def has_vowel(w):
""" Returns True if there is a vowel in the given string.
"""
for ch in w:
if ch in VOWELS:
return True
return False
def vowel_consonant_pairs(w, max=None):
""" Returns the number of consecutive vowel-consonant pairs in the word.
"""
m = 0
for i, ch in enumerate(w):
if is_vowel(ch) and i < len(w) - 1 and is_consonant(w[i + 1]):
m += 1
        # An optimisation to stop searching once we reach the number of <vc> pairs we need.
if m == max:
break
return m
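# For example: vowel_consonant_pairs("train") == 1 and vowel_consonant_pairs("oaten") == 2.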
#--- REPLACEMENT RULES -----------------------------------------------------------------------------
def step_1a(w):
""" Step 1a handles -s suffixes.
"""
if w.endswith("s"):
if w.endswith("sses"):
return w[:-2]
if w.endswith("ies"):
# Replace by -ie if preceded by just one letter,
# otherwise by -i (so ties => tie, cries => cri).
return len(w) == 4 and w[:-1] or w[:-2]
if w.endswith(("us", "ss")):
return w
if find_vowel(w) < len(w) - 2:
# Delete -s if the preceding part contains a vowel not immediately before the -s
# (so gas and this retain the -s, gaps and kiwis lose it).
return w[:-1]
return w
def step_1b(w):
""" Step 1b handles -ed and -ing suffixes (or -edly and -ingly).
Removes double consonants at the end of the stem and adds -e to some words.
"""
if w.endswith("y") and w.endswith(("edly", "ingly")):
w = w[:-2] # Strip -ly for next step.
if w.endswith(("ed", "ing")):
if w.endswith("ied"):
# See -ies in step 1a.
return len(w) == 4 and w[:-1] or w[:-2]
if w.endswith("eed"):
# Replace by -ee if preceded by at least one vowel-consonant pair.
return R1(w).endswith("eed") and w[:-1] or w
for suffix in ("ed", "ing"):
# Delete if the preceding word part contains a vowel.
# - If the word ends -at, -bl or -iz add -e (luxuriat => luxuriate).
# - If the word ends with a double remove the last letter (hopp => hop).
# - If the word is short, add e (hop => hope).
if w.endswith(suffix) and has_vowel(w[:-len(suffix)]):
w = w[:-len(suffix)]
if w.endswith(("at", "bl", "iz")):
return w + "e"
if is_double_consonant(w[-2:]):
return w[:-1]
if is_short(w):
return w + "e"
return w
def step_1c(w):
""" Step 1c replaces suffix -y or -Y by -i if preceded by a non-vowel
which is not the first letter of the word (cry => cri, by => by, say => say).
"""
if len(w) > 2 and w.endswith(("y", "Y")) and is_consonant(w[-2]):
return w[:-1] + "i"
return w
suffixes2 = [
("al", (("ational", "ate"), ("tional", "tion"))),
("ci", (("enci", "ence"), ("anci", "ance"))),
("er", (("izer", "ize"),)),
("li", (("bli", "ble"), ("alli", "al"), ("entli", "ent"), ("eli", "e"), ("ousli", "ous"))),
("on", (("ization", "ize"), ("isation", "ize"), ("ation", "ate"))),
("or", (("ator", "ate"),)),
("ss", (("iveness", "ive"), ("fulness", "ful"), ("ousness", "ous"))),
("sm", (("alism", "al"),)),
("ti", (("aliti", "al"), ("iviti", "ive"), ("biliti", "ble"))),
("gi", (("logi", "log"),))
]
def step_2(w):
""" Step 2 replaces double suffixes (singularization => singularize).
This only happens if there is at least one vowel-consonant pair before the suffix.
"""
for suffix, rules in suffixes2:
if w.endswith(suffix):
for A, B in rules:
if w.endswith(A):
return R1(w).endswith(A) and w[:-len(A)] + B or w
if w.endswith("li") and R1(w)[-3:-2] in VALID_LI:
# Delete -li if preceded by a valid li-ending.
return w[:-2]
return w
suffixes3 = [
("e", (("icate", "ic"), ("ative", ""), ("alize", "al"))),
("i", (("iciti", "ic"),)),
("l", (("ical", "ic"), ("ful", ""))),
("s", (("ness", ""),))
]
def step_3(w):
""" Step 3 replaces -ic, -ful, -ness etc. suffixes.
This only happens if there is at least one vowel-consonant pair before the suffix.
"""
for suffix, rules in suffixes3:
if w.endswith(suffix):
for A, B in rules:
if w.endswith(A):
return R1(w).endswith(A) and w[:-len(A)] + B or w
return w
suffixes4 = [
("al", ("al",)),
("ce", ("ance", "ence")),
("er", ("er",)),
("ic", ("ic",)),
("le", ("able", "ible")),
("nt", ("ant", "ement", "ment", "ent")),
("e", ("ate", "ive", "ize")),
(("m", "i", "s"), ("ism", "iti", "ous"))
]
def step_4(w):
""" Step 4 strips -ant, -ent etc. suffixes.
This only happens if there is more than one vowel-consonant pair before the suffix.
"""
for suffix, rules in suffixes4:
if w.endswith(suffix):
for A in rules:
if w.endswith(A):
return R2(w).endswith(A) and w[:-len(A)] or w
if R2(w).endswith("ion") and w[:-3].endswith(("s", "t")):
# Delete -ion if preceded by s or t.
return w[:-3]
return w
def step_5a(w):
""" Step 5a strips suffix -e if preceded by multiple vowel-consonant pairs,
or one vowel-consonant pair that is not a short syllable.
"""
if w.endswith("e"):
if R2(w).endswith("e") or R1(w).endswith("e") and not is_short_syllable(w, before=-1):
return w[:-1]
return w
def step_5b(w):
""" Step 5b strips suffix -l if preceded by l and multiple vowel-consonant pairs,
bell => bell, rebell => rebel.
"""
if w.endswith("ll") and R2(w).endswith("l"):
return w[:-1]
return w
#--- EXCEPTIONS ------------------------------------------------------------------------------------
# Exceptions:
# - in, out and can stems could be seen as stop words later on.
# - Special -ly cases.
exceptions = {
"skis": "ski",
"skies": "sky",
"dying": "die",
"lying": "lie",
"tying": "tie",
"innings": "inning",
"outings": "outing",
"cannings": "canning",
"idly": "idl",
"gently": "gentl",
"ugly": "ugli",
"early": "earli",
"only": "onli",
"singly": "singl"
}
# Words that are never stemmed:
uninflected = dict.fromkeys([
"sky",
"news",
"howe",
"inning", "outing", "canning",
"proceed", "exceed", "succeed",
"atlas", "cosmos", "bias", "andes" # not plural forms
], True)
#--- STEMMER ---------------------------------------------------------------------------------------
def case_sensitive(stem, word):
""" Applies the letter case of the word to the stem:
Ponies => Poni
"""
ch = []
for i in range(len(stem)):
if word[i] == word[i].upper():
ch.append(stem[i].upper())
else:
ch.append(stem[i])
return "".join(ch)
def upper_consonant_y(w):
""" Sets the initial y, or y after a vowel, to Y.
Of course, y is interpreted as a vowel and Y as a consonant.
"""
a = []
p = None
for ch in w:
if ch == "y" and (p is None or p in VOWELS):
a.append("Y")
else:
a.append(ch)
p = ch
return "".join(a)
# If we stemmed a word once, we can cache the result and reuse it.
# By default, keep a history of a 10000 entries (<500KB).
cache = {}
def stem(word, cached=True, history=10000, **kwargs):
""" Returns the stem of the given word: ponies => poni.
Note: it is often taken to be a crude error
that a stemming algorithm does not leave a real word after removing the stem.
But the purpose of stemming is to bring variant forms of a word together,
not to map a word onto its "paradigm" form.
"""
stem = word.lower()
if cached and stem in cache:
return case_sensitive(cache[stem], word)
if cached and len(cache) > history: # Empty cache every now and then.
cache.clear()
if len(stem) <= 2:
# If the word has two letters or less, leave it as it is.
return case_sensitive(stem, word)
if stem in exceptions:
return case_sensitive(exceptions[stem], word)
if stem in uninflected:
return case_sensitive(stem, word)
# Mark y treated as a consonant as Y.
stem = upper_consonant_y(stem)
for f in (step_1a, step_1b, step_1c, step_2, step_3, step_4, step_5a, step_5b):
stem = f(stem)
# Turn any remaining Y letters in the stem back into lower case.
# Apply the case of the original word to the stem.
stem = stem.lower()
stem = case_sensitive(stem, word)
if cached:
cache[word.lower()] = stem.lower()
    return stem
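# Usage sketch, doctest-style (results follow the rules documented above):
# >>> stem("ponies")
# 'poni'
# >>> stem("cries")
# 'cri'
# >>> stem("generate")
# 'generat'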
# airtest/core/android/ime.py
import re
from airtest.core.android.yosemite import Yosemite
from airtest.core.error import AdbError
from .constant import YOSEMITE_IME_SERVICE
from six import text_type
def ensure_unicode(value):
"""
Decode UTF-8 values
Args:
value: value to be decoded
Returns:
decoded valued
"""
if type(value) is not text_type:
try:
value = value.decode('utf-8')
except UnicodeDecodeError:
value = value.decode('gbk')
return value
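# For example: ensure_unicode(b"caf\xc3\xa9") == u"café" (UTF-8 first, GBK as a fallback).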
class CustomIme(object):
"""
Input Methods Class Object
"""
def __init__(self, adb, apk_path, service_name):
super(CustomIme, self).__init__()
self.adb = adb
self.apk_path = apk_path
self.service_name = service_name
self.started = False
def _get_ime_list(self):
"""
Return all the input methods on the device
Returns:
list of all input methods on the device
"""
out = self.adb.shell("ime list -a")
m = re.findall("mId=(.*?/.*?) ", out)
return m
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.end()
def start(self):
"""
Enable input method
Returns:
None
"""
try:
self.default_ime = self.adb.shell("settings get secure default_input_method").strip()
except AdbError:
# settings cmd not found for older phones, e.g. Xiaomi 2A
# /system/bin/sh: settings: not found
self.default_ime = None
self.ime_list = self._get_ime_list()
if self.service_name not in self.ime_list:
if self.apk_path:
                self.adb.install_app(self.apk_path)
if self.default_ime != self.service_name:
self.adb.shell("ime enable %s" % self.service_name)
self.adb.shell("ime set %s" % self.service_name)
self.started = True
def end(self):
"""
Disable input method
Returns:
None
"""
if self.default_ime and self.default_ime != self.service_name:
self.adb.shell("ime disable %s" % self.service_name)
self.adb.shell("ime set %s" % self.default_ime)
self.started = False
def text(self, value):
raise NotImplementedError
class YosemiteIme(CustomIme):
"""
Yosemite Input Method Class Object
"""
def __init__(self, adb):
super(YosemiteIme, self).__init__(adb, None, YOSEMITE_IME_SERVICE)
self.yosemite = Yosemite(adb)
def start(self):
self.yosemite.get_ready()
super(YosemiteIme, self).start()
def text(self, value):
"""
Input text with Yosemite input method
Args:
value: text to be inputted
Returns:
output form `adb shell` command
"""
if not self.started:
self.start()
        # For more input usage, see https://github.com/macacajs/android-unicode#use-in-adb-shell
value = ensure_unicode(value)
self.adb.shell(u"am broadcast -a ADB_INPUT_TEXT --es msg '{}'".format(value))
def code(self, code):
"""
Sending editor action
Args:
code: editor action code, e.g., 2 = IME_ACTION_GO, 3 = IME_ACTION_SEARCH
Editor Action Code Ref: http://developer.android.com/reference/android/view/inputmethod/EditorInfo.html
Returns:
output form `adb shell` command
"""
if not self.started:
self.start()
self.adb.shell("am broadcast -a ADB_EDITOR_CODE --ei code {}".format(str(code))) | PypiClean |
# NIRCAM_Gsim/polyclip/polyclip.py
import os
import sys
from glob import glob
import numpy.ctypeslib as npct
import numpy as np
import ctypes
from ctypes import c_int
#print __file__
this_path = os.path.split(__file__)[0]
#print(this_path)
so_file = glob(os.path.join(this_path,'polyclip_c*.so'))
#print(so_file)
if len(so_file) >= 1:
so_file = so_file[0]
else:
print("WARNING: Cannot find polyclip_c*.so library")
sys.exit()
polyclip = ctypes.cdll.LoadLibrary(so_file)
#polyclip = ctypes.cdll.LoadLibrary(os.path.join(this_path,"polyclip_c.so"))
array_1d_int_l = npct.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array_1d_int_r = npct.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array_1d_int_b = npct.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array_1d_int_t = npct.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array_1d_double_px = npct.ndpointer(dtype=np.float32, ndim=1, flags='CONTIGUOUS')
array_1d_double_py = npct.ndpointer(dtype=np.float32, ndim=1, flags='CONTIGUOUS')
array_1d_double_px_out = npct.ndpointer(dtype=np.float32, ndim=1, flags='CONTIGUOUS')
array_1d_double_py_out = npct.ndpointer(dtype=np.float32, ndim=1, flags='CONTIGUOUS')
array_1d_double_ri_out = npct.ndpointer(dtype=np.float32, ndim=1, flags='CONTIGUOUS')
array_1d_double_areas = npct.ndpointer(dtype=np.float32, ndim=1, flags='CONTIGUOUS')
array_1d_double_nclip_poly = npct.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array_1d_int_poly_inds = npct.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array_1d_double_inds = npct.ndpointer(dtype=np.int32, ndim=2, flags='CONTIGUOUS')
array_1d_double_x = npct.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array_1d_double_y = npct.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array_1d_double_index = npct.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
polyclip.polyclip_multi2.restype = None
polyclip.polyclip_multi4.restype = None
polyclip.polyclip_multi4.argtypes = [array_1d_int_l, # l
array_1d_int_r, # r
array_1d_int_b, # b
array_1d_int_t, # t
array_1d_double_px, # px
array_1d_double_py, # py
c_int, # n_poly
array_1d_int_poly_inds, # poly_inds
array_1d_double_x, # x
array_1d_double_y, # y
array_1d_double_nclip_poly, # nclip_poly
array_1d_double_areas, # areas
array_1d_double_index # output index
]
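# Shape sketch for the interface declared above (hedged and illustrative only;
# the real size contract between pixel bounds, vertex lists, and output buffers
# is defined by the C source, not here):
# import numpy as np
# px = np.array([0.2, 1.8, 1.8, 0.2], dtype=np.float32)  # polygon x-vertices
# py = np.array([0.2, 0.2, 1.8, 1.8], dtype=np.float32)  # polygon y-vertices
# poly_inds = np.array([0, 4], dtype=np.int32)           # vertex offsets per polygon
# Every argument passed to polyclip_multi4 must match the ndpointer dtypes
# registered in its argtypes list, or ctypes raises an ArgumentError.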
// freepybx/public/js/dojox/geo/charting/widget/Legend.js.uncompressed.js
define("dojox/geo/charting/widget/Legend", ["dojo/_base/kernel", "dojo/_base/lang","dojo/_base/array", "dojo/_base/declare","dojo/_base/html","dojo/dom",
"dojo/dom-construct","dojo/dom-class", "dojo/_base/window", "dijit/_Widget"],
function(dojo, lang, arr, declare, html,dom,domConstruct,domClass, win, Widget) {
return declare("dojox.geo.charting.widget.Legend",Widget, {
// summary:
// A legend widget displaying association between colors and Feature value ranges.
//
// description:
// This widget basically is a table comprising (icon,string) pairs, describing the color scheme
// used for the map and its associated text descriptions.
//
// example:
// | var legend = new dojox.geo.charting.widget.Legend({
// | map: map
// | });
horizontal:true,
legendBody:null,
swatchSize:18,
map:null,
postCreate: function(){
// summary:
// inherited Dijit's postCreate function
// tags:
// protected
if(!this.map){return;}
this.series = this.map.series;
if (!this.domNode.parentNode) {
// compatibility with older version : add to map domNode if not already attached to a parentNode.
dom.byId(this.map.container).appendChild(this.domNode);
}
this.refresh();
},
buildRendering: function(){
// summary:
// Construct the UI for this widget, creates the underlying real dojox.geo.charting.Map object.
// tags:
// protected
this.domNode = domConstruct.create("table",
{role: "group", "class": "dojoxLegendNode"});
this.legendBody = domConstruct.create("tbody", null, this.domNode);
this.inherited(arguments);
},
refresh:function(){
// summary:
// Refreshes this legend contents when Map series has changed.
// cleanup
while(this.legendBody.lastChild){
domConstruct.destroy(this.legendBody.lastChild);
}
if(this.horizontal){
domClass.add(this.domNode,"dojoxLegendHorizontal");
this._tr = win.doc.createElement("tr");
this.legendBody.appendChild(this._tr);
}
var s = this.series;
if(s.length == 0){return;}
arr.forEach(s,function(x){
this._addLabel(x.color, x.name);
},this);
},
_addLabel:function(color,label){
var icon = win.doc.createElement("td");
var text = win.doc.createElement("td");
var div = win.doc.createElement("div");
domClass.add(icon, "dojoxLegendIcon");
domClass.add(text, "dojoxLegendText");
div.style.width = this.swatchSize + "px";
div.style.height = this.swatchSize + "px";
icon.appendChild(div);
if(this.horizontal){
this._tr.appendChild(icon);
this._tr.appendChild(text);
}else{
var tr = win.doc.createElement("tr");
this.legendBody.appendChild(tr);
tr.appendChild(icon);
tr.appendChild(text);
}
div.style.background = color;
text.innerHTML = String(label);
}
});
});
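// Usage sketch (AMD style, mirroring the example in the summary above;
// assumes an existing dojox.geo.charting.Map instance named `map`):
// require(["dojox/geo/charting/widget/Legend"], function(Legend){
//     var legend = new Legend({ map: map, horizontal: false });
// });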
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/prism/components/prism-perl.min.js | Prism.languages.perl={comment:[{pattern:/(^\s*)=\w+[\s\S]*?=cut.*/m,lookbehind:!0},{pattern:/(^|[^\\$])#.*/,lookbehind:!0}],string:[{pattern:/\b(?:q|qq|qx|qw)\s*([^a-zA-Z0-9\s{(\[<])(?:(?!\1)[^\\]|\\[\s\S])*\1/,greedy:!0},{pattern:/\b(?:q|qq|qx|qw)\s+([a-zA-Z0-9])(?:(?!\1)[^\\]|\\[\s\S])*\1/,greedy:!0},{pattern:/\b(?:q|qq|qx|qw)\s*\((?:[^()\\]|\\[\s\S])*\)/,greedy:!0},{pattern:/\b(?:q|qq|qx|qw)\s*\{(?:[^{}\\]|\\[\s\S])*\}/,greedy:!0},{pattern:/\b(?:q|qq|qx|qw)\s*\[(?:[^[\]\\]|\\[\s\S])*\]/,greedy:!0},{pattern:/\b(?:q|qq|qx|qw)\s*<(?:[^<>\\]|\\[\s\S])*>/,greedy:!0},{pattern:/("|`)(?:(?!\1)[^\\]|\\[\s\S])*\1/,greedy:!0},{pattern:/'(?:[^'\\\r\n]|\\.)*'/,greedy:!0}],regex:[{pattern:/\b(?:m|qr)\s*([^a-zA-Z0-9\s{(\[<])(?:(?!\1)[^\\]|\\[\s\S])*\1[msixpodualngc]*/,greedy:!0},{pattern:/\b(?:m|qr)\s+([a-zA-Z0-9])(?:(?!\1)[^\\]|\\[\s\S])*\1[msixpodualngc]*/,greedy:!0},{pattern:/\b(?:m|qr)\s*\((?:[^()\\]|\\[\s\S])*\)[msixpodualngc]*/,greedy:!0},{pattern:/\b(?:m|qr)\s*\{(?:[^{}\\]|\\[\s\S])*\}[msixpodualngc]*/,greedy:!0},{pattern:/\b(?:m|qr)\s*\[(?:[^[\]\\]|\\[\s\S])*\][msixpodualngc]*/,greedy:!0},{pattern:/\b(?:m|qr)\s*<(?:[^<>\\]|\\[\s\S])*>[msixpodualngc]*/,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s*([^a-zA-Z0-9\s{(\[<])(?:(?!\2)[^\\]|\\[\s\S])*\2(?:(?!\2)[^\\]|\\[\s\S])*\2[msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s+([a-zA-Z0-9])(?:(?!\2)[^\\]|\\[\s\S])*\2(?:(?!\2)[^\\]|\\[\s\S])*\2[msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s*\((?:[^()\\]|\\[\s\S])*\)\s*\((?:[^()\\]|\\[\s\S])*\)[msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s*\{(?:[^{}\\]|\\[\s\S])*\}\s*\{(?:[^{}\\]|\\[\s\S])*\}[msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s*\[(?:[^[\]\\]|\\[\s\S])*\]\s*\[(?:[^[\]\\]|\\[\s\S])*\][msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s*<(?:[^<>\\]|\\[\s\S])*>\s*<(?:[^<>\\]|\\[\s\S])*>[msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/\/(?:[^\/\\\r\n]|\\.)*\/[msixpodualngc]*(?=\s*(?:$|[\r\n,.;})&|\-+*~<>!?^]|(lt|gt|le|ge|eq|ne|cmp|not|and|or|xor|x)\b))/,greedy:!0}],variable:[/[&*$@%]\{\^[A-Z]+\}/,/[&*$@%]\^[A-Z_]/,/[&*$@%]#?(?=\{)/,/[&*$@%]#?(?:(?:::)*'?(?!\d)[\w$]+)+(?:::)*/i,/[&*$@%]\d+/,/(?!%=)[$@%][!"#$%&'()*+,\-.\/:;<=>?@[\\\]^_`{|}~]/],filehandle:{pattern:/<(?![<=])\S*>|\b_\b/,alias:"symbol"},vstring:{pattern:/v\d+(?:\.\d+)*|\d+(?:\.\d+){2,}/,alias:"string"},"function":{pattern:/sub [a-z0-9_]+/i,inside:{keyword:/sub/}},keyword:/\b(?:any|break|continue|default|delete|die|do|else|elsif|eval|for|foreach|given|goto|if|last|local|my|next|our|package|print|redo|require|say|state|sub|switch|undef|unless|until|use|when|while)\b/,number:/\b-?(?:0x[\dA-Fa-f](?:_?[\dA-Fa-f])*|0b[01](?:_?[01])*|(?:\d(?:_?\d)*)?\.?\d(?:_?\d)*(?:[Ee][+-]?\d+)?)\b/,operator:/-[rwxoRWXOezsfdlpSbctugkTBMAC]\b|\+[+=]?|-[-=>]?|\*\*?=?|\/\/?=?|=[=~>]?|~[~=]?|\|\|?=?|&&?=?|<(?:=>?|<=?)?|>>?=?|![~=]?|[%^]=?|\.(?:=|\.\.?)?|[\\?]|\bx(?:=|\b)|\b(?:lt|gt|le|ge|eq|ne|cmp|not|and|or|xor)\b/,punctuation:/[{}[\];(),:]/}; | PypiClean |
/Django-Pizza-16.10.1.tar.gz/Django-Pizza-16.10.1/pizza/kitchen_sink/static/ks/ckeditor/lang/bg.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.lang['bg']={"editor":"Текстов редактор за форматиран текст","editorPanel":"Rich Text Editor panel","common":{"editorHelp":"натиснете ALT 0 за помощ","browseServer":"Избор от сървъра","url":"URL","protocol":"Протокол","upload":"Качване","uploadSubmit":"Изпращане към сървъра","image":"Снимка","flash":"Флаш","form":"Форма","checkbox":"Поле за избор","radio":"Радио бутон","textField":"Текстово поле","textarea":"Текстова зона","hiddenField":"Скрито поле","button":"Бутон","select":"Поле за избор","imageButton":"Бутон за снимка","notSet":"<не е избрано>","id":"ID","name":"Име","langDir":"Посока на езика","langDirLtr":"Ляво на дясно (ЛнД)","langDirRtl":"Дясно на ляво (ДнЛ)","langCode":"Код на езика","longDescr":"Уеб адрес за дълго описание","cssClass":"Класове за CSS","advisoryTitle":"Препоръчително заглавие","cssStyle":"Стил","ok":"ОК","cancel":"Отказ","close":"Затвори","preview":"Преглед","resize":"Влачете за да оразмерите","generalTab":"Общи","advancedTab":"Разширено","validateNumberFailed":"Тази стойност не е число","confirmNewPage":"Всички незапазени промени ще бъдат изгубени. Сигурни ли сте, че желаете да заредите нова страница?","confirmCancel":"Някои от опциите са променени. Сигурни ли сте, че желаете да затворите прозореца?","options":"Опции","target":"Цел","targetNew":"Нов прозорец (_blank)","targetTop":"Горна позиция (_top)","targetSelf":"Текущия прозорец (_self)","targetParent":"Основен прозорец (_parent)","langDirLTR":"Ляво на дясно (ЛнД)","langDirRTL":"Дясно на ляво (ДнЛ)","styles":"Стил","cssClasses":"Класове за CSS","width":"Ширина","height":"Височина","align":"Подравняване","alignLeft":"Ляво","alignRight":"Дясно","alignCenter":"Център","alignTop":"Горе","alignMiddle":"По средата","alignBottom":"Долу","invalidValue":"Невалидна стойност.","invalidHeight":"Височината трябва да е число.","invalidWidth":"Ширина требе да е число.","invalidCssLength":"Стойността на полето \"%1\" трябва да бъде положително число с или без валидна CSS измервателна единица (px, %, in, cm, mm, em, ex, pt, или pc).","invalidHtmlLength":"Стойността на полето \"%1\" трябва да бъде положително число с или без валидна HTML измервателна единица (px или %).","invalidInlineStyle":"Стойността на стилa трябва да съдържат една или повече двойки във формат \"name : value\", разделени с двоеточие.","cssLengthTooltip":"Въведете числена стойност в пиксели или друга валидна CSS единица (px, %, in, cm, mm, em, ex, pt, или pc).","unavailable":"%1<span class=\"cke_accessibility\">, недостъпно</span>"},"about":{"copy":"Copyright © $1. All rights reserved.","dlgTitle":"Относно CKEditor","help":"Проверете $1 за помощ.","moreInfo":"За лицензионна информация моля посетете сайта ни:","title":"Относно CKEditor","userGuide":"CKEditor User's Guide"},"basicstyles":{"bold":"Удебелен","italic":"Наклонен","strike":"Зачертан текст","subscript":"Индексиран текст","superscript":"Суперскрипт","underline":"Подчертан"},"bidi":{"ltr":"Посока на текста от ляво на дясно","rtl":"Посока на текста от дясно на ляво"},"blockquote":{"toolbar":"Блок за цитат"},"clipboard":{"copy":"Копирай","copyError":"Настройките за сигурност на вашия бразуър не разрешават на редактора да изпълни запаметяването. За целта използвайте клавиатурата (Ctrl/Cmd+C).","cut":"Отрежи","cutError":"Настройките за сигурност на Вашия браузър не позволяват на редактора автоматично да изъплни действията за отрязване. 
Моля ползвайте клавиатурните команди за целта (ctrl+x).","paste":"Вмъкни","pasteArea":"Зона за вмъкване","pasteMsg":"Вмъкнете тук съдъжанието с клавиатуарата (<STRONG>Ctrl/Cmd+V</STRONG>) и натиснете <STRONG>OK</STRONG>.","securityMsg":"Заради настройките за сигурност на Вашия браузър, редакторът не може да прочете данните от клипборда коректно.","title":"Вмъкни"},"colorbutton":{"auto":"Автоматично","bgColorTitle":"Фонов цвят","colors":{"000":"Черно","800000":"Кестеняво","8B4513":"Светлокафяво","2F4F4F":"Dark Slate Gray","008080":"Teal","000080":"Navy","4B0082":"Индиго","696969":"Тъмно сиво","B22222":"Огнено червено","A52A2A":"Кафяво","DAA520":"Златисто","006400":"Тъмно зелено","40E0D0":"Тюркуазено","0000CD":"Средно синьо","800080":"Пурпурно","808080":"Сиво","F00":"Червено","FF8C00":"Тъмно оранжево","FFD700":"Златно","008000":"Зелено","0FF":"Светло синьо","00F":"Blue","EE82EE":"Violet","A9A9A9":"Dim Gray","FFA07A":"Light Salmon","FFA500":"Orange","FFFF00":"Yellow","00FF00":"Lime","AFEEEE":"Pale Turquoise","ADD8E6":"Light Blue","DDA0DD":"Plum","D3D3D3":"Light Grey","FFF0F5":"Lavender Blush","FAEBD7":"Antique White","FFFFE0":"Light Yellow","F0FFF0":"Honeydew","F0FFFF":"Azure","F0F8FF":"Alice Blue","E6E6FA":"Lavender","FFF":"White"},"more":"Още цветове","panelTitle":"Цветове","textColorTitle":"Цвят на шрифт"},"colordialog":{"clear":"Изчистване","highlight":"Осветяване","options":"Цветови опции","selected":"Изберете цвят","title":"Изберете цвят"},"templates":{"button":"Шаблони","emptyListMsg":"(Няма дефинирани шаблони)","insertOption":"Препокрива актуалното съдържание","options":"Опции за шаблона","selectPromptMsg":"Изберете шаблон <br>(текущото съдържание на редактора ще бъде загубено):","title":"Шаблони"},"contextmenu":{"options":"Опции на контекстното меню"},"div":{"IdInputLabel":"ID","advisoryTitleInputLabel":"Препоръчително заглавие","cssClassInputLabel":"Класове за CSS","edit":"Промяна на Div","inlineStyleInputLabel":"Inline Style","langDirLTRLabel":"Ляво на Дясно (ЛнД)","langDirLabel":"Посока на езика","langDirRTLLabel":"Дясно на Ляво (ДнЛ)","languageCodeInputLabel":" Код на езика","remove":"Премахване на Div","styleSelectLabel":"Стил","title":"Create Div Container","toolbar":"Create Div Container"},"toolbar":{"toolbarCollapse":"Свиване на лентата с инструменти","toolbarExpand":"Разширяване на лентата с инструменти","toolbarGroups":{"document":"Документ","clipboard":"Clipboard/Undo","editing":"Промяна","forms":"Форми","basicstyles":"Базови стилове","paragraph":"Параграф","links":"Връзки","insert":"Вмъкване","styles":"Стилове","colors":"Цветове","tools":"Инструменти"},"toolbars":"Ленти с инструменти"},"elementspath":{"eleLabel":"Път за елементите","eleTitle":"%1 елемент"},"find":{"find":"Търсене","findOptions":"Find Options","findWhat":"Търси за:","matchCase":"Съвпадение","matchCyclic":"Циклично съвпадение","matchWord":"Съвпадение с дума","notFoundMsg":"Указаният текст не е намерен.","replace":"Препокриване","replaceAll":"Препокрий всички","replaceSuccessMsg":"%1 occurrence(s) replaced.","replaceWith":"Препокрива с:","title":"Търсене и препокриване"},"fakeobjects":{"anchor":"Кука","flash":"Флаш анимация","hiddenfield":"Скрито поле","iframe":"IFrame","unknown":"Неизвестен обект"},"flash":{"access":"Достъп до скрипт","accessAlways":"Винаги","accessNever":"Никога","accessSameDomain":"Същият домейн","alignAbsBottom":"Най-долу","alignAbsMiddle":"Точно по средата","alignBaseline":"Базова линия","alignTextTop":"Върху текста","bgcolor":"Цвят на фона","chkFull":"Включи на цял 
екран","chkLoop":"Цикъл","chkMenu":"Разрешено Flash меню","chkPlay":"Авто. пускане","flashvars":"Променливи за Флаш","hSpace":"Хоризонтален отстъп","properties":"Настройки за флаш","propertiesTab":"Настройки","quality":"Качество","qualityAutoHigh":"Авто. високо","qualityAutoLow":"Авто. ниско","qualityBest":"Отлично","qualityHigh":"Високо","qualityLow":"Ниско","qualityMedium":"Средно","scale":"Оразмеряване","scaleAll":"Показва всичко","scaleFit":"Според мястото","scaleNoBorder":"Без рамка","title":"Настройки за флаш","vSpace":"Вертикален отстъп","validateHSpace":"HSpace трябва да е число.","validateSrc":"Уеб адреса не трябва да е празен.","validateVSpace":"VSpace трябва да е число.","windowMode":"Режим на прозореца","windowModeOpaque":"Плътност","windowModeTransparent":"Прозрачност","windowModeWindow":"Прозорец"},"font":{"fontSize":{"label":"Размер","voiceLabel":"Размер на шрифт","panelTitle":"Размер на шрифт"},"label":"Шрифт","panelTitle":"Име на шрифт","voiceLabel":"Шрифт"},"forms":{"button":{"title":"Настройки на бутона","text":"Текст (стойност)","type":"Тип","typeBtn":"Бутон","typeSbm":"Добави","typeRst":"Нулиране"},"checkboxAndRadio":{"checkboxTitle":"Checkbox Properties","radioTitle":"Настройки на радиобутон","value":"Стойност","selected":"Избрано"},"form":{"title":"Настройки на формата","menu":"Настройки на формата","action":"Действие","method":"Метод","encoding":"Кодиране"},"hidden":{"title":"Настройки за скрито поле","name":"Име","value":"Стойност"},"select":{"title":"Selection Field Properties","selectInfo":"Select Info","opAvail":"Налични опции","value":"Стойност","size":"Размер","lines":"линии","chkMulti":"Allow multiple selections","opText":"Текст","opValue":"Стойност","btnAdd":"Добави","btnModify":"Промени","btnUp":"На горе","btnDown":"На долу","btnSetValue":"Set as selected value","btnDelete":"Изтриване"},"textarea":{"title":"Опции за текстовата зона","cols":"Колони","rows":"Редове"},"textfield":{"title":"Настройки за текстово поле","name":"Име","value":"Стойност","charWidth":"Ширина на знаците","maxChars":"Макс. знаци","type":"Тип","typeText":"Текст","typePass":"Парола","typeEmail":"Email","typeSearch":"Търсене","typeTel":"Телефонен номер","typeUrl":"Уеб адрес"}},"format":{"label":"Формат","panelTitle":"Формат","tag_address":"Адрес","tag_div":"Параграф (DIV)","tag_h1":"Заглавие 1","tag_h2":"Заглавие 2","tag_h3":"Заглавие 3","tag_h4":"Заглавие 4","tag_h5":"Заглавие 5","tag_h6":"Заглавие 6","tag_p":"Нормален","tag_pre":"Форматиран"},"horizontalrule":{"toolbar":"Вмъкване на хоризонтална линия"},"iframe":{"border":"Показва рамка на карето","noUrl":"Моля въведете URL за iFrame","scrolling":"Вкл. 
скролбаровете","title":"IFrame настройки","toolbar":"IFrame"},"image":{"alertUrl":"Моля, въведете пълния път до изображението","alt":"Алтернативен текст","border":"Рамка","btnUpload":"Изпрати я на сървъра","button2Img":"Do you want to transform the selected image button on a simple image?","hSpace":"Хоризонтален отстъп","img2Button":"Do you want to transform the selected image on a image button?","infoTab":"Инфо за снимка","linkTab":"Връзка","lockRatio":"Заключване на съотношението","menu":"Настройки за снимка","resetSize":"Нулиране на размер","title":"Настройки за снимка","titleButton":"Настойки за бутон за снимка","upload":"Качване","urlMissing":"Image source URL is missing.","vSpace":"Вертикален отстъп","validateBorder":"Border must be a whole number.","validateHSpace":"HSpace must be a whole number.","validateVSpace":"VSpace must be a whole number."},"indent":{"indent":"Увеличаване на отстъпа","outdent":"Намаляване на отстъпа"},"smiley":{"options":"Опции за усмивката","title":"Вмъкване на усмивка","toolbar":"Усмивка"},"justify":{"block":"Двустранно подравняване","center":"Център","left":"Подравни в ляво","right":"Подравни в дясно"},"link":{"acccessKey":"Ключ за достъп","advanced":"Разширено","advisoryContentType":"Препоръчителен тип на съдържанието","advisoryTitle":"Препоръчително заглавие","anchor":{"toolbar":"Котва","menu":"Промяна на котва","title":"Настройки на котва","name":"Име на котва","errorName":"Моля въведете име на котвата","remove":"Премахване на котва"},"anchorId":"По ID на елемент","anchorName":"По име на котва","charset":"Тип на свързания ресурс","cssClasses":"Класове за CSS","emailAddress":"E-mail aдрес","emailBody":"Съдържание","emailSubject":"Тема","id":"ID","info":"Инфо за връзката","langCode":"Код за езика","langDir":"Посока на езика","langDirLTR":"Ляво на Дясно (ЛнД)","langDirRTL":"Дясно на Ляво (ДнЛ)","menu":"Промяна на връзка","name":"Име","noAnchors":"(Няма котви в текущия документ)","noEmail":"Моля въведете e-mail aдрес","noUrl":"Моля въведете URL адреса","other":"<друго>","popupDependent":"Зависимост (Netscape)","popupFeatures":"Функции на изкачащ прозорец","popupFullScreen":"Цял екран (IE)","popupLeft":"Лява позиция","popupLocationBar":"Лента с локацията","popupMenuBar":"Лента за меню","popupResizable":"Оразмеряем","popupScrollBars":"Скролери","popupStatusBar":"Статусна лента","popupToolbar":"Лента с инструменти","popupTop":"Горна позиция","rel":"Връзка","selectAnchor":"Изберете котва","styles":"Стил","tabIndex":"Ред на достъп","target":"Цел","targetFrame":"<frame>","targetFrameName":"Име на целевият прозорец","targetPopup":"<изкачащ прозорец>","targetPopupName":"Име на изкачащ прозорец","title":"Връзка","toAnchor":"Връзка към котва в текста","toEmail":"E-mail","toUrl":"Уеб адрес","toolbar":"Връзка","type":"Тип на връзката","unlink":"Премахни връзката","upload":"Качване"},"list":{"bulletedlist":"Вмъкване/Премахване на точков списък","numberedlist":"Вмъкване/Премахване на номериран списък"},"liststyle":{"armenian":"Арменско номериране","bulletedTitle":"Bulleted List Properties","circle":"Кръг","decimal":"Числа (1, 2, 3 и др.)","decimalLeadingZero":"Числа с водеща нула (01, 02, 03 и т.н.)","disc":"Диск","georgian":"Грузинско номериране (an, ban, gan, и т.н.)","lowerAlpha":"Малки букви (а, б, в, г, д и т.н.)","lowerGreek":"Малки гръцки букви (алфа, бета, гама и т.н.)","lowerRoman":"Малки римски числа (i, ii, iii, iv, v и т.н.)","none":"Няма","notset":"<не е указано>","numberedTitle":"Numbered List 
Properties","square":"Квадрат","start":"Старт","type":"Тип","upperAlpha":"Големи букви (А, Б, В, Г, Д и т.н.)","upperRoman":"Големи римски числа (I, II, III, IV, V и т.н.)","validateStartNumber":"List start number must be a whole number."},"magicline":{"title":"Вмъкнете параграф тук"},"maximize":{"maximize":"Максимизиране","minimize":"Минимизиране"},"newpage":{"toolbar":"Нова страница"},"pagebreak":{"alt":"Разделяне на страници","toolbar":"Вмъкване на нова страница при печат"},"pastetext":{"button":"Вмъкни като чист текст","title":"Вмъкни като чист текст"},"pastefromword":{"confirmCleanup":"The text you want to paste seems to be copied from Word. Do you want to clean it before pasting?","error":"It was not possible to clean up the pasted data due to an internal error","title":"Вмъкни от MS Word","toolbar":"Вмъкни от MS Word"},"preview":{"preview":"Преглед"},"print":{"toolbar":"Печат"},"removeformat":{"toolbar":"Премахване на форматирането"},"save":{"toolbar":"Запис"},"selectall":{"toolbar":"Избери всичко"},"showblocks":{"toolbar":"Показва блокове"},"sourcearea":{"toolbar":"Източник"},"specialchar":{"options":"Опции за специален знак","title":"Избор на специален знак","toolbar":"Вмъкване на специален знак"},"scayt":{"about":"About SCAYT","aboutTab":"Относно","addWord":"Add Word","allCaps":"Ignore All-Caps Words","dic_create":"Нов","dic_delete":"Изтриване","dic_field_name":"Име на речнк","dic_info":"Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. If you already have a stored dictionary, please type its name and click the Restore button.","dic_rename":"Преименуване","dic_restore":"Възтановяване","dictionariesTab":"Речници","disable":"Disable SCAYT","emptyDic":"Dictionary name should not be empty.","enable":"Enable SCAYT","ignore":"Ignore","ignoreAll":"Ignore All","ignoreDomainNames":"Ignore Domain Names","langs":"Languages","languagesTab":"Езици","mixedCase":"Ignore Words with Mixed Case","mixedWithDigits":"Игнорирани думи и цифри","moreSuggestions":"More suggestions","opera_title":"Not supported by Opera","options":"Options","optionsTab":"Options","title":"Spell Check As You Type","toggle":"Toggle SCAYT","noSuggestions":"No suggestion"},"stylescombo":{"label":"Стилове","panelTitle":"Стилове за форматиране","panelTitle1":"Блокови стилове","panelTitle2":"Вътрешни стилове","panelTitle3":"Обектни стилове"},"table":{"border":"Размер на рамката","caption":"Заглавие","cell":{"menu":"Клетка","insertBefore":"Вмъкване на клетка преди","insertAfter":"Вмъкване на клетка след","deleteCell":"Изтриване на клетки","merge":"Сливане на клетки","mergeRight":"Сливане в дясно","mergeDown":"Merge Down","splitHorizontal":"Split Cell Horizontally","splitVertical":"Split Cell Vertically","title":"Настройки на клетката","cellType":"Тип на клетката","rowSpan":"Rows Span","colSpan":"Columns Span","wordWrap":"Авто. 
пренос","hAlign":"Хоризонтално подравняване","vAlign":"Вертикално подравняване","alignBaseline":"Базова линия","bgColor":"Фон","borderColor":"Цвят на рамката","data":"Данни","header":"Хедър","yes":"Да","no":"Не","invalidWidth":"Cell width must be a number.","invalidHeight":"Cell height must be a number.","invalidRowSpan":"Rows span must be a whole number.","invalidColSpan":"Columns span must be a whole number.","chooseColor":"Изберете"},"cellPad":"Отделяне на клетките","cellSpace":"Разтояние между клетките","column":{"menu":"Колона","insertBefore":"Вмъкване на колона преди","insertAfter":"Вмъкване на колона след","deleteColumn":"Изтриване на колони"},"columns":"Колони","deleteTable":"Изтриване на таблица","headers":"Хедъри","headersBoth":"Заедно","headersColumn":"Първа колона","headersNone":"Няма","headersRow":"Първи ред","invalidBorder":"Размерът на рамката трябва да е число.","invalidCellPadding":"Отстоянието на клетките трябва да е позитивно число.","invalidCellSpacing":"Интервала в клетките трябва да е позитивно число.","invalidCols":"Броят колони трябва да е по-голям от 0.","invalidHeight":"Височината на таблицата трябва да е число.","invalidRows":"Броят редове трябва да е по-голям от 0.","invalidWidth":"Ширината на таблицата трябва да е число.","menu":"Настройки на таблицата","row":{"menu":"Ред","insertBefore":"Вмъкване на ред преди","insertAfter":"Вмъкване на ред след","deleteRow":"Изтриване на редове"},"rows":"Редове","summary":"Обща информация","title":"Настройки на таблицата","toolbar":"Таблица","widthPc":"процент","widthPx":"пиксела","widthUnit":"единица за ширина"},"undo":{"redo":"Връщане на предишен статус","undo":"Възтанови"},"wsc":{"btnIgnore":"Игнорирай","btnIgnoreAll":"Игнорирай всичко","btnReplace":"Препокриване","btnReplaceAll":"Препокрий всичко","btnUndo":"Възтанови","changeTo":"Промени на","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"Spell checker not installed. Do you want to download it now?","manyChanges":"Spell check complete: %1 words changed","noChanges":"Spell check complete: No words changed","noMispell":"Spell check complete: No misspellings found","noSuggestions":"- Няма препоръчани -","notAvailable":"Съжаляваме, но услугата не е достъпна за момента","notInDic":"Не е в речника","oneChange":"Spell check complete: One word changed","progress":"Проверява се правописа...","title":"Проверка на правопис","toolbar":"Проверка на правопис"}}; | PypiClean |
// flask_cms/static/js/ace/theme-textmate.js
ace.define("ace/theme/textmate",["require","exports","module","ace/lib/dom"], function(require, exports, module) {
"use strict";
exports.isDark = false;
exports.cssClass = "ace-tm";
exports.cssText = ".ace-tm .ace_gutter {\
background: #f0f0f0;\
color: #333;\
}\
.ace-tm .ace_print-margin {\
width: 1px;\
background: #e8e8e8;\
}\
.ace-tm .ace_fold {\
background-color: #6B72E6;\
}\
.ace-tm {\
background-color: #FFFFFF;\
color: black;\
}\
.ace-tm .ace_cursor {\
color: black;\
}\
.ace-tm .ace_invisible {\
color: rgb(191, 191, 191);\
}\
.ace-tm .ace_storage,\
.ace-tm .ace_keyword {\
color: blue;\
}\
.ace-tm .ace_constant {\
color: rgb(197, 6, 11);\
}\
.ace-tm .ace_constant.ace_buildin {\
color: rgb(88, 72, 246);\
}\
.ace-tm .ace_constant.ace_language {\
color: rgb(88, 92, 246);\
}\
.ace-tm .ace_constant.ace_library {\
color: rgb(6, 150, 14);\
}\
.ace-tm .ace_invalid {\
background-color: rgba(255, 0, 0, 0.1);\
color: red;\
}\
.ace-tm .ace_support.ace_function {\
color: rgb(60, 76, 114);\
}\
.ace-tm .ace_support.ace_constant {\
color: rgb(6, 150, 14);\
}\
.ace-tm .ace_support.ace_type,\
.ace-tm .ace_support.ace_class {\
color: rgb(109, 121, 222);\
}\
.ace-tm .ace_keyword.ace_operator {\
color: rgb(104, 118, 135);\
}\
.ace-tm .ace_string {\
color: rgb(3, 106, 7);\
}\
.ace-tm .ace_comment {\
color: rgb(76, 136, 107);\
}\
.ace-tm .ace_comment.ace_doc {\
color: rgb(0, 102, 255);\
}\
.ace-tm .ace_comment.ace_doc.ace_tag {\
color: rgb(128, 159, 191);\
}\
.ace-tm .ace_constant.ace_numeric {\
color: rgb(0, 0, 205);\
}\
.ace-tm .ace_variable {\
color: rgb(49, 132, 149);\
}\
.ace-tm .ace_xml-pe {\
color: rgb(104, 104, 91);\
}\
.ace-tm .ace_entity.ace_name.ace_function {\
color: #0000A2;\
}\
.ace-tm .ace_heading {\
color: rgb(12, 7, 255);\
}\
.ace-tm .ace_list {\
color:rgb(185, 6, 144);\
}\
.ace-tm .ace_meta.ace_tag {\
color:rgb(0, 22, 142);\
}\
.ace-tm .ace_string.ace_regex {\
color: rgb(255, 0, 0)\
}\
.ace-tm .ace_marker-layer .ace_selection {\
background: rgb(181, 213, 255);\
}\
.ace-tm.ace_multiselect .ace_selection.ace_start {\
box-shadow: 0 0 3px 0px white;\
border-radius: 2px;\
}\
.ace-tm .ace_marker-layer .ace_step {\
background: rgb(252, 255, 0);\
}\
.ace-tm .ace_marker-layer .ace_stack {\
background: rgb(164, 229, 101);\
}\
.ace-tm .ace_marker-layer .ace_bracket {\
margin: -1px 0 0 -1px;\
border: 1px solid rgb(192, 192, 192);\
}\
.ace-tm .ace_marker-layer .ace_active-line {\
background: rgba(0, 0, 0, 0.07);\
}\
.ace-tm .ace_gutter-active-line {\
background-color : #dcdcdc;\
}\
.ace-tm .ace_marker-layer .ace_selected-word {\
background: rgb(250, 250, 255);\
border: 1px solid rgb(200, 200, 250);\
}\
.ace-tm .ace_indent-guide {\
background: url(\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAACCAYAAACZgbYnAAAAE0lEQVQImWP4////f4bLly//BwAmVgd1/w11/gAAAABJRU5ErkJggg==\") right repeat-y;\
}\
";
var dom = require("../lib/dom");
dom.importCssString(exports.cssText, exports.cssClass);
}); | PypiClean |
/LZBEAT-0.13.1.tar.gz/LZBEAT-0.13.1/econml/_ortho_learner.py | import copy
from collections import namedtuple
from warnings import warn
from abc import abstractmethod
import inspect
from collections import defaultdict
import re
import numpy as np
from sklearn.base import clone
from sklearn.model_selection import KFold, StratifiedKFold, check_cv
from sklearn.preprocessing import (FunctionTransformer, LabelEncoder,
OneHotEncoder)
from sklearn.utils import check_random_state
from ._cate_estimator import (BaseCateEstimator, LinearCateEstimator,
TreatmentExpansionMixin)
from .inference import BootstrapInference
from .utilities import (_deprecate_positional, check_input_arrays,
cross_product, filter_none_kwargs,
inverse_onehot, ndim, reshape, shape, transpose)
def _crossfit(model, folds, *args, **kwargs):
"""
General crossfit based calculation of nuisance parameters.
Parameters
----------
model : object
An object that supports fit and predict. Fit must accept all the args
and the keyword arguments kwargs. Similarly, predict must accept
all the args as arguments and kwargs as keyword arguments. The fit
function estimates a model of the nuisance function, based on the input
data to fit. Predict evaluates the fitted nuisance function on the input
data to predict.
folds : list of tuples or None
The crossfitting fold structure. Every entry in the list is a tuple whose
first element are the training indices of the args and kwargs data and
the second entry are the test indices. If the union of the test indices
is not the full set of all indices, then the remaining nuisance parameters
for the missing indices have value NaN. If folds is None, then cross fitting
is not performed; all indices are used for both model fitting and prediction.
args : a sequence of (numpy matrices or None)
Each matrix is a data variable whose first index corresponds to a sample
kwargs : a sequence of key-value args, with values being (numpy matrices or None)
Each keyword argument is of the form Var=x, with x a numpy array. Each
of these arrays is a data variable. The model fit and predict will be
called with signature: `model.fit(*args, **kwargs)` and
`model.predict(*args, **kwargs)`. Key-value arguments that have value
None are omitted from the two calls. So all the args and the non-None
kwargs variables must be part of the model's signature.
Returns
-------
nuisances : tuple of numpy matrices
Each entry in the tuple is a nuisance parameter matrix. The i-th row of the
matrix corresponds to the value of the nuisance parameter for the i-th input
sample.
model_list : list of objects of same type as input model
The cloned and fitted models for each fold. Can be used for inspection of the
variability of the fitted models across folds.
fitted_inds : 1d np array
The indices of the arrays for which the nuisance value was calculated. This
corresponds to the union of the indices of the test part of each fold in
the input fold list.
scores : tuple of list of float or None
The out-of-sample model scores for each nuisance model
Examples
--------
.. testcode::
import numpy as np
from sklearn.model_selection import KFold
from sklearn.linear_model import Lasso
from econml._ortho_learner import _crossfit
class Wrapper:
def __init__(self, model):
self._model = model
def fit(self, X, y, W=None):
self._model.fit(X, y)
return self
def predict(self, X, y, W=None):
return self._model.predict(X)
np.random.seed(123)
X = np.random.normal(size=(5000, 3))
y = X[:, 0] + np.random.normal(size=(5000,))
folds = list(KFold(2).split(X, y))
model = Lasso(alpha=0.01)
nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, X, y, W=y, Z=None)
>>> nuisance
(array([-1.105728... , -1.537566..., -2.451827... , ..., 1.106287...,
-1.829662..., -1.782273...]),)
>>> model_list
[<Wrapper object at 0x...>, <Wrapper object at 0x...>]
>>> fitted_inds
array([ 0, 1, 2, ..., 4997, 4998, 4999])
"""
model_list = []
fitted_inds = []
calculate_scores = hasattr(model, 'score')
# remove None arguments
kwargs = filter_none_kwargs(**kwargs)
if folds is None: # skip crossfitting
model_list.append(clone(model, safe=False))
model_list[0].fit(*args, **kwargs)
nuisances = model_list[0].predict(*args, **kwargs)
scores = model_list[0].score(*args, **kwargs) if calculate_scores else None
if not isinstance(nuisances, tuple):
nuisances = (nuisances,)
if not isinstance(scores, tuple):
scores = (scores,)
# scores entries should be lists of scores, so make each entry a singleton list
scores = tuple([s] for s in scores)
first_arr = args[0] if args else next(iter(kwargs.values()))  # dict_items is not subscriptable in Python 3
return nuisances, model_list, np.arange(first_arr.shape[0]), scores
for idx, (train_idxs, test_idxs) in enumerate(folds):
model_list.append(clone(model, safe=False))
if len(np.intersect1d(train_idxs, test_idxs)) > 0:
raise AttributeError("Invalid crossfitting fold structure." +
"Train and test indices of each fold must be disjoint.")
if len(np.intersect1d(fitted_inds, test_idxs)) > 0:
raise AttributeError("Invalid crossfitting fold structure. The same index appears in two test folds.")
fitted_inds = np.concatenate((fitted_inds, test_idxs))
args_train = tuple(var[train_idxs] if var is not None else None for var in args)
args_test = tuple(var[test_idxs] if var is not None else None for var in args)
kwargs_train = {key: var[train_idxs] for key, var in kwargs.items()}
kwargs_test = {key: var[test_idxs] for key, var in kwargs.items()}
model_list[idx].fit(*args_train, **kwargs_train)
nuisance_temp = model_list[idx].predict(*args_test, **kwargs_test)
if not isinstance(nuisance_temp, tuple):
nuisance_temp = (nuisance_temp,)
if idx == 0:
nuisances = tuple([np.full((args[0].shape[0],) + nuis.shape[1:], np.nan) for nuis in nuisance_temp])
for it, nuis in enumerate(nuisance_temp):
nuisances[it][test_idxs] = nuis
if calculate_scores:
score_temp = model_list[idx].score(*args_test, **kwargs_test)
if not isinstance(score_temp, tuple):
score_temp = (score_temp,)
if idx == 0:
scores = tuple([] for _ in score_temp)
for it, score in enumerate(score_temp):
scores[it].append(score)
return nuisances, model_list, np.sort(fitted_inds.astype(int)), (scores if calculate_scores else None)
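# Hedged usage sketch (added for illustration; not part of the original library):
# demonstrates the ``folds=None`` branch documented above, where crossfitting is
# skipped and a single clone of the model is both fit and evaluated on the full
# data, so the returned fitted indices cover every sample. ``Wrapper`` mirrors
# the docstring example above.
def _example_crossfit_no_folds():
    import numpy as np
    from sklearn.linear_model import Lasso

    class Wrapper:
        def __init__(self, model):
            self._model = model

        def fit(self, X, y, W=None):
            self._model.fit(X, y)
            return self

        def predict(self, X, y, W=None):
            return self._model.predict(X)

    X = np.random.normal(size=(100, 3))
    y = X[:, 0] + np.random.normal(size=(100,))
    nuisances, models, inds, scores = _crossfit(Wrapper(Lasso(alpha=0.01)),
                                                None,  # folds=None: no split
                                                X, y, W=y)
    assert np.array_equal(inds, np.arange(100))  # every index was "fitted"
    return nuisances, models, inds, scores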
CachedValues = namedtuple('CachedValues', ['nuisances',
'Y', 'T', 'X', 'W', 'Z', 'sample_weight', 'freq_weight',
'sample_var', 'groups'])
class _OrthoLearner(TreatmentExpansionMixin, LinearCateEstimator):
"""
Base class for all orthogonal learners. This class is a parent class to any method that has
the following architecture:
1. The CATE :math:`\\theta(X)` is the minimizer of some expected loss function
.. math ::
\\mathbb{E}[\\ell(V; \\theta(X), h(V))]
where :math:`V` are all the random variables and h is a vector of nuisance functions. Alternatively,
the class would also work if :math:`\\theta(X)` is the solution to a set of moment equations that
also depend on nuisance functions :math:`h`.
2. To estimate :math:`\\theta(X)` we first fit the h functions and calculate :math:`h(V_i)` for each sample
:math:`i` in a crossfit manner:
- Let (F1_train, F1_test), ..., (Fk_train, Fk_test) be any KFold partition
of the data, where Ft_train, Ft_test are subsets of indices of the input samples and such that
Ft_train is disjoint from Ft_test. The sets F1_test, ..., Fk_test form an incomplete partition
of all the input indices, i.e. they are disjoint and their union could potentially be a strict subset of
all input indices. For instance, in a time series split F1_train could be a prefix of the data and
F1_test the suffix. Typically, these folds will be created
by a KFold split, i.e. if S1, ..., Sk is any partition of the data, then Ft_train is the set of
all indices except St and Ft_test = St. If the union of the Ft_test is not all the data, then only the
subset of the data in the union of the Ft_test sets will be used in the final stage.
- Then for each t in [1, ..., k]
- Estimate a model :math:`\\hat{h}_t` for :math:`h` using Ft_train
- Evaluate the learned :math:`\\hat{h}_t` model on the data in Ft_test and use that value
as the nuisance value/vector :math:`\\hat{U}_i=\\hat{h}(V_i)` for the indices i in Ft_test
3. Estimate the model for :math:`\\theta(X)` by minimizing the empirical (regularized) plugin loss on
the subset of indices for which we have a nuisance value, i.e. the union of {F1_test, ..., Fk_test}:
.. math ::
\\mathbb{E}_n[\\ell(V; \\theta(X), \\hat{h}(V))]\
= \\frac{1}{n} \\sum_{i=1}^n \\ell(V_i; \\theta(X_i), \\hat{U}_i)
The method is a bit more general in that the final step does not need to be a loss minimization step.
The class takes as input a model for fitting an estimate of the nuisance h given a set of samples
and predicting the value of the learned nuisance model on any other set of samples. It also
takes as input a model for the final estimation, that takes as input the data and their associated
estimated nuisance values from the first stage and fits a model for the CATE :math:`\\theta(X)`. Then
at predict time, the final model given any set of samples of the X variable, returns the estimated
:math:`\\theta(X)`.
The method essentially implements all the crossfit and plugin logic, so that any child classes need
to only implement the appropriate `model_nuisance` and `model_final` and essentially nothing more.
It also implements the basic preprocessing logic behind the expansion of discrete treatments into
one-hot encodings.
Parameters
----------
discrete_treatment: bool
Whether the treatment values should be treated as categorical, rather than continuous, quantities
discrete_instrument: bool
Whether the instrument values should be treated as categorical, rather than continuous, quantities
categories: 'auto' or list
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
cv: int, cross-validation generator or an iterable
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the treatment is discrete
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used
(with a random shuffle in either case).
Unless an iterable is used, we call `split(concat[Z, W, X], T)` to generate the splits. If all
Z, W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None
If int, random_state is the seed used by the random number generator;
If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
by :mod:`np.random<numpy.random>`.
mc_iters: int, optional (default=None)
The number of times to rerun the first stage models to reduce the variance of the nuisances.
mc_agg: {'mean', 'median'}, optional (default='mean')
How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of
cross-fitting.
Examples
--------
The example code below implements a very simple version of the double machine learning
method on top of the :class:`._OrthoLearner` class, for expository purposes.
For a more elaborate implementation of a Double Machine Learning child class of the class
:class:`._OrthoLearner` check out :class:`.DML`
and its child classes:
.. testcode::
import numpy as np
from sklearn.linear_model import LinearRegression
from econml._ortho_learner import _OrthoLearner
class ModelNuisance:
def __init__(self, model_t, model_y):
self._model_t = model_t
self._model_y = model_y
def fit(self, Y, T, W=None):
self._model_t.fit(W, T)
self._model_y.fit(W, Y)
return self
def predict(self, Y, T, W=None):
return Y - self._model_y.predict(W), T - self._model_t.predict(W)
class ModelFinal:
def __init__(self):
return
def fit(self, Y, T, W=None, nuisances=None):
Y_res, T_res = nuisances
self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res)
return self
def predict(self, X=None):
return self.model.coef_[0]
def score(self, Y, T, W=None, nuisances=None):
Y_res, T_res = nuisances
return np.mean((Y_res - self.model.predict(T_res.reshape(-1, 1)))**2)
class OrthoLearner(_OrthoLearner):
def _gen_ortho_learner_model_nuisance(self):
return ModelNuisance(LinearRegression(), LinearRegression())
def _gen_ortho_learner_model_final(self):
return ModelFinal()
np.random.seed(123)
X = np.random.normal(size=(100, 3))
y = X[:, 0] + X[:, 1] + np.random.normal(0, 0.1, size=(100,))
est = OrthoLearner(cv=2, discrete_treatment=False, discrete_instrument=False,
categories='auto', random_state=None)
est.fit(y, X[:, 0], W=X[:, 1:])
>>> est.score_
0.00756830...
>>> est.const_marginal_effect()
1.02364992...
>>> est.effect()
array([1.023649...])
>>> est.effect(T0=0, T1=10)
array([10.236499...])
>>> est.score(y, X[:, 0], W=X[:, 1:])
0.00727995...
>>> est.ortho_learner_model_final_.model
LinearRegression(fit_intercept=False)
>>> est.ortho_learner_model_final_.model.coef_
array([1.023649...])
The following example shows how to do double machine learning with discrete treatments, using
the _OrthoLearner:
.. testcode::
class ModelNuisance:
def __init__(self, model_t, model_y):
self._model_t = model_t
self._model_y = model_y
def fit(self, Y, T, W=None):
self._model_t.fit(W, np.matmul(T, np.arange(1, T.shape[1]+1)))
self._model_y.fit(W, Y)
return self
def predict(self, Y, T, W=None):
return Y - self._model_y.predict(W), T - self._model_t.predict_proba(W)[:, 1:]
class ModelFinal:
def __init__(self):
return
def fit(self, Y, T, W=None, nuisances=None):
Y_res, T_res = nuisances
self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res)
return self
def predict(self):
# theta needs to be of dimension (1, d_t) if T is (n, d_t)
return np.array([[self.model.coef_[0]]])
def score(self, Y, T, W=None, nuisances=None):
Y_res, T_res = nuisances
return np.mean((Y_res - self.model.predict(T_res.reshape(-1, 1)))**2)
from sklearn.linear_model import LogisticRegression
class OrthoLearner(_OrthoLearner):
def _gen_ortho_learner_model_nuisance(self):
return ModelNuisance(LogisticRegression(solver='lbfgs'), LinearRegression())
def _gen_ortho_learner_model_final(self):
return ModelFinal()
np.random.seed(123)
W = np.random.normal(size=(100, 3))
import scipy.special
T = np.random.binomial(1, scipy.special.expit(W[:, 0]))
y = T + W[:, 0] + np.random.normal(0, 0.01, size=(100,))
est = OrthoLearner(cv=2, discrete_treatment=True, discrete_instrument=False,
categories='auto', random_state=None)
est.fit(y, T, W=W)
>>> est.score_
0.00673015...
>>> est.const_marginal_effect()
array([[1.008401...]])
>>> est.effect()
array([1.008401...])
>>> est.score(y, T, W=W)
0.00310431...
>>> est.ortho_learner_model_final_.model.coef_[0]
1.00840170...
Attributes
----------
models_nuisance_: nested list of objects of type(model_nuisance)
A nested list of instances of the model_nuisance object. The number of sublist equals to the
number of monte carlo iterations. Each element in the sublist corresponds to a crossfitting
fold and is the model instance that was fitted for that training fold.
ortho_learner_model_final_: object of type(model_final)
An instance of the model_final object that was fitted after calling fit.
score_ : float or array of floats
If the model_final has a score method, then `score_` contains the outcome of the final model
score when evaluated on the fitted nuisances from the first stage. Represents goodness of fit,
of the final CATE model.
nuisance_scores_ : tuple of nested lists of floats or None
The out-of-sample scores from training each nuisance model
"""
def __init__(self, *,
discrete_treatment, discrete_instrument, categories, cv, random_state,
mc_iters=None, mc_agg='mean'):
self.cv = cv
self.discrete_treatment = discrete_treatment
self.discrete_instrument = discrete_instrument
self.random_state = random_state
self.categories = categories
self.mc_iters = mc_iters
self.mc_agg = mc_agg
super().__init__()
@abstractmethod
def _gen_ortho_learner_model_nuisance(self):
""" Must return a fresh instance of a nuisance model
Returns
-------
model_nuisance: estimator
The estimator for fitting the nuisance function. Must implement
`fit` and `predict` methods that both have signatures::
model_nuisance.fit(Y, T, X=X, W=W, Z=Z,
sample_weight=sample_weight)
model_nuisance.predict(Y, T, X=X, W=W, Z=Z,
sample_weight=sample_weight)
In fact we allow for the model method signatures to skip any of the keyword arguments
as long as the class is always called with the omitted keyword argument set to ``None``.
This can be enforced in child classes by re-implementing the fit and the various effect
methods. If ``discrete_treatment=True``, then the input ``T`` to both above calls will be the
one-hot encoding of the original input ``T``, excluding the first column of the one-hot.
If the estimator also provides a score method with the same arguments as fit, it will be used to
calculate scores during training.
"""
pass
@abstractmethod
def _gen_ortho_learner_model_final(self):
""" Must return a fresh instance of a final model
Returns
-------
model_final: estimator for fitting the response residuals to the features and treatment residuals
Must implement `fit` and `predict` methods that must have signatures::
model_final.fit(Y, T, X=X, W=W, Z=Z, nuisances=nuisances,
sample_weight=sample_weight, freq_weight=freq_weight, sample_var=sample_var)
model_final.predict(X=X)
Predict should just take the features X and return the constant marginal effect. In fact we allow
for the model method signatures to skip any of the keyword arguments as long as the class is always
called with the omitted keyword argument set to ``None``. Moreover, the predict function of the final
model can take no argument if the class is always called with ``X=None``. This can be enforced in child
classes by re-implementing the fit and the various effect methods. If ``discrete_treatment=True``,
then the input ``T`` to both above calls will be the one-hot encoding of the original input ``T``,
excluding the first column of the one-hot.
"""
pass
def _check_input_dims(self, Y, T, X=None, W=None, Z=None, *other_arrays):
assert shape(Y)[0] == shape(T)[0], "Dimension mis-match!"
for arr in [X, W, Z, *other_arrays]:
assert (arr is None) or (arr.shape[0] == Y.shape[0]), "Dimension mismatch"
self._d_x = X.shape[1:] if X is not None else None
self._d_w = W.shape[1:] if W is not None else None
self._d_z = Z.shape[1:] if Z is not None else None
def _check_fitted_dims(self, X):
if X is None:
assert self._d_x is None, "X was not None when fitting, so can't be None for score or effect"
else:
assert self._d_x == X.shape[1:], "Dimension mis-match of X with fitted X"
def _check_fitted_dims_w_z(self, W, Z):
if W is None:
assert self._d_w is None, "W was not None when fitting, so can't be None for score"
else:
assert self._d_w == W.shape[1:], "Dimension mis-match of W with fitted W"
if Z is None:
assert self._d_z is None, "Z was not None when fitting, so can't be None for score"
else:
assert self._d_z == Z.shape[1:], "Dimension mis-match of Z with fitted Z"
def _subinds_check_none(self, var, inds):
return var[inds] if var is not None else None
def _strata(self, Y, T, X=None, W=None, Z=None,
sample_weight=None, freq_weight=None, sample_var=None, groups=None,
cache_values=False, only_final=False, check_input=True):
if self.discrete_instrument:
Z = LabelEncoder().fit_transform(np.ravel(Z))
if self.discrete_treatment:
enc = LabelEncoder()
T = enc.fit_transform(np.ravel(T))
if self.discrete_instrument:
return T + Z * len(enc.classes_)
else:
return T
elif self.discrete_instrument:
return Z
else:
return None
def _prefit(self, Y, T, *args, only_final=False, **kwargs):
# generate an instance of the final model
self._ortho_learner_model_final = self._gen_ortho_learner_model_final()
if not only_final:
# generate an instance of the nuisance model
self._ortho_learner_model_nuisance = self._gen_ortho_learner_model_nuisance()
super()._prefit(Y, T, *args, **kwargs)
@BaseCateEstimator._wrap_fit
def fit(self, Y, T, *, X=None, W=None, Z=None, sample_weight=None, freq_weight=None, sample_var=None, groups=None,
cache_values=False, inference=None, only_final=False, check_input=True):
"""
Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample
T: (n, d_t) matrix or vector of length n
Treatments for each sample
X: optional (n, d_x) matrix or None (Default=None)
Features for each sample
W: optional (n, d_w) matrix or None (Default=None)
Controls for each sample
Z: optional (n, d_z) matrix or None (Default=None)
Instruments for each sample
sample_weight : (n,) array like, default None
Individual weights for each sample. If None, it assumes equal weight.
freq_weight: (n, ) array like of integers, default None
Weight for the observation. Observation i is treated as the mean
outcome of freq_weight[i] independent observations.
When ``sample_var`` is not None, this should be provided.
sample_var : {(n,), (n, d_y)} nd array like, default None
Variance of the outcome(s) of the original freq_weight[i] observations that were used to
compute the mean outcome represented by observation i.
groups: (n,) vector, optional
All rows corresponding to the same group will be kept together during splitting.
If groups is not None, the cv argument passed to this class's initializer
must support a 'groups' argument to its split method.
cache_values: bool, default False
Whether to cache the inputs and computed nuisances, which will allow refitting a different final model
inference: string, :class:`.Inference` instance, or None
Method for performing inference. This estimator supports 'bootstrap'
(or an instance of :class:`.BootstrapInference`).
only_final: bool, default False
Whether to fit the nuisance models or use the existing cached values
Note. This parameter is only used internally by the `refit` method and should not be exposed
publicly by overrides of the `fit` method in public classes.
check_input: bool, default True
Whether to check if the input is valid
Note. This parameter is only used internally by the `refit` method and should not be exposed
publicly by overrides of the `fit` method in public classes.
Returns
-------
self : object
"""
self._random_state = check_random_state(self.random_state)
assert (freq_weight is None) == (
sample_var is None), "Sample variances and frequency weights must be provided together!"
if check_input:
Y, T, X, W, Z, sample_weight, freq_weight, sample_var, groups = check_input_arrays(
Y, T, X, W, Z, sample_weight, freq_weight, sample_var, groups)
self._check_input_dims(Y, T, X, W, Z, sample_weight, freq_weight, sample_var, groups)
if not only_final:
if self.discrete_treatment:
categories = self.categories
if categories != 'auto':
categories = [categories] # OneHotEncoder expects a 2D array with features per column
self.transformer = OneHotEncoder(categories=categories, sparse=False, drop='first')
self.transformer.fit(reshape(T, (-1, 1)))
self._d_t = (len(self.transformer.categories_[0]) - 1,)
else:
self.transformer = None
if self.discrete_instrument:
self.z_transformer = OneHotEncoder(categories='auto', sparse=False, drop='first')
self.z_transformer.fit(reshape(Z, (-1, 1)))
else:
self.z_transformer = None
all_nuisances = []
fitted_inds = None
if sample_weight is None:
if freq_weight is not None:
sample_weight_nuisances = freq_weight
else:
sample_weight_nuisances = None
else:
if freq_weight is not None:
sample_weight_nuisances = freq_weight * sample_weight
else:
sample_weight_nuisances = sample_weight
self._models_nuisance = []
for idx in range(self.mc_iters or 1):
nuisances, fitted_models, new_inds, scores = self._fit_nuisances(
Y, T, X, W, Z, sample_weight=sample_weight_nuisances, groups=groups)
all_nuisances.append(nuisances)
self._models_nuisance.append(fitted_models)
if scores is None:
self.nuisance_scores_ = None
else:
if idx == 0:
self.nuisance_scores_ = tuple([] for _ in scores)
for ind, score in enumerate(scores):
self.nuisance_scores_[ind].append(score)
if fitted_inds is None:
fitted_inds = new_inds
elif not np.array_equal(fitted_inds, new_inds):
raise AttributeError("Different indices were fit by different folds, so they cannot be aggregated")
if self.mc_iters is not None:
if self.mc_agg == 'mean':
nuisances = tuple(np.mean(nuisance_mc_variants, axis=0)
for nuisance_mc_variants in zip(*all_nuisances))
elif self.mc_agg == 'median':
nuisances = tuple(np.median(nuisance_mc_variants, axis=0)
for nuisance_mc_variants in zip(*all_nuisances))
else:
raise ValueError(
"Parameter `mc_agg` must be one of {'mean', 'median'}. Got {}".format(self.mc_agg))
Y, T, X, W, Z, sample_weight, freq_weight, sample_var = (self._subinds_check_none(arr, fitted_inds)
for arr in (Y, T, X, W, Z, sample_weight,
freq_weight, sample_var))
nuisances = tuple([self._subinds_check_none(nuis, fitted_inds) for nuis in nuisances])
self._cached_values = CachedValues(nuisances=nuisances,
Y=Y, T=T, X=X, W=W, Z=Z,
sample_weight=sample_weight,
freq_weight=freq_weight,
sample_var=sample_var,
groups=groups) if cache_values else None
else:
nuisances = self._cached_values.nuisances
# _d_t is altered by _fit_nuisances relative to what _prefit does, so we need to perform
# the same alteration even when we only want to fit the final model.
if self.transformer is not None:
self._d_t = (len(self.transformer.categories_[0]) - 1,)
self._fit_final(Y=Y,
T=self.transformer.transform(T.reshape((-1, 1))) if self.transformer is not None else T,
X=X, W=W, Z=Z,
nuisances=nuisances,
sample_weight=sample_weight,
freq_weight=freq_weight,
sample_var=sample_var,
groups=groups)
return self
@property
def _illegal_refit_inference_methods(self):
return (BootstrapInference,)
def refit_final(self, inference=None):
"""
Estimate the counterfactual model using a new final model specification but with cached first stage results.
In order for this to succeed, ``fit`` must have been called with ``cache_values=True``. This call
will only refit the final model, using the current setting of any parameters that affect the
final stage estimation. Changes to parameters that affect how the first stage nuisance estimates
are computed will have no effect here; you need to call fit again to change the
first stage estimation results. (A hedged usage sketch appears at the end of this module.)
Parameters
----------
inference : inference method, optional
The string or object that represents the inference method
Returns
-------
self : object
This instance
"""
assert self._cached_values, "Refit can only be called if values were cached during the original fit"
if isinstance(self._get_inference(inference), self._illegal_refit_inference_methods):
raise ValueError("The chosen inference method does not allow only for model final re-fitting.")
cached = self._cached_values
kwargs = filter_none_kwargs(
Y=cached.Y, T=cached.T, X=cached.X, W=cached.W, Z=cached.Z,
sample_weight=cached.sample_weight, freq_weight=cached.freq_weight, sample_var=cached.sample_var,
groups=cached.groups,
)
_OrthoLearner.fit(self, **kwargs,
cache_values=True, inference=inference, only_final=True, check_input=False)
return self
def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
# use a binary array to get stratified split in case of discrete treatment
stratify = self.discrete_treatment or self.discrete_instrument
strata = self._strata(Y, T, X=X, W=W, Z=Z, sample_weight=sample_weight, groups=groups)
if strata is None:
strata = T # always safe to pass T as second arg to split even if we're not actually stratifying
if self.discrete_treatment:
T = self.transformer.transform(reshape(T, (-1, 1)))
if self.discrete_instrument:
Z = self.z_transformer.transform(reshape(Z, (-1, 1)))
if self.cv == 1: # special case, no cross validation
folds = None
else:
splitter = check_cv(self.cv, [0], classifier=stratify)
# if check_cv produced a new KFold or StratifiedKFold object, we need to set shuffle and random_state
# TODO: ideally, we'd also infer whether we need a GroupKFold (if groups are passed)
# however, sklearn doesn't support both stratifying and grouping (see
# https://github.com/scikit-learn/scikit-learn/issues/13621), so for now the user needs to supply
# their own object that supports grouping if they want to use groups.
if splitter != self.cv and isinstance(splitter, (KFold, StratifiedKFold)):
splitter.shuffle = True
splitter.random_state = self._random_state
all_vars = [var if np.ndim(var) == 2 else var.reshape(-1, 1) for var in [Z, W, X] if var is not None]
to_split = np.hstack(all_vars) if all_vars else np.ones((T.shape[0], 1))
if groups is not None:
if isinstance(splitter, (KFold, StratifiedKFold)):
raise TypeError("Groups were passed to fit while using a KFold or StratifiedKFold splitter. "
"Instead you must initialize this object with a splitter that can handle groups.")
folds = splitter.split(to_split, strata, groups=groups)
else:
folds = splitter.split(to_split, strata)
nuisances, fitted_models, fitted_inds, scores = _crossfit(self._ortho_learner_model_nuisance, folds,
Y, T, X=X, W=W, Z=Z,
sample_weight=sample_weight, groups=groups)
return nuisances, fitted_models, fitted_inds, scores
def _fit_final(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None,
freq_weight=None, sample_var=None, groups=None):
self._ortho_learner_model_final.fit(Y, T, **filter_none_kwargs(X=X, W=W, Z=Z,
nuisances=nuisances,
sample_weight=sample_weight,
freq_weight=freq_weight,
sample_var=sample_var,
groups=groups))
self.score_ = None
if hasattr(self._ortho_learner_model_final, 'score'):
self.score_ = self._ortho_learner_model_final.score(Y, T, **filter_none_kwargs(X=X, W=W, Z=Z,
nuisances=nuisances,
sample_weight=sample_weight,
groups=groups))
def const_marginal_effect(self, X=None):
X, = check_input_arrays(X)
self._check_fitted_dims(X)
if X is None:
return self._ortho_learner_model_final.predict()
else:
return self._ortho_learner_model_final.predict(X)
const_marginal_effect.__doc__ = LinearCateEstimator.const_marginal_effect.__doc__
def const_marginal_effect_interval(self, X=None, *, alpha=0.05):
X, = check_input_arrays(X)
self._check_fitted_dims(X)
return super().const_marginal_effect_interval(X, alpha=alpha)
const_marginal_effect_interval.__doc__ = LinearCateEstimator.const_marginal_effect_interval.__doc__
def const_marginal_effect_inference(self, X=None):
X, = check_input_arrays(X)
self._check_fitted_dims(X)
return super().const_marginal_effect_inference(X)
const_marginal_effect_inference.__doc__ = LinearCateEstimator.const_marginal_effect_inference.__doc__
def effect_interval(self, X=None, *, T0=0, T1=1, alpha=0.05):
X, T0, T1 = check_input_arrays(X, T0, T1)
self._check_fitted_dims(X)
return super().effect_interval(X, T0=T0, T1=T1, alpha=alpha)
effect_interval.__doc__ = LinearCateEstimator.effect_interval.__doc__
def effect_inference(self, X=None, *, T0=0, T1=1):
X, T0, T1 = check_input_arrays(X, T0, T1)
self._check_fitted_dims(X)
return super().effect_inference(X, T0=T0, T1=T1)
effect_inference.__doc__ = LinearCateEstimator.effect_inference.__doc__
def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
"""
Score the fitted CATE model on a new data set. Generates nuisance parameters
for the new data set based on the fitted nuisance models created at fit time.
It uses the mean prediction of the models fitted on the different crossfit folds
and monte carlo iterations. Then it calls the score function of the model_final and
returns the calculated score. The model_final model must have a score method.
If model_final does not have a score method, then it raises an :exc:`.AttributeError`
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample
T: (n, d_t) matrix or vector of length n
Treatments for each sample
X: optional (n, d_x) matrix or None (Default=None)
Features for each sample
W: optional (n, d_w) matrix or None (Default=None)
Controls for each sample
Z: optional (n, d_z) matrix or None (Default=None)
Instruments for each sample
sample_weight: optional(n,) vector or None (Default=None)
Weights for each sample
groups: (n,) vector, optional
All rows corresponding to the same group will be kept together during splitting.
Returns
-------
score : float or (array of float)
The score of the final CATE model on the new data. Same type as the return
type of the model_final.score method.
"""
if not hasattr(self._ortho_learner_model_final, 'score'):
raise AttributeError("Final model does not have a score method!")
Y, T, X, W, Z = check_input_arrays(Y, T, X, W, Z)
self._check_fitted_dims(X)
self._check_fitted_dims_w_z(W, Z)
X, T = self._expand_treatments(X, T)
if self.z_transformer is not None:
Z = self.z_transformer.transform(reshape(Z, (-1, 1)))
n_iters = len(self._models_nuisance)
n_splits = len(self._models_nuisance[0])
# for each mc iteration
for i, models_nuisances in enumerate(self._models_nuisance):
# for each model under cross fit setting
for j, mdl in enumerate(models_nuisances):
nuisance_temp = mdl.predict(Y, T, **filter_none_kwargs(X=X, W=W, Z=Z, groups=groups))
if not isinstance(nuisance_temp, tuple):
nuisance_temp = (nuisance_temp,)
if i == 0 and j == 0:
nuisances = [np.zeros((n_iters * n_splits,) + nuis.shape) for nuis in nuisance_temp]
for it, nuis in enumerate(nuisance_temp):
nuisances[it][i * n_splits + j] = nuis  # flat index over (mc iteration, fold)
for it in range(len(nuisances)):
nuisances[it] = np.mean(nuisances[it], axis=0)
return self._ortho_learner_model_final.score(Y, T, nuisances=nuisances,
**filter_none_kwargs(X=X, W=W, Z=Z,
sample_weight=sample_weight, groups=groups))
@property
def ortho_learner_model_final_(self):
if not hasattr(self, '_ortho_learner_model_final'):
raise AttributeError("Model is not fitted!")
return self._ortho_learner_model_final
@property
def models_nuisance_(self):
if not hasattr(self, '_models_nuisance'):
raise AttributeError("Model is not fitted!")
return self._models_nuisance | PypiClean |
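# Hedged usage sketch (added for illustration; not part of the original library):
# the ``refit_final`` mechanism documented above only works when the first-stage
# results were cached. Using the ``OrthoLearner`` subclass from the class
# docstring example:
#
#     est = OrthoLearner(cv=2, discrete_treatment=False, discrete_instrument=False,
#                        categories='auto', random_state=None)
#     est.fit(y, X[:, 0], W=X[:, 1:], cache_values=True)  # cache nuisances
#     est.refit_final()  # re-fits only the final stage from the cached values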
/OASYS1-ShadowOui-1.5.210.tar.gz/OASYS1-ShadowOui-1.5.210/orangecontrib/shadow/util/shadow_util.py | __author__ = 'labx'
import os
import random
import sys
import copy
import numpy
import xraylib
import h5py
try:
from PyQt5.QtCore import QSettings
from PyQt5.QtWidgets import QWidget, QGridLayout, QLabel
from PyQt5.QtGui import QFont, QPalette, QColor
except:
pass
from matplotlib.patches import FancyArrowPatch, ArrowStyle
from scipy import optimize
from numpy import asarray
try:
from oasys.widgets import gui
except:
pass
from oasys.widgets import congruence
from oasys.util.oasys_util import get_sigma, get_fwhm, get_average
try:
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import figure as matfig
import pylab
except ImportError:
print(sys.exc_info()[1])
pass
import Shadow.ShadowToolsPrivate as stp
import scipy.constants as codata
class ShadowCongruence():
@classmethod
def checkEmptyBeam(cls, input_beam):
if input_beam is None: return False
elif not hasattr(input_beam._beam, "rays"): return False
elif len(input_beam._beam.rays) == 0: return False
else: return True
@classmethod
def checkGoodBeam(cls, input_beam):
return len(input_beam._beam.rays[numpy.where(input_beam._beam.rays[:, 9] == 1)]) > 0
@classmethod
def checkBraggFile(cls, file_name):
file = open(file_name, "r")
try:
rows = file.readlines()
if len(rows) < 10: raise Exception("Bragg file malformed, please check input")
if "# Bragg version," in rows[0]:
pass # version 2
else:
first_row = ShadowCongruence.__get_numbers(rows[0].strip())
if not len(first_row) == 3: raise Exception("Bragg file malformed, please check input")
second_row = ShadowCongruence.__get_numbers(rows[1].strip())
if not len(second_row) == 3: raise Exception("Bragg file malformed, please check input")
if not (rows[2].strip().startswith("(") and \
rows[3].strip().startswith("(") and \
rows[4].strip().startswith("(") and \
rows[5].strip().startswith("(")):
raise Exception("Bragg file malformed, please check input")
seventh_row = ShadowCongruence.__get_numbers(rows[6].strip())
if not len(seventh_row) == 3: raise Exception("Bragg file malformed, please check input")
eighth_row = ShadowCongruence.__get_numbers(rows[7].strip())
if not len(eighth_row) == 3: raise Exception("Bragg file malformed, please check input")
nineth_row = ShadowCongruence.__get_numbers(rows[8].strip())
if not len(nineth_row) == 1: raise Exception("Bragg file malformed, please check input")
except Exception as e:
file.close()
raise e
@classmethod
def checkPreReflFile(cls, file_name):
file = open(file_name, "r")
try:
rows = file.readlines()
if len(rows) < 3: raise Exception("PreRefl file malformed, please check input")
first_row = ShadowCongruence.__get_numbers(rows[0].strip())
if not len(first_row) == 4: raise Exception("PreRefl file malformed, please check input")
second_row = ShadowCongruence.__get_numbers(rows[1].strip())
if not len(second_row) == 1: raise Exception("PreRefl file malformed, please check input")
try:
elements = int(second_row[0])
except:
raise Exception("PreRefl file malformed, please check input")
if len(rows) != (elements*2) + 2: raise Exception("PreRefl file malformed, please check input")
except Exception as e:
file.close()
raise e
@classmethod
def checkPreMLayerFile(cls, file_name):
file = open(file_name, "r")
try:
rows = file.readlines()
if len(rows) < 2: raise Exception("PreMLayer file malformed, please check input")
first_row = ShadowCongruence.__get_numbers(rows[0].strip())
if not len(first_row) == 1: raise Exception("PreMLayer file malformed, please check input")
try:
elements = int(first_row[0])
except:
raise Exception("PreRefl file malformed, please check input")
second_row = ShadowCongruence.__get_numbers(rows[1].strip())
if not len(second_row) == int(elements): raise Exception("PreMLayer file malformed, please check input")
try:
separator_row = ShadowCongruence.__get_numbers(rows[2 + elements*3].strip())
if not len(separator_row) == 1: raise Exception("PreMLayer file malformed, please check input")
except:
raise Exception("PreRefl file malformed, please check input")
next_row = ShadowCongruence.__get_numbers(rows[2 + elements*3 + 1].strip())
if not len(next_row) == 4: raise Exception("PreMLayer file malformed, please check input")
except Exception as e:
file.close()
raise e
@classmethod
def check2ColumnFormatFile(cls, file_name, specific_name):
try:
if file_name.startswith('/'):
values = numpy.loadtxt(os.path.abspath(file_name))
else:
values = numpy.loadtxt(os.path.abspath(os.path.join(os.path.curdir, file_name)))
except:
raise Exception(specific_name + " file malformed (should be 2 or more columns of numbers, separated by spaces), please check input")
if len(values) < 2: raise Exception(specific_name + " file malformed (should be 2 or more columns of numbers, separated by spaces), please check input")
@classmethod
def checkErrorProfileFile(cls, file_name):
file = open(file_name, "r")
try:
rows = file.readlines()
if len(rows) < 2: raise Exception("Surface Error file malformed, please check input")
first_row = ShadowCongruence.__get_numbers(rows[0].strip())
if not len(first_row) == 2: raise Exception("Surface Error file malformed, please check input")
n_x = int(first_row[0])
if n_x > 500:
raise Exception("Malformed file: maximum allowed point in X direction is 500")
except Exception as e:
file.close()
raise e
@classmethod
def __get_numbers(cls, string):
values = string.strip().split(" ")
numbers = []
for value in values:
if not value == "":
try:
numbers.append(float(value))  # keep only tokens that parse as numbers
except:
pass
return numbers
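# Hedged usage sketch (added for illustration; not part of the original module):
# the ShadowCongruence checkers raise an Exception describing the problem when a
# preprocessor file is malformed. The file names below are hypothetical placeholders.
def _example_check_preprocessor_files():
    for checker, file_name in [(ShadowCongruence.checkBraggFile, "bragg.dat"),
                               (ShadowCongruence.checkPreReflFile, "prerefl.dat")]:
        try:
            checker(file_name)
            print(file_name, "looks well formed")
        except Exception as e:
            print(file_name, "rejected:", e)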
class ShadowStatisticData:
intensity = 0.0
total_number_of_rays = 0
total_good_rays = 0
total_lost_rays = 0
def __init__(self, intensity = 0.0,
total_number_of_rays = 0,
total_good_rays = 0,
total_lost_rays = 0):
self.intensity = intensity
self.total_number_of_rays = total_number_of_rays
self.total_good_rays = total_good_rays
self.total_lost_rays = total_lost_rays
class ShadowHistoData(ShadowStatisticData):
fwhm = 0.0
x_fwhm_i = 0.0
x_fwhm_f = 0.0
y_fwhm = 0.0
def __init__(self, intensity = 0.0,
total_number_of_rays = 0,
total_good_rays = 0,
total_lost_rays = 0,
fwhm = 0.0,
x_fwhm_i = 0.0,
x_fwhm_f = 0.0,
y_fwhm = 0.0):
super().__init__(intensity, total_number_of_rays, total_good_rays, total_lost_rays)
self.fwhm = fwhm
self.x_fwhm_i = x_fwhm_i
self.x_fwhm_f = x_fwhm_f
self.y_fwhm = y_fwhm
class ShadowPlotData(ShadowStatisticData):
fwhm_h = 0.0
fwhm_v = 0.0
def __init__(self, intensity = 0.0,
total_number_of_rays = 0,
total_good_rays = 0,
total_lost_rays = 0,
fwhm_h = 0.0,
fwhm_v = 0.0):
super().__init__(intensity, total_number_of_rays, total_good_rays, total_lost_rays)
self.fwhm_h = fwhm_h
self.fwhm_v = fwhm_v
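# Hedged usage sketch (added): the statistics holders above are plain value
# objects, e.g.
#
#     histo = ShadowHistoData(intensity=1.0, total_number_of_rays=5000,
#                             total_good_rays=4800, total_lost_rays=200,
#                             fwhm=0.01, x_fwhm_i=-0.005, x_fwhm_f=0.005, y_fwhm=0.5)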
try:
class ShadowPlot:
_is_conversione_active = True
#########################################################################################
#
# FOR TEMPORARY USE: FIX AN ERROR IN PYMCA.PLOT.IMAGEVIEW
#
#########################################################################################
@classmethod
def set_conversion_active(cls, is_active=True):
ShadowPlot._is_conversione_active = is_active
"""Sample code to add 2D dataset saving as text to ImageView."""
#########################################################################################
#
# WIDGET FOR DETAILED PLOT
#
#########################################################################################
class InfoBoxWidget(QWidget):
intensity_field = ""
flux_field = ""
total_rays_field = ""
total_good_rays_field = ""
total_lost_rays_field = ""
fwhm_h_field = ""
fwhm_v_field = ""
sigma_h_field = ""
sigma_v_field = ""
centroid_h_field = ""
centroid_v_field = ""
def __init__(self, x_scale_factor = 1.0, y_scale_factor = 1.0, is_2d=True):
super(ShadowPlot.InfoBoxWidget, self).__init__()
info_box_inner=gui.widgetBox(self, "Info")
info_box_inner.setFixedHeight(518*y_scale_factor)
info_box_inner.setFixedWidth(230*x_scale_factor)
self.flux_box = gui.widgetBox(info_box_inner, "", addSpace=False, orientation="horizontal")
self.flux = gui.lineEdit(self.flux_box, self, "flux_field", "\u03a6 [ph/s/0.1%BW]", tooltip="Flux", labelWidth=115, valueType=str, orientation="horizontal")
self.flux_box.setVisible(False)
self.intensity = gui.lineEdit(info_box_inner, self, "intensity_field", "Intensity", tooltip="Intensity", labelWidth=115, valueType=str, orientation="horizontal")
self.total_rays = gui.lineEdit(info_box_inner, self, "total_rays_field", "Total Rays", tooltip="Total Rays", labelWidth=115, valueType=str, orientation="horizontal")
self.total_good_rays = gui.lineEdit(info_box_inner, self, "total_good_rays_field", "Total Good Rays", tooltip="Total Good Rays", labelWidth=115, valueType=str, orientation="horizontal")
self.total_lost_rays = gui.lineEdit(info_box_inner, self, "total_lost_rays_field", "Total Lost Rays", tooltip="Total Lost Rays", labelWidth=115, valueType=str, orientation="horizontal")
label_box_1 = gui.widgetBox(info_box_inner, "", addSpace=False, orientation="horizontal")
self.label_h = QLabel("FWHM ")
self.label_h.setFixedWidth(115)
palette = QPalette(self.label_h.palette())
palette.setColor(QPalette.Foreground, QColor('blue'))
self.label_h.setPalette(palette)
label_box_1.layout().addWidget(self.label_h)
self.fwhm_h = gui.lineEdit(label_box_1, self, "fwhm_h_field", "", tooltip="FWHM", labelWidth=115, valueType=str, orientation="horizontal")
if is_2d:
label_box_2 = gui.widgetBox(info_box_inner, "", addSpace=False, orientation="horizontal")
self.label_v = QLabel("FWHM ")
self.label_v.setFixedWidth(115)
palette = QPalette(self.label_v.palette())
palette.setColor(QPalette.Foreground, QColor('red'))
self.label_v.setPalette(palette)
label_box_2.layout().addWidget(self.label_v)
self.fwhm_v = gui.lineEdit(label_box_2, self, "fwhm_v_field", "", tooltip="FWHM", labelWidth=115, valueType=str, orientation="horizontal")
label_box_1 = gui.widgetBox(info_box_inner, "", addSpace=False, orientation="horizontal")
self.label_s_h = QLabel("\u03c3 (s.d.)")
self.label_s_h.setFixedWidth(115)
palette = QPalette(self.label_s_h.palette())
palette.setColor(QPalette.Foreground, QColor('blue'))
self.label_s_h.setPalette(palette)
label_box_1.layout().addWidget(self.label_s_h)
self.sigma_h = gui.lineEdit(label_box_1, self, "sigma_h_field", "", tooltip="Sigma", labelWidth=115, valueType=str, orientation="horizontal")
if is_2d:
label_box_2 = gui.widgetBox(info_box_inner, "", addSpace=False, orientation="horizontal")
self.label_s_v = QLabel("\u03c3 (s.d.)")
self.label_s_v.setFixedWidth(115)
palette = QPalette(self.label_s_v.palette())
palette.setColor(QPalette.Foreground, QColor('red'))
self.label_s_v.setPalette(palette)
label_box_2.layout().addWidget(self.label_s_v)
self.sigma_v = gui.lineEdit(label_box_2, self, "sigma_v_field", "", tooltip="Sigma", labelWidth=115, valueType=str, orientation="horizontal")
label_box_1 = gui.widgetBox(info_box_inner, "", addSpace=False, orientation="horizontal")
self.label_c_h = QLabel("centroid ")
self.label_c_h.setFixedWidth(115)
palette = QPalette(self.label_c_h.palette())
palette.setColor(QPalette.Foreground, QColor('blue'))
self.label_c_h.setPalette(palette)
label_box_1.layout().addWidget(self.label_c_h)
self.centroid_h = gui.lineEdit(label_box_1, self, "centroid_h_field", "", tooltip="Centroid", labelWidth=115, valueType=str, orientation="horizontal")
if is_2d:
label_box_2 = gui.widgetBox(info_box_inner, "", addSpace=False, orientation="horizontal")
self.label_c_v = QLabel("centroid ")
self.label_c_v.setFixedWidth(115)
palette = QPalette(self.label_c_v.palette())
palette.setColor(QPalette.Foreground, QColor('red'))
self.label_c_v.setPalette(palette)
label_box_2.layout().addWidget(self.label_c_v)
self.centroid_v = gui.lineEdit(label_box_2, self, "centroid_v_field", "", tooltip="Sigma", labelWidth=115, valueType=str, orientation="horizontal")
self.intensity.setReadOnly(True)
font = QFont(self.intensity.font())
font.setBold(True)
self.intensity.setFont(font)
palette = QPalette(self.intensity.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.intensity.setPalette(palette)
self.flux.setReadOnly(True)
font = QFont(self.flux.font())
font.setBold(True)
self.flux.setFont(font)
palette = QPalette(self.flux.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.flux.setPalette(palette)
self.total_rays.setReadOnly(True)
font = QFont(self.total_rays.font())
font.setBold(True)
self.total_rays.setFont(font)
palette = QPalette(self.intensity.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.total_rays.setPalette(palette)
self.total_good_rays.setReadOnly(True)
font = QFont(self.total_good_rays.font())
font.setBold(True)
self.total_good_rays.setFont(font)
palette = QPalette(self.total_good_rays.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.total_good_rays.setPalette(palette)
self.total_lost_rays.setReadOnly(True)
font = QFont(self.total_lost_rays.font())
font.setBold(True)
self.total_lost_rays.setFont(font)
palette = QPalette(self.total_lost_rays.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.total_lost_rays.setPalette(palette)
self.fwhm_h.setReadOnly(True)
font = QFont(self.fwhm_h.font())
font.setBold(True)
self.fwhm_h.setFont(font)
palette = QPalette(self.fwhm_h.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.fwhm_h.setPalette(palette)
self.sigma_h.setReadOnly(True)
font = QFont(self.sigma_h.font())
font.setBold(True)
self.sigma_h.setFont(font)
palette = QPalette(self.sigma_h.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.sigma_h.setPalette(palette)
self.centroid_h.setReadOnly(True)
font = QFont(self.centroid_h.font())
font.setBold(True)
self.centroid_h.setFont(font)
palette = QPalette(self.centroid_h.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.centroid_h.setPalette(palette)
if is_2d:
self.fwhm_v.setReadOnly(True)
font = QFont(self.fwhm_v.font())
font.setBold(True)
self.fwhm_v.setFont(font)
palette = QPalette(self.fwhm_v.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.fwhm_v.setPalette(palette)
self.sigma_v.setReadOnly(True)
font = QFont(self.sigma_v.font())
font.setBold(True)
self.sigma_v.setFont(font)
palette = QPalette(self.sigma_v.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.sigma_v.setPalette(palette)
self.centroid_v.setReadOnly(True)
font = QFont(self.centroid_v.font())
font.setBold(True)
self.centroid_v.setFont(font)
palette = QPalette(self.centroid_v.palette())
palette.setColor(QPalette.Text, QColor('dark blue'))
palette.setColor(QPalette.Base, QColor(243, 240, 160))
self.centroid_v.setPalette(palette)
def set_flux(self, flux=None):
if flux is None:
self.flux.setText("0.0")
self.flux_box.setVisible(False)
else:
self.flux.setText('%.3E' % flux)
self.flux_box.setVisible(True)
def clear(self):
self.intensity.setText("0.0")
self.flux.setText("0.0")
self.flux_box.setVisible(False)
self.total_rays.setText("0")
self.total_good_rays.setText("0")
self.total_lost_rays.setText("0")
self.fwhm_h.setText("0.0000")
if hasattr(self, "fwhm_v"): self.fwhm_v.setText("0.0000")
self.sigma_h.setText("0.0000")
if hasattr(self, "sigma_v"): self.sigma_v.setText("0.0000")
self.centroid_h.setText("0.0000")
if hasattr(self, "centroid_v"): self.centroid_v.setText("0.0000")
self.boundary_h.setText("")
if hasattr(self, "boundary_v"): self.centroid_v.setText("")
class DetailedHistoWidget(QWidget):
def __init__(self, x_scale_factor = 1.0, y_scale_factor = 1.0):
super(ShadowPlot.DetailedHistoWidget, self).__init__()
self.plot_canvas = gui.plotWindow(roi=False, control=False, position=True, logScale=True, fit=True)
self.plot_canvas.setDefaultPlotLines(True)
self.plot_canvas.setActiveCurveColor(color='blue')
self.plot_canvas.setMinimumWidth(590*x_scale_factor)
self.plot_canvas.setMaximumWidth(590*x_scale_factor)
self.info_box = ShadowPlot.InfoBoxWidget(x_scale_factor, y_scale_factor, is_2d=False)
layout = QGridLayout()
layout.addWidget( self.info_box, 0, 1, 1, 1)
layout.addWidget(self.plot_canvas, 0, 0, 1, 1)
layout.setColumnMinimumWidth(0, 600*x_scale_factor)
layout.setColumnMinimumWidth(1, 230*x_scale_factor)
self.setLayout(layout)
def plot_histo(self, beam, col, nolost, xrange, ref, title, xtitle, ytitle, nbins = 100, xum="", conv=1.0, ticket_to_add=None, flux=None):
ticket = beam.histo1(col, nbins=nbins, xrange=xrange, nolost=nolost, ref=ref)
if ref in [24, 25]: ticket['intensity'] = beam.getshonecol(ref, nolost=nolost).sum()
# TODO: check congruence between tickets
if not ticket_to_add is None:
last_ticket = copy.deepcopy(ticket)
ticket['histogram'] += ticket_to_add['histogram']
ticket['histogram_path'] += ticket_to_add['histogram_path']
ticket['intensity'] += ticket_to_add['intensity']
ticket['nrays'] += ticket_to_add['nrays']
ticket['good_rays'] += ticket_to_add['good_rays']
ticket['fwhm'], ticket['fwhm_quote'], ticket['fwhm_coordinates'] = get_fwhm(ticket['histogram'], ticket['bin_center'])
ticket['sigma'] = get_sigma(ticket['histogram'], ticket['bin_center'])
ticket['centroid'] = get_average(ticket['histogram'], ticket['bin_center'])
factor=ShadowPlot.get_factor(col, conv)
if ref != 0 and not ytitle is None: ytitle = ytitle + ' weighted by ' + ShadowPlot.get_shadow_label(ref)
histogram = ticket['histogram_path']
bins = ticket['bin_path']*factor
self.plot_canvas.addCurve(bins, histogram, title, symbol='', color='blue', replace=True) #'+', '^', ','
if not xtitle is None: self.plot_canvas.setGraphXLabel(xtitle)
if not ytitle is None: self.plot_canvas.setGraphYLabel(ytitle)
if not title is None: self.plot_canvas.setGraphTitle(title)
self.plot_canvas.setInteractiveMode(mode='zoom')
if ticket['fwhm'] == None: ticket['fwhm'] = 0.0
if not ticket_to_add is None:
if last_ticket['fwhm'] == None: last_ticket['fwhm'] = 0.0
n_patches = len(self.plot_canvas._backend.ax.patches)
if (n_patches > 0): self.plot_canvas._backend.ax.patches.remove(self.plot_canvas._backend.ax.patches[n_patches-1])
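# Descriptive note (added): when a FWHM was found, draw a double-headed arrow
# spanning the FWHM interval at the half-maximum height of the histogram.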
if not ticket['fwhm'] == 0.0:
x_fwhm_i, x_fwhm_f = ticket['fwhm_coordinates']
x_fwhm_i, x_fwhm_f = x_fwhm_i*factor, x_fwhm_f*factor
y_fwhm = ticket['fwhm_quote']
self.plot_canvas._backend.ax.add_patch(FancyArrowPatch([x_fwhm_i, y_fwhm],
[x_fwhm_f, y_fwhm],
arrowstyle=ArrowStyle.CurveAB(head_width=2, head_length=4),
color='b',
linewidth=1.5))
if min(histogram) < 0:
self.plot_canvas.setGraphYLimits(min(histogram), max(histogram))
else:
self.plot_canvas.setGraphYLimits(0, max(histogram))
self.plot_canvas.replot()
self.info_box.intensity.setText("{:4.3f}".format(ticket['intensity']))
self.info_box.set_flux(flux)
self.info_box.total_rays.setText(str(ticket['nrays']))
self.info_box.total_good_rays.setText(str(ticket['good_rays']))
self.info_box.total_lost_rays.setText(str(ticket['nrays']-ticket['good_rays']))
self.info_box.fwhm_h.setText("{:5.4f}".format(ticket['fwhm']*factor))
self.info_box.label_h.setText("FWHM " + xum)
self.info_box.sigma_h.setText("{:5.4f}".format(ticket['sigma']*factor))
self.info_box.label_s_h.setText("\u03c3 " + xum)
self.info_box.centroid_h.setText("{:5.4f}".format(ticket['centroid']*factor))
self.info_box.label_c_h.setText("centroid " + xum)
if not ticket_to_add is None:
return ticket, last_ticket
else:
return ticket, None
def clear(self):
self.plot_canvas.clear()
self.info_box.clear()
class DetailedPlotWidget(QWidget):
def __init__(self, x_scale_factor = 1.0, y_scale_factor = 1.0):
super(ShadowPlot.DetailedPlotWidget, self).__init__()
self.x_scale_factor = x_scale_factor
self.y_scale_factor = y_scale_factor
self.plot_canvas = gui.imageWiew(parent=self)
self.plot_canvas.setColormap({"name":"temperature", "normalization":"linear", "autoscale":True, "vmin":0, "vmax":0, "colors":256})
self.plot_canvas.setMinimumWidth(590 * x_scale_factor)
self.plot_canvas.setMaximumWidth(590 * x_scale_factor)
self.info_box = ShadowPlot.InfoBoxWidget(x_scale_factor, y_scale_factor)
layout = QGridLayout()
layout.addWidget(self.info_box, 0, 1, 1, 1)
layout.addWidget(self.plot_canvas, 0, 0, 1, 1)
layout.setColumnMinimumWidth(0, 600*x_scale_factor)
layout.setColumnMinimumWidth(1, 230*x_scale_factor)
self.setLayout(layout)
def plot_xy(self, beam, var_x, var_y, title, xtitle, ytitle, xrange=None, yrange=None, nolost=1, nbins=100, nbins_h=None, nbins_v=None, xum="", yum="", conv=1.0, ref=23, is_footprint=False, ticket_to_add=None, flux=None):
matplotlib.rcParams['axes.formatter.useoffset'] = False
if nbins_h == None: nbins_h = nbins
if nbins_v == None: nbins_v = nbins
ticket = beam.histo2(var_x, var_y, nbins=nbins, nbins_h=nbins_h, nbins_v=nbins_v, xrange=xrange, yrange=yrange, nolost=nolost, ref=ref)
if ref in [24, 25]: ticket['intensity'] = beam.getshonecol(ref, nolost=nolost).sum()
# TODO: check congruence between tickets
if not ticket_to_add is None:
last_ticket = copy.deepcopy(ticket)
ticket['histogram'] += ticket_to_add['histogram']
ticket['histogram_h'] += ticket_to_add['histogram_h']
ticket['histogram_v'] += ticket_to_add['histogram_v']
ticket['intensity'] += ticket_to_add['intensity']
ticket['nrays'] += ticket_to_add['nrays']
ticket['good_rays'] += ticket_to_add['good_rays']
ticket['fwhm_h'], ticket['fwhm_quote_h'], ticket['fwhm_coordinates_h'] = get_fwhm(ticket['histogram_h'], ticket['bin_h_center'])
ticket['fwhm_v'], ticket['fwhm_quote_v'], ticket['fwhm_coordinates_v'] = get_fwhm(ticket['histogram_v'], ticket['bin_v_center'])
ticket['sigma_h'] = get_sigma(ticket['histogram_h'], ticket['bin_h_center'])
ticket['sigma_v'] = get_sigma(ticket['histogram_v'], ticket['bin_v_center'])
ticket['centroid_h'] = get_average(ticket['histogram_h'], ticket['bin_h_center'])
ticket['centroid_v'] = get_average(ticket['histogram_v'], ticket['bin_v_center'])
if is_footprint:
factor1 = 1.0
factor2 = 1.0
else:
factor1=ShadowPlot.get_factor(var_x, conv)
factor2=ShadowPlot.get_factor(var_y, conv)
xx = ticket['bin_h_edges']
yy = ticket['bin_v_edges']
xmin, xmax = xx.min(), xx.max()
ymin, ymax = yy.min(), yy.max()
origin = (xmin*factor1, ymin*factor2)
scale = (abs((xmax-xmin)/nbins_h)*factor1, abs((ymax-ymin)/nbins_v)*factor2)
self.plot_canvas.setColormap({"name":QSettings().value("output/shadow-default-colormap", "temperature", str),
"normalization":"linear",
"autoscale":True,
"vmin":0,
"vmax":0,
"colors":256})
# PyMca inverts the axes, so the histogram must be transposed
self.plot_canvas.setImage(ticket['histogram'].T, origin=origin, scale=scale)
if xtitle is None: xtitle=ShadowPlot.get_shadow_label(var_x)
if ytitle is None: ytitle=ShadowPlot.get_shadow_label(var_y)
self.plot_canvas.setGraphXLabel(xtitle)
self.plot_canvas.setGraphYLabel(ytitle)
self.plot_canvas.setGraphTitle(title)
self.plot_canvas._histoHPlot.setGraphYLabel('A.U.')
self.plot_canvas._histoHPlot._backend.ax.xaxis.get_label().set_color('white')
self.plot_canvas._histoHPlot._backend.ax.xaxis.get_label().set_fontsize(1)
for label in self.plot_canvas._histoHPlot._backend.ax.xaxis.get_ticklabels():
label.set_color('white')
label.set_fontsize(1)
self.plot_canvas._histoVPlot.setGraphXLabel('A.U.')
self.plot_canvas._histoVPlot._backend.ax.yaxis.get_label().set_color('white')
self.plot_canvas._histoVPlot._backend.ax.yaxis.get_label().set_fontsize(1)
for label in self.plot_canvas._histoVPlot._backend.ax.yaxis.get_ticklabels():
label.set_color('white')
label.set_fontsize(1)
                if ticket['fwhm_h'] is None: ticket['fwhm_h'] = 0.0
                if ticket['fwhm_v'] is None: ticket['fwhm_v'] = 0.0
                if ticket_to_add is not None:
                    if last_ticket['fwhm_h'] is None: last_ticket['fwhm_h'] = 0.0
                    if last_ticket['fwhm_v'] is None: last_ticket['fwhm_v'] = 0.0
                n_patches = len(self.plot_canvas._histoHPlot._backend.ax.patches)
                if n_patches > 0: self.plot_canvas._histoHPlot._backend.ax.patches[n_patches-1].remove()
                if ticket['fwhm_h'] != 0.0:
x_fwhm_i, x_fwhm_f = ticket['fwhm_coordinates_h']
x_fwhm_i, x_fwhm_f = x_fwhm_i*factor1, x_fwhm_f*factor1
y_fwhm = ticket['fwhm_quote_h']
self.plot_canvas._histoHPlot._backend.ax.add_patch(FancyArrowPatch([x_fwhm_i, y_fwhm],
[x_fwhm_f, y_fwhm],
arrowstyle=ArrowStyle.CurveAB(head_width=2, head_length=4),
color='b',
linewidth=1.5))
                n_patches = len(self.plot_canvas._histoVPlot._backend.ax.patches)
                if n_patches > 0: self.plot_canvas._histoVPlot._backend.ax.patches[n_patches-1].remove()
                if ticket['fwhm_v'] != 0.0:
y_fwhm_i, y_fwhm_f = ticket['fwhm_coordinates_v']
y_fwhm_i, y_fwhm_f = y_fwhm_i*factor2, y_fwhm_f*factor2
x_fwhm = ticket['fwhm_quote_v']
self.plot_canvas._histoVPlot._backend.ax.add_patch(FancyArrowPatch([x_fwhm, y_fwhm_i],
[x_fwhm, y_fwhm_f],
arrowstyle=ArrowStyle.CurveAB(head_width=2, head_length=4),
color='r',
linewidth=1.5))
self.plot_canvas._histoHPlot.replot()
self.plot_canvas._histoVPlot.replot()
self.plot_canvas.replot()
self.info_box.intensity.setText("{:4.3f}".format(ticket['intensity']))
self.info_box.set_flux(flux)
self.info_box.total_rays.setText(str(ticket['nrays']))
self.info_box.total_good_rays.setText(str(ticket['good_rays']))
self.info_box.total_lost_rays.setText(str(ticket['nrays']-ticket['good_rays']))
self.info_box.fwhm_h.setText("{:5.4f}".format(ticket['fwhm_h'] * factor1))
self.info_box.fwhm_v.setText("{:5.4f}".format(ticket['fwhm_v'] * factor2))
self.info_box.label_h.setText("FWHM " + xum)
self.info_box.label_v.setText("FWHM " + yum)
self.info_box.sigma_h.setText("{:5.4f}".format(ticket['sigma_h'] * factor1))
self.info_box.sigma_v.setText("{:5.4f}".format(ticket['sigma_v'] * factor2))
self.info_box.label_s_h.setText("\u03c3 " + xum)
self.info_box.label_s_v.setText("\u03c3 " + yum)
self.info_box.centroid_h.setText("{:5.4f}".format(ticket['centroid_h'] * factor1))
self.info_box.centroid_v.setText("{:5.4f}".format(ticket['centroid_v'] * factor2))
self.info_box.label_c_h.setText("centroid " + xum)
self.info_box.label_c_v.setText("centroid " + yum)
                if ticket_to_add is not None:
return ticket, last_ticket
else:
return ticket, None
def clear(self):
self.plot_canvas.clear()
self.plot_canvas._histoHPlot.clear()
self.plot_canvas._histoVPlot.clear()
self.plot_canvas._histoHPlot._backend.ax.xaxis.get_label().set_color('white')
self.plot_canvas._histoHPlot._backend.ax.xaxis.get_label().set_fontsize(1)
for label in self.plot_canvas._histoHPlot._backend.ax.xaxis.get_ticklabels():
label.set_color('white')
label.set_fontsize(1)
self.plot_canvas._histoVPlot._backend.ax.yaxis.get_label().set_color('white')
self.plot_canvas._histoVPlot._backend.ax.yaxis.get_label().set_fontsize(1)
for label in self.plot_canvas._histoVPlot._backend.ax.yaxis.get_ticklabels():
label.set_color('white')
label.set_fontsize(1)
self.plot_canvas._histoHPlot.setGraphYLabel('A.U.')
self.plot_canvas._histoVPlot.setGraphXLabel('A.U.')
self.plot_canvas._histoHPlot.replot()
self.plot_canvas._histoVPlot.replot()
self.info_box.clear()
#########################################################################################
@classmethod
def plotxy_preview(cls, plot_window, beam, var_x, var_y, nolost=0, title='PLOTXY', xtitle=None, ytitle=None, conv=1.0, is_footprint=False):
matplotlib.rcParams['axes.formatter.useoffset']='False'
col1 = beam.getshonecol(var_x, nolost=nolost)
col2 = beam.getshonecol(var_y, nolost=nolost)
if is_footprint:
factor1 = 1.0
factor2 = 1.0
else:
factor1 = ShadowPlot.get_factor(var_x, conv)
factor2 = ShadowPlot.get_factor(var_y, conv)
if xtitle is None: xtitle=ShadowPlot.get_shadow_label(var_x)
if ytitle is None: ytitle=ShadowPlot.get_shadow_label(var_y)
plot_window.addCurve(col1*factor1, col2*factor2, title, symbol='.', color='blue', replace=True) #'+', '^', ','
            if xtitle is not None: plot_window.setGraphXLabel(xtitle)
            if ytitle is not None: plot_window.setGraphYLabel(ytitle)
            if title is not None: plot_window.setGraphTitle(title)
plot_window.setInteractiveMode(mode='zoom')
@classmethod
def plot_histo_preview(cls, plot_window, beam, col, nolost, ref, title, xtitle, ytitle, conv=1.0):
matplotlib.rcParams['axes.formatter.useoffset']='False'
factor=ShadowPlot.get_factor(col, conv)
ticket = beam.histo1(col, nbins=100, xrange=None, nolost=nolost, ref=ref)
            if ref != 0 and ytitle is not None: ytitle = ytitle + ' weighted by ' + ShadowPlot.get_shadow_label(ref)
histogram = ticket['histogram_path']
bins = ticket['bin_path']*factor
plot_window.addCurve(bins, histogram, title, symbol='', color='blue', replace=True) #'+', '^', ','
            if xtitle is not None: plot_window.setGraphXLabel(xtitle)
            if ytitle is not None: plot_window.setGraphYLabel(ytitle)
            if title is not None: plot_window.setGraphTitle(title)
plot_window.setInteractiveMode(mode='zoom')
if min(histogram) < 0:
plot_window.setGraphYLimits(min(histogram), max(histogram))
else:
plot_window.setGraphYLimits(0, max(histogram))
plot_window.replot()
@classmethod
def get_factor(cls, var, conv):
factor = 1.0
if ShadowPlot._is_conversione_active:
                if var in (1, 2, 3):
                    factor = 1e4*conv # cm to micron
                elif var in (4, 5, 6):
                    factor = 1e6 # rad to urad
return factor
@classmethod
def get_shadow_label(cls, var):
return (stp.getLabel(var-1))[0]
#########################################################################################
#
# SAVE/LOAD FILES in HDF5 Format
#
#########################################################################################
class PlotXYHdf5File(h5py.File):
def __init__(self, file_name, mode="w"):
try:
super(ShadowPlot.PlotXYHdf5File, self).__init__(name=file_name, mode=mode)
except OSError as e:
if "already open" in str(e) and mode=="w":
super(ShadowPlot.PlotXYHdf5File, self).__init__(name=file_name, mode="a")
self.close()
super(ShadowPlot.PlotXYHdf5File, self).__init__(name=file_name, mode="w")
if mode != "r":
self.coordinates = self.create_group("coordinates")
self.plots = self.create_group("xy_plots")
self.additional_data = self.create_group("additional_data")
self.last_plot = self.plots.create_group("last_plot")
self.has_last_plot = False
self.has_coordinate = False
self.attrs["default"] = "coordinates/X"
self.attrs["file_name"] = file_name
self.attrs["file_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
self.attrs["creator"] = "PlotXYHdf5File.__init__"
self.attrs["code"] = "ShadowOui"
self.attrs["HDF5_Version"] = h5py.version.hdf5_version
self.attrs["h5py_version"] = h5py.version.version
def get_last_plot(self, dataset_name="intensity"):
return self.get_plot_xy(dataset_name=dataset_name)
def get_coordinates(self):
bin_h_center = self["coordinates/X"][()]
h_label = self["coordinates"].attrs["x_label"]
bin_v_center = self["coordinates/Y"][()]
v_label = self["coordinates"].attrs["y_label"]
return bin_h_center, bin_v_center, h_label, v_label
def get_plot_xy(self, plot_name="last_plot", dataset_name="intensity"):
histogram = self["/xy_plots/" + plot_name + "/" + dataset_name][()]
histogram_h = self["/xy_plots/" + plot_name + "/histogram_h"][()]
histogram_v = self["/xy_plots/" + plot_name + "/histogram_v"][()]
return histogram, histogram_h, histogram_v, self["/xy_plots/" + plot_name].attrs
def write_coordinates(self, ticket):
if not self.has_coordinate:
self.x = self.coordinates.create_dataset("X", data=ticket["bin_h_center"])
self.y = self.coordinates.create_dataset("Y", data=ticket["bin_v_center"])
self.has_coordinate = True
else:
self.x[...] = ticket["bin_h_center"]
self.y[...] = ticket["bin_v_center"]
try:
self.coordinates.attrs["x_label"] = ShadowPlot.get_shadow_label(ticket["col_h"])
self.coordinates.attrs["y_label"] = ShadowPlot.get_shadow_label(ticket["col_v"])
except:
self.coordinates.attrs["x_label"] = ticket["h_label"]
self.coordinates.attrs["y_label"] = ticket["v_label"]
def add_plot_xy(self, ticket, plot_name="last_plot", dataset_name="intensity", attributes={}):
if plot_name is None or plot_name.strip() == "" or plot_name.strip() == "last_plot":
if not self.has_last_plot:
self.lp_histogram = self.last_plot.create_dataset(dataset_name, data=ticket['histogram'])
self.lp_histogram_h = self.last_plot.create_dataset("histogram_h", data=ticket['histogram_h'])
self.lp_histogram_v = self.last_plot.create_dataset("histogram_v", data=ticket['histogram_v'])
self.has_last_plot = True
else:
if self.lp_histogram.name != "/xy_plots/last_plot/" + dataset_name:
self.last_plot.move(self.lp_histogram.name, "/xy_plots/last_plot/" + dataset_name)
self.lp_histogram[...] = ticket['histogram']
self.lp_histogram_h[...] = ticket['histogram_h']
self.lp_histogram_v[...] = ticket['histogram_v']
self.last_plot.attrs["intensity"] = ticket["intensity"]
self.last_plot.attrs["total_rays"] = ticket["nrays"]
self.last_plot.attrs["good_rays"] = ticket["good_rays"]
self.last_plot.attrs["lost_rays"] = ticket["nrays"]-ticket["good_rays"]
                    if attributes is not None:
for key in attributes.keys():
self.last_plot.attrs[key] = attributes[key]
else:
plot = self.plots.create_group(plot_name)
plot.create_dataset(dataset_name, data=ticket['histogram'])
plot.create_dataset("histogram_h", data=ticket['histogram_h'])
plot.create_dataset("histogram_v", data=ticket['histogram_v'])
plot.attrs["intensity"] = ticket["intensity"]
plot.attrs["total_rays"] = ticket["nrays"]
plot.attrs["good_rays"] = ticket["good_rays"]
plot.attrs["lost_rays"] = ticket["nrays"]-ticket["good_rays"]
                    if attributes is not None:
for key in attributes.keys():
plot.attrs[key] = attributes[key]
def add_attribute(self, attribute_name, attribute_value, dataset_name=None):
                if dataset_name is not None:
self[dataset_name].attrs[attribute_name] = attribute_value
else:
self.attrs[attribute_name] = attribute_value
def get_attribute(self, attribute_name, dataset_name=None):
                if dataset_name is not None:
return self[dataset_name].attrs[attribute_name]
else:
return self.attrs[attribute_name]
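        # Minimal usage sketch (file name hypothetical; "ticket" as returned by
        # Shadow's beam.histo2):
        #
        #   hdf5_file = ShadowPlot.PlotXYHdf5File("plot.hdf5")
        #   hdf5_file.write_coordinates(ticket)
        #   hdf5_file.add_plot_xy(ticket)   # stored under /xy_plots/last_plot
        #   hdf5_file.close()
        #
        # Reopening with mode="r" gives access via get_coordinates()/get_plot_xy().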
class HistogramHdf5File(h5py.File):
def __init__(self, file_name, mode="w"):
super(ShadowPlot.HistogramHdf5File, self).__init__(name=file_name, mode=mode)
self.coordinates = self.create_group("coordinates")
self.plots = self.create_group("histogram_plots")
self.last_plot = self.plots.create_group("last_plot")
self.has_last_plot = False
self.has_coordinate = False
self.attrs["default"] = "coordinates/X"
self.attrs["file_name"] = file_name
self.attrs["file_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
self.attrs["creator"] = "PlotXYHdf5File.__init__"
self.attrs["code"] = "ShadowOui"
self.attrs["HDF5_Version"] = h5py.version.hdf5_version
self.attrs["h5py_version"] = h5py.version.version
def write_coordinates(self, ticket):
if not self.has_coordinate:
self.x = self.coordinates.create_dataset("X", data=ticket["bin_center"])
self.has_coordinate = True
else:
self.x[...] = ticket["bin_center"]
self.coordinates.attrs["x_label"] = ShadowPlot.get_shadow_label(ticket["col"])
def add_histogram(self, ticket, plot_name="last_plot", dataset_name="intensity", attributes={}):
if plot_name is None or plot_name.strip() == "" or plot_name.strip() == "last_plot":
if not self.has_last_plot:
self.lp_histogram = self.last_plot.create_dataset(dataset_name, data=ticket['histogram'])
self.has_last_plot = True
else:
if self.lp_histogram.name != "/histogram_plots/last_plot/" + dataset_name:
self.last_plot.move(self.lp_histogram.name, "/histogram_plots/last_plot/" + dataset_name)
self.lp_histogram[...] = ticket['histogram']
self.last_plot.attrs["intensity"] = ticket["intensity"]
self.last_plot.attrs["total_rays"] = ticket["nrays"]
self.last_plot.attrs["good_rays"] = ticket["good_rays"]
self.last_plot.attrs["lost_rays"] = ticket["nrays"]-ticket["good_rays"]
                    if attributes is not None:
for key in attributes.keys():
self.last_plot.attrs[key] = attributes[key]
else:
plot = self.plots.create_group(plot_name)
plot.create_dataset(dataset_name, data=ticket['histogram'])
plot.attrs["intensity"] = ticket["intensity"]
plot.attrs["total_rays"] = ticket["nrays"]
plot.attrs["good_rays"] = ticket["good_rays"]
plot.attrs["lost_rays"] = ticket["nrays"]-ticket["good_rays"]
                    if attributes is not None:
for key in attributes.keys():
plot.attrs[key] = attributes[key]
except:
pass
from scipy.interpolate import RectBivariateSpline
class ShadowPreProcessor:
@classmethod
def read_surface_error_file(cls, filename):
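        # Expected ASCII layout (as parsed below): the first row holds "n_x n_y",
        # the next rows list the y coordinates, and the remainder is either
        # two-column "x z" pairs (one z value per line) or rows of "x z_1 ... z_n_y",
        # matching the surface-error files produced by the Shadow preprocessors.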
        with open(congruence.checkFile(filename), "r") as file:
            rows = file.readlines()
dimensions = rows[0].split()
n_x = int(dimensions[0])
n_y = int(dimensions[1])
if n_x > 500:
raise Exception("Malformed file: maximum allowed point in X direction is 500")
x_coords = numpy.zeros(0)
y_coords = numpy.zeros(0)
z_values = numpy.zeros((n_x, n_y))
index = 1
dim_y_row = len(rows[index].split())
is_ycoord = True
first_x_row_index = 0
        while is_ycoord:
y_values = rows[index].split()
if len(y_values) == dim_y_row:
for y_value in y_values:
y_coords = numpy.append(y_coords, float(y_value))
else:
first_x_row_index = index
is_ycoord = False
            index += 1
first_x_row = rows[first_x_row_index].split()
if len(first_x_row) == 2:
x_index = 0
z_index = 0
for index in range(first_x_row_index, len(rows)):
if z_index == 0:
values = rows[index].split()
x_coords = numpy.append(x_coords, float(values[0]))
z_value = float(values[1])
else:
z_value = float(rows[index])
z_values[x_index, z_index] = z_value
z_index += 1
if z_index == n_y:
x_index += 1
z_index = 0
else:
x_rows = []
for index in range(2, len(rows)):
x_row = rows[index].split("\t")
if len(x_row) != 1 + n_y:
x_row = rows[index].split()
if len(x_row) != 1 + n_y:
raise Exception("Malformed file: check format")
x_rows.append(x_row)
for x_index in range(0, len(x_rows)):
x_coords = numpy.append(x_coords, float(x_rows[x_index][0]))
for z_index in range(0, len(x_rows[x_index]) - 1):
z_value = float(x_rows[x_index][z_index + 1])
z_values[x_index, z_index] = z_value
return x_coords, y_coords, z_values
@classmethod
def apply_user_diffraction_profile(cls, crystal, h, k, l, asymmetry_angle, file_diffraction_profile, input_beam):
values = numpy.loadtxt(os.path.abspath(os.path.curdir + "/angle." +
("0" + str(input_beam._oe_number) if (input_beam._oe_number < 10) else
str(input_beam._oe_number))))
beam_incident_angles = values[:, 1]
beam_wavelengths = ShadowPhysics.getWavelengthFromShadowK(input_beam._beam.rays[:, 10])
d_spacing = xraylib.Crystal_dSpacing(xraylib.Crystal_GetCrystal(crystal), h, k, l)
bragg_angles = numpy.degrees(numpy.arcsin(0.5*beam_wavelengths/d_spacing))
diffraction_angles = 90 - (bragg_angles - asymmetry_angle)
delta_thetas = diffraction_angles - beam_incident_angles
values = numpy.loadtxt(os.path.abspath(file_diffraction_profile) if file_diffraction_profile.startswith('/') else
os.path.abspath(os.path.curdir + "/" + file_diffraction_profile))
crystal_delta_thetas = values[:, 0]
crystal_reflectivities_s = values[:, 1]
interpolated_weight_s = numpy.sqrt(numpy.interp(delta_thetas,
crystal_delta_thetas,
crystal_reflectivities_s,
left=crystal_reflectivities_s[0],
right=crystal_reflectivities_s[-1]))
if values.shape[1] == 2: interpolated_weight_p = interpolated_weight_s
elif values.shape[1] >= 3:
crystal_reflectivities_p = values[:, 2]
interpolated_weight_p = numpy.sqrt(numpy.interp(delta_thetas,
crystal_delta_thetas,
crystal_reflectivities_p,
left=crystal_reflectivities_p[0],
right=crystal_reflectivities_p[-1]))
output_beam = input_beam.duplicate()
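        # The diffraction profile is an intensity ratio, while Shadow rays store
        # electric field components (columns 7-9 for s-, 16-18 for p-polarization):
        # intensity goes as |E|^2, so the square root of the profile is applied to
        # each field component below.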
output_beam._beam.rays[:, 6] = output_beam._beam.rays[:, 6] * interpolated_weight_s
output_beam._beam.rays[:, 7] = output_beam._beam.rays[:, 7] * interpolated_weight_s
output_beam._beam.rays[:, 8] = output_beam._beam.rays[:, 8] * interpolated_weight_s
output_beam._beam.rays[:, 15] = output_beam._beam.rays[:, 15] * interpolated_weight_p
output_beam._beam.rays[:, 16] = output_beam._beam.rays[:, 16] * interpolated_weight_p
output_beam._beam.rays[:, 17] = output_beam._beam.rays[:, 17] * interpolated_weight_p
return output_beam
@classmethod
def apply_user_reflectivity(cls, file_type, angle_units, energy_units, file_reflectivity, input_beam):
if file_type == 0: # angle vs refl.
values = numpy.loadtxt(os.path.abspath(os.path.curdir + "/angle." +
("0" + str(input_beam._oe_number) if (input_beam._oe_number < 10) else
str(input_beam._oe_number))))
beam_incident_angles = 90.0 - values[:, 1]
values = numpy.loadtxt(os.path.abspath(file_reflectivity) if file_reflectivity.startswith('/') else
os.path.abspath(os.path.curdir + "/" + file_reflectivity))
mirror_grazing_angles = values[:, 0]
mirror_reflectivities = values[:, 1]
            if mirror_grazing_angles[-1] < mirror_grazing_angles[0]: # XOPPY MLayer gives angles in descending order
mirror_grazing_angles = values[:, 0][::-1]
mirror_reflectivities = values[:, 1][::-1]
if angle_units == 0: mirror_grazing_angles = numpy.degrees(1e-3 * mirror_grazing_angles) # mrad to deg
interpolated_weight_s = numpy.sqrt(numpy.interp(beam_incident_angles,
mirror_grazing_angles,
mirror_reflectivities,
left=mirror_reflectivities[0],
right=mirror_reflectivities[-1]))
interpolated_weight_p = interpolated_weight_s
elif file_type == 1: # Energy vs Refl.
beam_energies = ShadowPhysics.getEnergyFromShadowK(input_beam._beam.rays[:, 10])
            values = numpy.loadtxt(os.path.abspath(file_reflectivity) if file_reflectivity.startswith('/') else
                                   os.path.abspath(os.path.curdir + "/" + file_reflectivity))
mirror_energies = values[:, 0]
mirror_reflectivities = values[:, 1]
            if energy_units == 1: mirror_energies *= 1e3 # keV to eV
interpolated_weight_s = numpy.sqrt(numpy.interp(beam_energies,
mirror_energies,
mirror_reflectivities,
left=mirror_reflectivities[0],
right=mirror_reflectivities[-1]))
interpolated_weight_p = interpolated_weight_s
elif file_type == 2: # 2D Energy vs Angle vs Reflectivity
values = numpy.loadtxt(os.path.abspath(os.path.curdir + "/angle." +
("0" + str(input_beam._oe_number) if (input_beam._oe_number < 10) else
str(input_beam._oe_number))))
beam_incident_angles = 90.0 - values[:, 1]
beam_energies = ShadowPhysics.getEnergyFromShadowK(input_beam._beam.rays[:, 10])
            values = numpy.loadtxt(os.path.abspath(file_reflectivity) if file_reflectivity.startswith('/') else
                                   os.path.abspath(os.path.curdir + "/" + file_reflectivity))
mirror_energies = values[:, 0]
mirror_grazing_angles = values[:, 1]
mirror_energies = numpy.unique(mirror_energies)
mirror_grazing_angles = numpy.unique(mirror_grazing_angles)
if angle_units == 0: mirror_grazing_angles = numpy.degrees(1e-3 * mirror_grazing_angles)
            if energy_units == 1: mirror_energies *= 1e3 # keV to eV
def get_interpolator_weight_2D(mirror_energies, mirror_grazing_angles, mirror_reflectivities):
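                # The flat reflectivity column is assumed to be ordered with energy
                # as the slow (outer) index and grazing angle as the fast one, so it
                # can be reshaped to an (n_energies, n_angles) grid for the spline.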
mirror_reflectivities = numpy.reshape(mirror_reflectivities, (mirror_energies.shape[0], mirror_grazing_angles.shape[0]))
interpolator = RectBivariateSpline(mirror_energies, mirror_grazing_angles, mirror_reflectivities, kx=2, ky=2)
interpolated_weight = numpy.zeros(beam_energies.shape[0])
for energy, angle, i in zip(beam_energies, beam_incident_angles, range(interpolated_weight.shape[0])):
interpolated_weight[i] = numpy.sqrt(interpolator(energy, angle))
interpolated_weight[numpy.where(numpy.isnan(interpolated_weight))] = 0.0
return interpolated_weight
if values.shape[1] == 3:
mirror_reflectivities = values[:, 2]
interpolated_weight_s = get_interpolator_weight_2D(mirror_energies, mirror_grazing_angles, mirror_reflectivities)
interpolated_weight_p = interpolated_weight_s
elif values.shape[1] == 4:
mirror_reflectivities_s = values[:, 2]
mirror_reflectivities_p = values[:, 3]
interpolated_weight_s = get_interpolator_weight_2D(mirror_energies, mirror_grazing_angles, mirror_reflectivities_s)
interpolated_weight_p = get_interpolator_weight_2D(mirror_energies, mirror_grazing_angles, mirror_reflectivities_p)
else:
raise ValueError("User input is inconsistent: not a 2D reflectivity profile")
output_beam = input_beam.duplicate()
output_beam._beam.rays[:, 6] = output_beam._beam.rays[:, 6] * interpolated_weight_s
output_beam._beam.rays[:, 7] = output_beam._beam.rays[:, 7] * interpolated_weight_s
output_beam._beam.rays[:, 8] = output_beam._beam.rays[:, 8] * interpolated_weight_s
output_beam._beam.rays[:, 15] = output_beam._beam.rays[:, 15] * interpolated_weight_p
output_beam._beam.rays[:, 16] = output_beam._beam.rays[:, 16] * interpolated_weight_p
output_beam._beam.rays[:, 17] = output_beam._beam.rays[:, 17] * interpolated_weight_p
return output_beam
@classmethod
def apply_user_grating_efficiency(cls, grating_file_efficiency, input_beam):
beam_energies = ShadowPhysics.getEnergyFromShadowK(input_beam._beam.rays[:, 10])
        values = numpy.loadtxt(os.path.abspath(grating_file_efficiency) if grating_file_efficiency.startswith('/') else
                               os.path.abspath(os.path.curdir + "/" + grating_file_efficiency))
grating_energies = values[:, 0]
grating_efficiencies_s = values[:, 1]
interpolated_weight_s = numpy.sqrt(numpy.interp(beam_energies,
grating_energies,
grating_efficiencies_s,
left=grating_efficiencies_s[0],
right=grating_efficiencies_s[-1]))
if values.shape[1] == 2: interpolated_weight_p = interpolated_weight_s
elif values.shape[1] >= 3:
grating_efficiencies_p = values[:, 2]
interpolated_weight_p = numpy.sqrt(numpy.interp(beam_energies,
grating_energies,
grating_efficiencies_p,
left=grating_efficiencies_p[0],
right=grating_efficiencies_p[-1]))
output_beam = input_beam.duplicate()
output_beam._beam.rays[:, 6] = output_beam._beam.rays[:, 6] * interpolated_weight_s
output_beam._beam.rays[:, 7] = output_beam._beam.rays[:, 7] * interpolated_weight_s
output_beam._beam.rays[:, 8] = output_beam._beam.rays[:, 8] * interpolated_weight_s
output_beam._beam.rays[:, 15] = output_beam._beam.rays[:, 15] * interpolated_weight_p
output_beam._beam.rays[:, 16] = output_beam._beam.rays[:, 16] * interpolated_weight_p
output_beam._beam.rays[:, 17] = output_beam._beam.rays[:, 17] * interpolated_weight_p
return output_beam
class ShadowMath:
@classmethod
def gaussian_fit(cls, data_x, data_y):
x = asarray(data_x)
y = asarray(data_y)
y_norm = y/sum(y)
mean = sum(x*y_norm)
sigma = numpy.sqrt(sum(y_norm*(x-mean)**2)/len(x))
amplitude = max(y)
parameters, covariance_matrix = optimize.curve_fit(ShadowMath.gaussian_function, x, y, p0 = [amplitude, mean, sigma])
        parameters.resize(4)
        parameters[3] = 2.355*parameters[2] # FWHM = 2*sqrt(2*ln(2))*sigma ~ 2.355*sigma
return parameters, covariance_matrix
@classmethod
def gaussian_function(cls, x, A, x0, sigma):
return A*numpy.exp(-(x-x0)**2/(2*sigma**2))
@classmethod
def pseudovoigt_fit(cls, data_x, data_y):
x = asarray(data_x)
y = asarray(data_y)
y_norm = y/sum(y)
amplitude = max(data_y)
mean = sum(x*y_norm)
fwhm = numpy.sqrt(sum(y_norm*(x-mean)**2)/len(x))*2.355
mixing = 0.1
parameters, covariance_matrix = optimize.curve_fit(ShadowMath.pseudovoigt_function,
x, y,
p0 = [amplitude, mean, fwhm, mixing],
bounds = ([0.1*amplitude, 0.9*mean, 0.1*fwhm, 1e-3],
[10.0*amplitude, 1.1*mean, 10.0*fwhm, 1.0]))
return parameters, covariance_matrix
@classmethod
def pseudovoigt_function(cls, x, A, x0, fwhm, mixing):
sigma = fwhm/2.355
gamma = fwhm/2
return A*(mixing*numpy.exp(-(x-x0)**2/(2*sigma**2)) + (1-mixing)*((gamma**2)/((x-x0)**2 + gamma**2)))
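    # pseudovoigt_function is the standard pseudo-Voigt profile: a linear mix of
    # a Gaussian and a Lorentzian sharing the same FWHM, with "mixing" in [0, 1]
    # weighting the Gaussian term (sigma = FWHM/2.355, gamma = FWHM/2).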
@classmethod
def caglioti_broadening_fit(cls, data_x, data_y):
x = asarray(data_x)
y = asarray(data_y)
parameters, covariance_matrix = optimize.curve_fit(ShadowMath.caglioti_broadening_function,
x, y,
p0=[0.0001, 0.0001, 0.0001],
bounds = ([ -1.0, -1.0, -1.0],
[ 1.0, 1.0, 1.0]))
return parameters, covariance_matrix
@classmethod
def caglioti_broadening_function(cls, x, U, V, W):
return numpy.sqrt(W + V * (numpy.tan(x*numpy.pi/360)) + U * (numpy.tan(x*numpy.pi/360))**2)
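    # caglioti_broadening_function is the Caglioti resolution law
    # FWHM(2*theta) = sqrt(U*tan(theta)**2 + V*tan(theta) + W); x is 2*theta in
    # degrees, hence the x*pi/360 conversion to theta in radians.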
@classmethod
def caglioti_shape_fit(cls, data_x, data_y):
x = asarray(data_x)
y = asarray(data_y)
parameters, covariance_matrix = optimize.curve_fit(ShadowMath.caglioti_shape_function,
x, y,
p0=[0.1, 0.0, 0.0],
bounds = ([ 0.0, -1.0, -1.0],
[ 1.0, 1.0, 1.0]))
return parameters, covariance_matrix
@classmethod
def caglioti_shape_function(cls, x, a, b, c):
return a + b*(x*numpy.pi/360) + c*(x*numpy.pi/360)**2
@classmethod
def vectorial_product(cls, vector1, vector2):
result = [0.0, 0.0, 0.0]
result[0] = vector1[1]*vector2[2] - vector1[2]*vector2[1]
result[1] = -(vector1[0]*vector2[2] - vector1[2]*vector2[0])
result[2] = vector1[0]*vector2[1] - vector1[1]*vector2[0]
return result
@classmethod
def scalar_product(cls, vector1, vector2):
return vector1[0]*vector2[0] + vector1[1]*vector2[1] + vector1[2]*vector2[2]
@classmethod
def vector_modulus(cls, vector):
return numpy.sqrt(cls.scalar_product(vector, vector))
@classmethod
def vector_multiply(cls, vector, constant):
result = [0.0, 0.0, 0.0]
result[0] = vector[0] * constant
result[1] = vector[1] * constant
result[2] = vector[2] * constant
return result
@classmethod
def vector_divide(cls, vector, constant):
result = [0.0, 0.0, 0.0]
result[0] = vector[0] / constant
result[1] = vector[1] / constant
result[2] = vector[2] / constant
return result
@classmethod
def vector_normalize(cls, vector):
return cls.vector_divide(vector, cls.vector_modulus(vector))
@classmethod
def vector_sum(cls, vector1, vector2):
result = [0.0, 0.0, 0.0]
result[0] = vector1[0] + vector2[0]
result[1] = vector1[1] + vector2[1]
result[2] = vector1[2] + vector2[2]
return result
@classmethod
def vector_difference(cls, vector1, vector2):
result = [0.0, 0.0, 0.0]
result[0] = vector1[0] - vector2[0]
result[1] = vector1[1] - vector2[1]
result[2] = vector1[2] - vector2[2]
return result
##########################################################################
# Rodrigues Formula:
#
# rotated = vector * cos(rotation_angle) +
# (rotation_axis x vector) * sin(rotation_angle) +
# rotation_axis*(rotation_axis . vector)(1 - cos(rotation_angle))
#
# rotation_angle in radians
#
##########################################################################
@classmethod
def vector_rotate(cls, rotation_axis, rotation_angle, vector):
result_temp_1 = ShadowMath.vector_multiply(vector, numpy.cos(rotation_angle))
result_temp_2 = ShadowMath.vector_multiply(ShadowMath.vectorial_product(rotation_axis, vector),
numpy.sin(rotation_angle))
result_temp_3 = ShadowMath.vector_multiply(ShadowMath.vector_multiply(rotation_axis, ShadowMath.scalar_product(rotation_axis, vector)), (1 - numpy.cos(rotation_angle)))
result = ShadowMath.vector_sum(result_temp_1,
ShadowMath.vector_sum(result_temp_2, result_temp_3))
return result
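    # Sanity-check sketch (hypothetical values): rotating the x axis by pi/2
    # around the z axis yields the y axis,
    #
    #   ShadowMath.vector_rotate([0, 0, 1], numpy.pi/2, [1, 0, 0])
    #   # -> [0.0, 1.0, 0.0] up to floating-point rounding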
@classmethod
def point_distance(cls, point1, point2):
return cls.vector_modulus(cls.vector_difference(point1, point2))
class ShadowPhysics:
A2EV = (codata.h*codata.c/codata.e)*1e+10
K2EV = 2*numpy.pi/(codata.h*codata.c/codata.e*1e+2)
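    # A2EV is h*c/e in eV*Angstrom (~12398.4): E[eV] = A2EV / lambda[Angstrom].
    # K2EV converts Shadow's wavenumber k = 2*pi/lambda (in cm^-1) to energy:
    # E[eV] = k[cm^-1] / K2EV.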
    @classmethod
    def getWavelengthFromShadowK(cls, k_mod): # k_mod in cm^-1
        return (2*numpy.pi/k_mod)*1e+8 # in Angstrom
    @classmethod
    def getShadowKFromWavelength(cls, wavelength): # wavelength in Angstrom
        return (2*numpy.pi/wavelength)*1e+8 # in cm^-1
    @classmethod
    def getWavelengthFromEnergy(cls, energy): # energy in eV
        return cls.A2EV/energy # in Angstrom
    @classmethod
    def getEnergyFromWavelength(cls, wavelength): # wavelength in Angstrom
        return cls.A2EV/wavelength # in eV
    @classmethod
    def getEnergyFromShadowK(cls, k_mod): # k_mod in cm^-1
        return k_mod/cls.K2EV # in eV
    @classmethod
    def getShadowKFromEnergy(cls, energy): # energy in eV
        return cls.K2EV*energy # in cm^-1
@classmethod
def calculateBraggAngle(cls, wavelength, h, k, l, a):
        # lambda = 2 pi / |k| = 2 d sin(th)
        #
        # sin(th) = lambda / (2 d)
        #
        # d = a / sqrt(h^2 + k^2 + l^2)
        #
        # sin(th) = (sqrt(h^2 + k^2 + l^2) * lambda)/(2 a)
theta_bragg = -1
argument = wavelength*numpy.sqrt(h**2+k**2+l**2)/(2*a)
if argument <= 1:
result = numpy.arcsin(argument)
if result > 0: theta_bragg = result
return theta_bragg
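    # Worked example: for Si(111), a = 5.4307 Angstrom and lambda = 1.5406 Angstrom
    # (Cu K-alpha) give calculateBraggAngle(1.5406, 1, 1, 1, 5.4307) ~ 0.248 rad
    # (~14.22 deg), matching the tabulated Si(111) Bragg angle.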
@classmethod
def checkCompoundName(cls, compound_name):
if compound_name is None: raise Exception("Compound Name is Empty")
if str(compound_name.strip()) == "": raise Exception("Compound Name is Empty")
compound_name = compound_name.strip()
try:
xraylib.CompoundParser(compound_name)
return compound_name
except:
raise Exception("Compound Name is not correct")
@classmethod
def getMaterialDensity(cls, material_name):
if material_name is None: return 0.0
if str(material_name.strip()) == "": return 0.0
try:
compoundData = xraylib.CompoundParser(material_name)
n_elements = compoundData["nElements"]
if n_elements == 1:
return xraylib.ElementDensity(compoundData["Elements"][0])
else:
density = 0.0
mass_fractions = compoundData["massFractions"]
elements = compoundData["Elements"]
for i in range(n_elements): density += xraylib.ElementDensity(elements[i]) * mass_fractions[i]
return density
except:
return 0.0
@classmethod
def ConstatoBackgroundNoised(cls, constant_value=0, n_sigma=1.0, random_generator=random.Random()):
sigma = numpy.sqrt(constant_value) # poisson statistic
noise = (n_sigma*sigma)*random_generator.random()
sign_marker = random_generator.random()
if sign_marker > 0.5:
return int(round(constant_value+noise, 0))
else:
return int(round(constant_value-noise, 0))
@classmethod
def Chebyshev(cls, n, x):
        if n == 0: return 1
        elif n == 1: return x
        else: return 2*x*cls.Chebyshev(n-1, x) - cls.Chebyshev(n-2, x)
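    # Chebyshev implements the three-term recurrence T_n(x) = 2*x*T_{n-1}(x) - T_{n-2}(x).
    # The naive double recursion costs exponential time in n, which is acceptable
    # for the low polynomial orders used by the background models below.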
@classmethod
def ChebyshevBackground(cls, coefficients=[0,0,0,0,0,0], twotheta=0):
coefficients_set = range(0, len(coefficients))
background = 0
for index in coefficients_set:
background += coefficients[index]*cls.Chebyshev(index, twotheta)
return background
@classmethod
def ChebyshevBackgroundNoised(cls, coefficients=[0,0,0,0,0,0], twotheta=0.0, n_sigma=1.0, random_generator=random.Random()):
background = cls.ChebyshevBackground(coefficients, twotheta)
sigma = numpy.sqrt(background) # poisson statistic
noise = (n_sigma*sigma)*random_generator.random()
sign_marker = random_generator.random()
if sign_marker > 0.5:
return int(round(background+noise, 0))
else:
return int(round(background-noise, 0))
@classmethod
def ExpDecay(cls, h, x):
return numpy.exp(-h*x)
@classmethod
def ExpDecayBackground(cls, coefficients=[0,0,0,0,0,0], decayparams=[0,0,0,0,0,0], twotheta=0):
coefficients_set = range(0, len(coefficients))
background = 0
for index in coefficients_set:
background += coefficients[index]*cls.ExpDecay(decayparams[index], twotheta)
return background
@classmethod
def ExpDecayBackgroundNoised(cls, coefficients=[0,0,0,0,0,0], decayparams=[0,0,0,0,0,0], twotheta=0, n_sigma=1, random_generator=random.Random()):
background = cls.ExpDecayBackground(coefficients, decayparams, twotheta)
sigma = numpy.sqrt(background) # poisson statistic
noise = (n_sigma*sigma)*random_generator.random()
sign_marker = random_generator.random()
if sign_marker > 0.5:
return int(round(background+noise, 0))
else:
return int(round(background-noise, 0))
import re
import time
class Properties(object):
def __init__(self, props=None):
self._props = {}
self._origprops = {}
self._keymap = {}
self.othercharre = re.compile(r'(?<!\\)(\s*\=)|(?<!\\)(\s*\:)')
self.othercharre2 = re.compile(r'(\s*\=)|(\s*\:)')
self.bspacere = re.compile(r'\\(?!\s$)')
def __str__(self):
s='{'
for key,value in self._props.items():
s = ''.join((s,key,'=',value,', '))
s=''.join((s[:-2],'}'))
return s
def __parse(self, lines):
# Every line in the file must consist of either a comment
# or a key-value pair. A key-value pair is a line consisting
# of a key which is a combination of non-white space characters
# The separator character between key-value pairs is a '=',
# ':' or a whitespace character not including the newline.
# If the '=' or ':' characters are found, in the line, even
# keys containing whitespace chars are allowed.
# A line with only a key according to the rules above is also
# fine. In such case, the value is considered as the empty string.
# In order to include characters '=' or ':' in a key or value,
# they have to be properly escaped using the backslash character.
# Some examples of valid key-value pairs:
#
# key value
# key=value
# key:value
# key value1,value2,value3
# key value1,value2,value3 \
# value4, value5
# key
# This key= this value
# key = value1 value2 value3
# Any line that starts with a '#' is considered a comment
# and skipped. Also any trailing or preceding whitespaces
# are removed from the key/value.
        # This is a line parser. It parses the
        # contents line by line.
lineno=0
i = iter(lines)
for line in i:
lineno += 1
line = line.strip()
if not line: continue
if line[0] == '#': continue
sepidx = -1
m = self.othercharre.search(line)
if m:
first, last = m.span()
start, end = 0, first
wspacere = re.compile(r'(?<![\\\=\:])(\s)')
            else:
                if self.othercharre2.search(line):
                    # '=' or ':' is present, but escaped by a backslash
                    wspacere = re.compile(r'(?<![\\])(\s)')
                else:
                    # purely whitespace-separated key/value pair
                    wspacere = re.compile(r'(\s)')
                start, end = 0, len(line)
m2 = wspacere.search(line, start, end)
if m2:
first, last = m2.span()
sepidx = first
elif m:
first, last = m.span()
sepidx = last - 1
while line[-1] == '\\':
                nextline = next(i)
nextline = nextline.strip()
lineno += 1
line = line[:-1] + nextline
if sepidx != -1:
key, value = line[:sepidx], line[sepidx+1:]
else:
key,value = line,''
self.processPair(key, value)
def processPair(self, key, value):
oldkey = key
oldvalue = value
keyparts = self.bspacere.split(key)
strippable = False
lastpart = keyparts[-1]
if lastpart.find('\\ ') != -1:
keyparts[-1] = lastpart.replace('\\','')
elif lastpart and lastpart[-1] == ' ':
strippable = True
key = ''.join(keyparts)
if strippable:
key = key.strip()
oldkey = oldkey.strip()
oldvalue = self.unescape(oldvalue)
value = self.unescape(value)
self._props[key] = value.strip()
        if key in self._keymap:
oldkey = self._keymap.get(key)
self._origprops[oldkey] = oldvalue.strip()
else:
self._origprops[oldkey] = oldvalue.strip()
self._keymap[key] = oldkey
def escape(self, value):
        newvalue = value.replace(':', '\\:')
        newvalue = newvalue.replace('=', '\\=')
return newvalue
def unescape(self, value):
        newvalue = value.replace('\\:', ':')
        newvalue = newvalue.replace('\\=', '=')
return newvalue
def load(self, stream):
if not hasattr(stream, 'read'):
raise TypeError('Argument should be a file object!')
if stream.mode != 'r':
            raise ValueError('Stream should be opened in read-only mode!')
try:
lines = stream.readlines()
self.__parse(lines)
except IOError as e:
raise e
def getProperty(self, key):
return self._props.get(key)
def setProperty(self, key, value):
        if isinstance(key, str) and isinstance(value, str):
self.processPair(key, value)
else:
raise TypeError('both key and value should be strings!')
def propertyNames(self):
return self._props.keys()
def list(self, out=sys.stdout):
out.write('-- listing properties --\n')
for key,value in self._props.items():
out.write(''.join((key,'=',value,'\n')))
def store(self, out, header=""):
if out.mode[0] != 'w':
            raise ValueError('Stream should be opened in write mode!')
try:
out.write(''.join(('#',header,'\n')))
tstamp = time.strftime('%a %b %d %H:%M:%S %Z %Y', time.localtime())
out.write(''.join(('#',tstamp,'\n')))
for prop, val in self._origprops.items():
out.write(''.join((prop,'=',self.escape(val),'\n')))
out.close()
except IOError as e:
raise e
def getPropertyDict(self):
return self._props
def __getitem__(self, name):
return self.getProperty(name)
def __setitem__(self, name, value):
self.setProperty(name, value)
def __getattr__(self, name):
        try:
            return self.__dict__[name]
        except KeyError:
            if hasattr(self._props, name):
                return getattr(self._props, name)
            raise AttributeError(name)
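# Minimal usage sketch for Properties (the file name is hypothetical):
#
#   props = Properties()
#   with open("settings.properties", "r") as stream:
#       props.load(stream)
#   value = props.getProperty("some.key")  # None if the key is absent
#
# setProperty()/store() follow the same java.util.Properties-style conventions.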
try:
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
    from mpl_toolkits.mplot3d import Axes3D # required to register the 3D plot projection
except:
pass
try:
from PyQt5.QtWidgets import QApplication, QVBoxLayout
from PyQt5.QtCore import QCoreApplication
class MathTextLabel(QWidget):
def __init__(self, mathText, size=None, parent=None, **kwargs):
QWidget.__init__(self, parent, **kwargs)
l=QVBoxLayout(self)
l.setContentsMargins(0,0,0,0)
r,g,b,a=self.palette().base().color().getRgbF()
self._figure=Figure(edgecolor=(r,g,b), facecolor=(r,g,b))
self._canvas=FigureCanvas(self._figure)
l.addWidget(self._canvas)
self._figure.clear()
if not size:
size = QCoreApplication.instance().font().pointSize()
text=self._figure.suptitle(
mathText,
x=0.0,
y=1.0,
horizontalalignment='left',
verticalalignment='top',
size=size)
self._canvas.draw()
(x0,y0),(x1,y1)=text.get_window_extent().get_points()
w=x1-x0; h=y1-y0
self._figure.set_size_inches(w/80, h/80)
self.setFixedSize(w,h)
except:
pass
if __name__ == "__main__":
#print(congruence.checkFileName("pippo.dat"))
#print(congruence.checkFileName("Files/pippo.dat"))
#print(congruence.checkFileName("Files/pippo.dat"))
#print(congruence.checkFileName("/Users/labx/Desktop/pippo.dat"))
s = " 5 8095683980.2420149 3.34799999999999994E-008"
print(s.strip().split(" "))
print("Bragg")
ShadowCongruence.checkBraggFile("/Users/labx/Oasys/bragg.dat")
print("PreRefl")
ShadowCongruence.checkPreReflFile("/Users/labx/Oasys/reflec.dat")
print("PreMLayer")
ShadowCongruence.checkPreMLayerFile("/Users/labx/Oasys/mlayer.dat")
#ShadowCongruence.checkXOPDiffractionProfileFile("/Users/labx/Oasys/mlayer.dat")
'''
print(ShadowPhysics.A2EV)
print(ShadowPhysics.Chebyshev(4, 21))
print(ShadowPhysics.Chebyshev(0, 35))
coefficients = [5.530814e+002, 2.487256e+000, -2.004860e-001, 2.246427e-003, -1.044517e-005, 1.721576e-008]
random_generator=random.Random()
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 10, random_generator=random_generator))
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 11, random_generator=random_generator))
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 12, random_generator=random_generator))
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 13, random_generator=random_generator))
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 14, random_generator=random_generator))
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 15, random_generator=random_generator))
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 16, random_generator=random_generator))
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 17, random_generator=random_generator))
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 18, random_generator=random_generator))
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 19, random_generator=random_generator))
print(ShadowPhysics.ChebyshevBackgroundNoised(coefficients, 20, random_generator=random_generator))
'''
'''
import matplotlib.pyplot as plt
x_coords, y_coords, z_values = ShadowPreProcessor.read_surface_error_file("/Users/labx/Oasys/mirror.dat")
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = numpy.meshgrid(x_coords, y_coords)
surf = ax.plot_surface(X, Y, z_values.T, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
#ax.set_zlim(-1.01, 1.01)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
app = QApplication(sys.argv)
widget = QWidget()
widget.setLayout(QVBoxLayout())
figure = Figure(figsize=(100, 100))
figure.patch.set_facecolor('white')
axis = figure.add_subplot(111, projection='3d')
axis.set_xlabel("X (cm)")
axis.set_ylabel("Y (cm)")
axis.set_zlabel("Z (cm)")
figure_canvas = FigureCanvasQTAgg(figure)
widget.layout().addWidget(figure_canvas)
figure_canvas.setFixedWidth(500)
figure_canvas.setFixedHeight(450)
x_coords, y_coords, z_values = ShadowPreProcessor.read_surface_error_file("/Users/labx/Oasys/mirror.dat")
x_to_plot, y_to_plot = numpy.meshgrid(x_coords, y_coords)
axis.plot_surface(x_to_plot, y_to_plot, z_values.T,
rstride=1, cstride=1, cmap=cm.autumn, linewidth=0.5, antialiased=True)
figure_canvas.draw()
figure_canvas.show()
widget.show()
app.exec()
''' | PypiClean |
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/lang/da.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['da']={"editor":"Rich Text Editor","editorPanel":"Rich Text Editor panel","common":{"editorHelp":"Tryk ALT 0 for hjælp","browseServer":"Gennemse...","url":"URL","protocol":"Protokol","upload":"Upload","uploadSubmit":"Upload","image":"Indsæt billede","flash":"Indsæt Flash","form":"Indsæt formular","checkbox":"Indsæt afkrydsningsfelt","radio":"Indsæt alternativknap","textField":"Indsæt tekstfelt","textarea":"Indsæt tekstboks","hiddenField":"Indsæt skjult felt","button":"Indsæt knap","select":"Indsæt liste","imageButton":"Indsæt billedknap","notSet":"<intet valgt>","id":"Id","name":"Navn","langDir":"Tekstretning","langDirLtr":"Fra venstre mod højre (LTR)","langDirRtl":"Fra højre mod venstre (RTL)","langCode":"Sprogkode","longDescr":"Udvidet beskrivelse","cssClass":"Typografiark (CSS)","advisoryTitle":"Titel","cssStyle":"Typografi (CSS)","ok":"OK","cancel":"Annullér","close":"Luk","preview":"Forhåndsvisning","resize":"Træk for at skalere","generalTab":"Generelt","advancedTab":"Avanceret","validateNumberFailed":"Værdien er ikke et tal.","confirmNewPage":"Alt indhold, der ikke er blevet gemt, vil gå tabt. Er du sikker på, at du vil indlæse en ny side?","confirmCancel":"Nogle af indstillingerne er blevet ændret. Er du sikker på, at du vil lukke vinduet?","options":"Vis muligheder","target":"Mål","targetNew":"Nyt vindue (_blank)","targetTop":"Øverste vindue (_top)","targetSelf":"Samme vindue (_self)","targetParent":"Samme vindue (_parent)","langDirLTR":"Venstre til højre (LTR)","langDirRTL":"Højre til venstre (RTL)","styles":"Style","cssClasses":"Stylesheetklasser","width":"Bredde","height":"Højde","align":"Justering","left":"Venstre","right":"Højre","center":"Center","justify":"Lige margener","alignLeft":"Venstrestillet","alignRight":"Højrestillet","alignCenter":"Centreret","alignTop":"Øverst","alignMiddle":"Centreret","alignBottom":"Nederst","alignNone":"Ingen","invalidValue":"Ugyldig værdi.","invalidHeight":"Højde skal være et tal.","invalidWidth":"Bredde skal være et tal.","invalidLength":"Værdien angivet for feltet \"%1\" skal være et positivt heltal med eller uden en gyldig måleenhed (%2).","invalidCssLength":"Værdien specificeret for \"%1\" feltet skal være et positivt nummer med eller uden en CSS måleenhed (px, %, in, cm, mm, em, ex, pt, eller pc).","invalidHtmlLength":"Værdien specificeret for \"%1\" feltet skal være et positivt nummer med eller uden en CSS måleenhed (px eller %).","invalidInlineStyle":"Værdien specificeret for inline style skal indeholde en eller flere elementer med et format som \"name:value\", separeret af semikoloner","cssLengthTooltip":"Indsæt en numerisk værdi i pixel eller nummer med en gyldig CSS værdi (px, %, in, cm, mm, em, ex, pt, eller pc).","unavailable":"%1<span class=\"cke_accessibility\">, ikke tilgængelig</span>","keyboard":{"8":"Backspace","13":"Retur","16":"Shift","17":"Ctrl","18":"Alt","32":"Mellemrum","35":"Slut","36":"Hjem","46":"Slet","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Kommando"},"keyboardShortcut":"Tastatur genvej","optionDefault":"Standard"},"about":{"copy":"Copyright © $1. 
Alle rettigheder forbeholdes.","dlgTitle":"Om CKEditor 4","moreInfo":"For informationer omkring licens, se venligst vores hjemmeside (på engelsk):"},"basicstyles":{"bold":"Fed","italic":"Kursiv","strike":"Gennemstreget","subscript":"Sænket skrift","superscript":"Hævet skrift","underline":"Understreget"},"bidi":{"ltr":"Tekstretning fra venstre til højre","rtl":"Tekstretning fra højre til venstre"},"blockquote":{"toolbar":"Blokcitat"},"notification":{"closed":"Notefikation lukket."},"toolbar":{"toolbarCollapse":"Sammenklap værktøjslinje","toolbarExpand":"Udvid værktøjslinje","toolbarGroups":{"document":"Dokument","clipboard":"Udklipsholder/Fortryd","editing":"Redigering","forms":"Formularer","basicstyles":"Basis styles","paragraph":"Paragraf","links":"Links","insert":"Indsæt","styles":"Typografier","colors":"Farver","tools":"Værktøjer"},"toolbars":"Editors værktøjslinjer"},"clipboard":{"copy":"Kopiér","copyError":"Din browsers sikkerhedsindstillinger tillader ikke editoren at få automatisk adgang til udklipsholderen. Brug i stedet tastaturet til at kopiere teksten (Ctrl/Cmd+C).","cut":"Klip","cutError":"Din browsers sikkerhedsindstillinger tillader ikke editoren at få automatisk adgang til udklipsholderen. Brug i stedet tastaturet til at klippe teksten (Ctrl/Cmd+X).","paste":"Indsæt","pasteNotification":"Tryk %1 for at sætte ind. Din browser understøtter ikke indsættelse med værktøjslinje knappen eller kontekst menuen.","pasteArea":"Indsættelses område","pasteMsg":"Indsæt dit indhold i området nedenfor og tryk OK."},"colorbutton":{"auto":"Automatisk","bgColorTitle":"Baggrundsfarve","colors":{"000":"Sort","800000":"Mørkerød","8B4513":"Mørk orange","2F4F4F":"Mørk skifer grå","008080":"Turkis","000080":"Marine","4B0082":"Indigo","696969":"Mørkegrå","B22222":"Scarlet / Rød","A52A2A":"Brun","DAA520":"Guld","006400":"Mørkegrøn","40E0D0":"Turkis","0000CD":"Mellemblå","800080":"Lilla","808080":"Grå","F00":"Rød","FF8C00":"Mørk orange","FFD700":"Guld","008000":"Grøn","0FF":"Cyan","00F":"Blå","EE82EE":"Violet","A9A9A9":"Matgrå","FFA07A":"Laksefarve","FFA500":"Orange","FFFF00":"Gul","00FF00":"Lime","AFEEEE":"Mat turkis","ADD8E6":"Lyseblå","DDA0DD":"Mørkerød","D3D3D3":"Lysegrå","FFF0F5":"Lavendelrød","FAEBD7":"Antikhvid","FFFFE0":"Lysegul","F0FFF0":"Gul / Beige","F0FFFF":"Himmeblå","F0F8FF":"Alice blue","E6E6FA":"Lavendel","FFF":"Hvid","1ABC9C":"Stærk cyan","2ECC71":"Smaragd","3498DB":"Klar blå","9B59B6":"Ametyst","4E5F70":"Glålig blå","F1C40F":"Klar gul","16A085":"Mørk cyan","27AE60":"Mørk smaragd","2980B9":"Stærk blå","8E44AD":"Mørk violet","2C3E50":"Mat blå","F39C12":"Orange","E67E22":"Gulerod","E74C3C":"Bleg rød","ECF0F1":"Klar sølv","95A5A6":"Lys grålig cyan","DDD":"Lys grå","D35400":"Græskar","C0392B":"Stærk rød","BDC3C7":"Sølv","7F8C8D":"Glålig cyan","999":"Mørk grå"},"more":"Flere farver...","panelTitle":"Farver","textColorTitle":"Tekstfarve"},"colordialog":{"clear":"Nulstil","highlight":"Markér","options":"Farvemuligheder","selected":"Valgt farve","title":"Vælg farve"},"templates":{"button":"Skabeloner","emptyListMsg":"(Der er ikke defineret nogen skabelon)","insertOption":"Erstat det faktiske indhold","options":"Skabelon muligheder","selectPromptMsg":"Vælg den skabelon, som skal åbnes i editoren (nuværende indhold vil blive overskrevet):","title":"Indholdsskabeloner"},"contextmenu":{"options":"Muligheder for hjælpemenu"},"copyformatting":{"label":"Copy Formatting","notification":{"copied":"Formatting copied","applied":"Formatting applied","canceled":"Formatting canceled","failed":"Formatting 
failed. You cannot apply styles without copying them first."}},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Vejledende titel","cssClassInputLabel":"Typografiark","edit":"Rediger Div","inlineStyleInputLabel":"Inline Style","langDirLTRLabel":"Venstre til højre (LTR)","langDirLabel":"Sprogretning","langDirRTLLabel":"Højre til venstre (RTL)","languageCodeInputLabel":" Sprogkode","remove":"Slet Div","styleSelectLabel":"Style","title":"Opret Div Container","toolbar":"Opret Div Container"},"elementspath":{"eleLabel":"Sti på element","eleTitle":"%1 element"},"filetools":{"loadError":"Der skete en fejl ved indlæsningen af filen.","networkError":"Der skete en netværks fejl under uploadingen.","httpError404":"Der skete en HTTP fejl under uploadingen (404: File not found).","httpError403":"Der skete en HTTP fejl under uploadingen (403: Forbidden).","httpError":"Der skete en HTTP fejl under uploadingen (error status: %1).","noUrlError":"Upload URL er ikke defineret.","responseError":"Ikke korrekt server svar."},"find":{"find":"Søg","findOptions":"Find muligheder","findWhat":"Søg efter:","matchCase":"Forskel på store og små bogstaver","matchCyclic":"Match cyklisk","matchWord":"Kun hele ord","notFoundMsg":"Søgeteksten blev ikke fundet","replace":"Erstat","replaceAll":"Erstat alle","replaceSuccessMsg":"%1 forekomst(er) erstattet.","replaceWith":"Erstat med:","title":"Søg og erstat"},"fakeobjects":{"anchor":"Anker","flash":"Flashanimation","hiddenfield":"Skjult felt","iframe":"Iframe","unknown":"Ukendt objekt"},"flash":{"access":"Scriptadgang","accessAlways":"Altid","accessNever":"Aldrig","accessSameDomain":"Samme domæne","alignAbsBottom":"Absolut nederst","alignAbsMiddle":"Absolut centreret","alignBaseline":"Grundlinje","alignTextTop":"Toppen af teksten","bgcolor":"Baggrundsfarve","chkFull":"Tillad fuldskærm","chkLoop":"Gentagelse","chkMenu":"Vis Flash-menu","chkPlay":"Automatisk afspilning","flashvars":"Variabler for Flash","hSpace":"Vandret margen","properties":"Egenskaber for Flash","propertiesTab":"Egenskaber","quality":"Kvalitet","qualityAutoHigh":"Auto høj","qualityAutoLow":"Auto lav","qualityBest":"Bedste","qualityHigh":"Høj","qualityLow":"Lav","qualityMedium":"Medium","scale":"Skalér","scaleAll":"Vis alt","scaleFit":"Tilpas størrelse","scaleNoBorder":"Ingen ramme","title":"Egenskaber for Flash","vSpace":"Lodret margen","validateHSpace":"Vandret margen skal være et tal.","validateSrc":"Indtast hyperlink URL!","validateVSpace":"Lodret margen skal være et tal.","windowMode":"Vinduestilstand","windowModeOpaque":"Gennemsigtig (opaque)","windowModeTransparent":"Transparent","windowModeWindow":"Vindue"},"font":{"fontSize":{"label":"Skriftstørrelse","voiceLabel":"Skriftstørrelse","panelTitle":"Skriftstørrelse"},"label":"Skrifttype","panelTitle":"Skrifttype","voiceLabel":"Skrifttype"},"forms":{"button":{"title":"Egenskaber for knap","text":"Tekst","type":"Type","typeBtn":"Knap","typeSbm":"Send","typeRst":"Nulstil"},"checkboxAndRadio":{"checkboxTitle":"Egenskaber for afkrydsningsfelt","radioTitle":"Egenskaber for alternativknap","value":"Værdi","selected":"Valgt","required":"Påkrævet"},"form":{"title":"Egenskaber for formular","menu":"Egenskaber for formular","action":"Handling","method":"Metode","encoding":"Kodning (encoding)"},"hidden":{"title":"Egenskaber for skjult felt","name":"Navn","value":"Værdi"},"select":{"title":"Egenskaber for liste","selectInfo":"Generelt","opAvail":"Valgmuligheder","value":"Værdi","size":"Størrelse","lines":"Linjer","chkMulti":"Tillad flere 
valg","required":"Påkrævet","opText":"Tekst","opValue":"Værdi","btnAdd":"Tilføj","btnModify":"Redigér","btnUp":"Op","btnDown":"Ned","btnSetValue":"Sæt som valgt","btnDelete":"Slet"},"textarea":{"title":"Egenskaber for tekstboks","cols":"Kolonner","rows":"Rækker"},"textfield":{"title":"Egenskaber for tekstfelt","name":"Navn","value":"Værdi","charWidth":"Bredde (tegn)","maxChars":"Max. antal tegn","required":"Påkrævet","type":"Type","typeText":"Tekst","typePass":"Adgangskode","typeEmail":"E-mail","typeSearch":"Søg","typeTel":"Telefon nummer","typeUrl":"URL"}},"format":{"label":"Formatering","panelTitle":"Formatering","tag_address":"Adresse","tag_div":"Normal (DIV)","tag_h1":"Overskrift 1","tag_h2":"Overskrift 2","tag_h3":"Overskrift 3","tag_h4":"Overskrift 4","tag_h5":"Overskrift 5","tag_h6":"Overskrift 6","tag_p":"Normal","tag_pre":"Formateret"},"horizontalrule":{"toolbar":"Indsæt vandret streg"},"iframe":{"border":"Vis kant på rammen","noUrl":"Venligst indsæt URL på iframen","scrolling":"Aktiver scrollbars","title":"Iframe egenskaber","toolbar":"Iframe"},"image":{"alt":"Alternativ tekst","border":"Ramme","btnUpload":"Upload fil til serveren","button2Img":"Vil du lave billedknappen om til et almindeligt billede?","hSpace":"Vandret margen","img2Button":"Vil du lave billedet om til en billedknap?","infoTab":"Generelt","linkTab":"Hyperlink","lockRatio":"Lås størrelsesforhold","menu":"Egenskaber for billede","resetSize":"Nulstil størrelse","title":"Egenskaber for billede","titleButton":"Egenskaber for billedknap","upload":"Upload","urlMissing":"Kilde på billed-URL mangler","vSpace":"Lodret margen","validateBorder":"Kant skal være et helt nummer.","validateHSpace":"HSpace skal være et helt nummer.","validateVSpace":"VSpace skal være et helt nummer."},"indent":{"indent":"Forøg indrykning","outdent":"Formindsk indrykning"},"smiley":{"options":"Smileymuligheder","title":"Vælg smiley","toolbar":"Smiley"},"language":{"button":"Vælg sprog","remove":"Fjern sprog"},"link":{"acccessKey":"Genvejstast","advanced":"Avanceret","advisoryContentType":"Indholdstype","advisoryTitle":"Titel","anchor":{"toolbar":"Indsæt/redigér bogmærke","menu":"Egenskaber for bogmærke","title":"Egenskaber for bogmærke","name":"Bogmærkenavn","errorName":"Indtast bogmærkenavn","remove":"Fjern bogmærke"},"anchorId":"Efter element-Id","anchorName":"Efter ankernavn","charset":"Tegnsæt","cssClasses":"Typografiark","download":"Tving Download","displayText":"Vis tekst","emailAddress":"E-mailadresse","emailBody":"Besked","emailSubject":"Emne","id":"Id","info":"Generelt","langCode":"Tekstretning","langDir":"Tekstretning","langDirLTR":"Fra venstre mod højre (LTR)","langDirRTL":"Fra højre mod venstre (RTL)","menu":"Redigér hyperlink","name":"Navn","noAnchors":"(Ingen bogmærker i dokumentet)","noEmail":"Indtast e-mailadresse!","noUrl":"Indtast hyperlink-URL!","noTel":"Please type the phone number","other":"<anden>","phoneNumber":"Phone number","popupDependent":"Koblet/dependent (Netscape)","popupFeatures":"Egenskaber for popup","popupFullScreen":"Fuld skærm (IE)","popupLeft":"Position fra venstre","popupLocationBar":"Adresselinje","popupMenuBar":"Menulinje","popupResizable":"Justérbar","popupScrollBars":"Scrollbar","popupStatusBar":"Statuslinje","popupToolbar":"Værktøjslinje","popupTop":"Position fra toppen","rel":"Relation","selectAnchor":"Vælg et anker","styles":"Typografi","tabIndex":"Tabulatorindeks","target":"Mål","targetFrame":"<ramme>","targetFrameName":"Destinationsvinduets navn","targetPopup":"<popup 
vindue>","targetPopupName":"Popupvinduets navn","title":"Egenskaber for hyperlink","toAnchor":"Bogmærke på denne side","toEmail":"E-mail","toUrl":"URL","toPhone":"Phone","toolbar":"Indsæt/redigér hyperlink","type":"Type","unlink":"Fjern hyperlink","upload":"Upload"},"list":{"bulletedlist":"Punktopstilling","numberedlist":"Talopstilling"},"liststyle":{"bulletedTitle":"Værdier for cirkelpunktopstilling","circle":"Cirkel","decimal":"Decimal (1, 2, 3, osv.)","disc":"Værdier for diskpunktopstilling","lowerAlpha":"Små alfabet (a, b, c, d, e, etc.)","lowerRoman":"Små romerske (i, ii, iii, iv, v, etc.)","none":"Ingen","notset":"<ikke defineret>","numberedTitle":"Egenskaber for nummereret liste","square":"Firkant","start":"Start","type":"Type","upperAlpha":"Store alfabet (A, B, C, D, E, etc.)","upperRoman":"Store romerske (I, II, III, IV, V, etc.)","validateStartNumber":"Den nummererede liste skal starte med et rundt nummer"},"magicline":{"title":"Indsæt afsnit"},"maximize":{"maximize":"Maksimér","minimize":"Minimér"},"newpage":{"toolbar":"Ny side"},"pagebreak":{"alt":"Sideskift","toolbar":"Indsæt sideskift"},"pastetext":{"button":"Indsæt som ikke-formateret tekst","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","title":"Indsæt som ikke-formateret tekst"},"pastefromword":{"confirmCleanup":"Den tekst du forsøger at indsætte ser ud til at komme fra Word. Vil du rense teksten før den indsættes?","error":"Det var ikke muligt at fjerne formatteringen på den indsatte tekst grundet en intern fejl","title":"Indsæt fra Word","toolbar":"Indsæt fra Word"},"preview":{"preview":"Vis eksempel"},"print":{"toolbar":"Udskriv"},"removeformat":{"toolbar":"Fjern formatering"},"save":{"toolbar":"Gem"},"selectall":{"toolbar":"Vælg alt"},"showblocks":{"toolbar":"Vis afsnitsmærker"},"sourcearea":{"toolbar":"Kilde"},"specialchar":{"options":"Muligheder for specialkarakterer","title":"Vælg symbol","toolbar":"Indsæt symbol"},"scayt":{"btn_about":"Om SCAYT","btn_dictionaries":"Ordbøger","btn_disable":"Deaktivér SCAYT","btn_enable":"Aktivér SCAYT","btn_langs":"Sprog","btn_options":"Indstillinger","text_title":"Stavekontrol mens du skriver"},"stylescombo":{"label":"Typografi","panelTitle":"Formattering på stylesheet","panelTitle1":"Block typografi","panelTitle2":"Inline typografi","panelTitle3":"Object typografi"},"table":{"border":"Rammebredde","caption":"Titel","cell":{"menu":"Celle","insertBefore":"Indsæt celle før","insertAfter":"Indsæt celle efter","deleteCell":"Slet celle","merge":"Flet celler","mergeRight":"Flet til højre","mergeDown":"Flet nedad","splitHorizontal":"Del celle vandret","splitVertical":"Del celle lodret","title":"Celleegenskaber","cellType":"Celletype","rowSpan":"Række span (rows span)","colSpan":"Kolonne span (columns span)","wordWrap":"Tekstombrydning","hAlign":"Vandret justering","vAlign":"Lodret justering","alignBaseline":"Grundlinje","bgColor":"Baggrundsfarve","borderColor":"Rammefarve","data":"Data","header":"Hoved","yes":"Ja","no":"Nej","invalidWidth":"Cellebredde skal være et tal.","invalidHeight":"Cellehøjde skal være et tal.","invalidRowSpan":"Række span skal være et heltal.","invalidColSpan":"Kolonne span skal være et heltal.","chooseColor":"Vælg"},"cellPad":"Cellemargen","cellSpace":"Celleafstand","column":{"menu":"Kolonne","insertBefore":"Indsæt kolonne før","insertAfter":"Indsæt kolonne efter","deleteColumn":"Slet kolonne"},"columns":"Kolonner","deleteTable":"Slet 
tabel","headers":"Hoved","headersBoth":"Begge","headersColumn":"Første kolonne","headersNone":"Ingen","headersRow":"Første række","heightUnit":"height unit","invalidBorder":"Rammetykkelse skal være et tal.","invalidCellPadding":"Cellemargen skal være et tal.","invalidCellSpacing":"Celleafstand skal være et tal.","invalidCols":"Antallet af kolonner skal være større end 0.","invalidHeight":"Tabelhøjde skal være et tal.","invalidRows":"Antallet af rækker skal være større end 0.","invalidWidth":"Tabelbredde skal være et tal.","menu":"Egenskaber for tabel","row":{"menu":"Række","insertBefore":"Indsæt række før","insertAfter":"Indsæt række efter","deleteRow":"Slet række"},"rows":"Rækker","summary":"Resumé","title":"Egenskaber for tabel","toolbar":"Tabel","widthPc":"procent","widthPx":"pixels","widthUnit":"Bredde på enhed"},"undo":{"redo":"Annullér fortryd","undo":"Fortryd"},"widget":{"move":"Klik og træk for at flytte","label":"%1 widget"},"uploadwidget":{"abort":"Upload er afbrudt af brugen.","doneOne":"Filen er uploadet.","doneMany":"Du har uploadet %1 filer.","uploadOne":"Uploader fil ({percentage}%)...","uploadMany":"Uploader filer, {current} af {max} er uploadet ({percentage}%)..."},"wsc":{"btnIgnore":"Ignorér","btnIgnoreAll":"Ignorér alle","btnReplace":"Erstat","btnReplaceAll":"Erstat alle","btnUndo":"Tilbage","changeTo":"Forslag","errorLoading":"Fejl ved indlæsning af host: %s.","ieSpellDownload":"Stavekontrol ikke installeret. Vil du installere den nu?","manyChanges":"Stavekontrol færdig: %1 ord ændret","noChanges":"Stavekontrol færdig: Ingen ord ændret","noMispell":"Stavekontrol færdig: Ingen fejl fundet","noSuggestions":"(ingen forslag)","notAvailable":"Stavekontrol er desværre ikke tilgængelig.","notInDic":"Ikke i ordbogen","oneChange":"Stavekontrol færdig: Et ord ændret","progress":"Stavekontrollen arbejder...","title":"Stavekontrol","toolbar":"Stavekontrol"}}; | PypiClean |
/Multiple%20smi-2.0.3.tar.gz/Multiple smi-2.0.3/multiple_smi/client/menu_frontend/argos.py | from .default_frontend import BaseFrontend
from .icon_utils import draw_icon
import os
import stat
import json
import sys
argos_template = '''#!{python}
import re
import time
import base64
import json
import sys
with open("{icon_path}", 'rb') as bytes:
img_str = base64.b64encode(bytes.read())
print("{name} | image='{{}}'\\n---".format(img_str.decode()))
try:
with open("{json_path}") as f:
info = json.load(f)
except Exception:
sys.exit()
print("{name}@{ip}")
for gpu in info['GPUs']:
print("{{}}, {{:.2f}} GB | color=gray".format(gpu['name'], gpu['memory']))
print("{{}}% , {{:.2f}} GB".format(gpu['utilization'], gpu['used_mem']))
print("---")
print(info['cpu']['name'] + '| color=gray')
print("{{}}%".format(info['cpu']['usage']))
print("---")
print("RAM | color=gray")
print("{{}}% ({{:.2f}} GB / {{:.2f}} GB)".format(info['ram']['usage'],
info['ram']['used'],
info['ram']['total']))
'''
class ArgosFrontend(BaseFrontend):
"""docstring for ArgosBackend"""
def __init__(self, config_folder, argos_folder=None):
super(ArgosFrontend, self).__init__(config_folder)
self.argos_folder = argos_folder or os.path.join(os.path.expanduser('~'), ".config", "argos")
assert(os.path.isdir(self.argos_folder))
def build_menu(self, machine_name, machine):
icon_path_string = os.path.join(self.config_folder, "{}.png".format(machine_name))
json_path_string = os.path.join(self.config_folder, "client_{}.json".format(machine_name))
script_string = argos_template.format(python=sys.executable,
home=os.path.expanduser("~"),
icon_path=icon_path_string,
json_path=json_path_string,
name=machine_name,
ip=machine['ip'])
script_path = os.path.join(self.argos_folder, "{}.1s.py".format(machine_name))
with open(script_path, 'w') as f:
f.write(script_string)
st = os.stat(script_path)
os.chmod(script_path, st.st_mode | stat.S_IEXEC)
self.paths[machine_name] = script_path
def update_menu(self, machine_name, machine):
png_path = os.path.join(self.config_folder, "{}.png".format(machine_name))
draw_icon(machine).write_to_png(png_path)
json_path = os.path.join(self.config_folder, "client_{}.json".format(machine_name))
with open(json_path, 'w') as f:
json.dump(machine['summary'], f, indent=2)
def new_machines(self, machine_names, machines):
for name in machine_names:
self.update_menu(name, machines[name])
self.build_menu(name, machines[name])
def lost_machine(self, machine_name, machine):
if machine_name in self.paths.keys():
if os.path.isfile(self.paths[machine_name]):
os.remove(self.paths[machine_name])
del self.paths[machine_name] | PypiClean |
/FHIRkit-0.1.2.tar.gz/FHIRkit-0.1.2/fhirkit/ValueSet.py | from datetime import date, datetime
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
from typing import Iterable, List, Optional, Sequence, Union
from pydantic import Field, validator
from fhirkit.BaseModel import BaseModel
from fhirkit.primitive_datatypes import URI, dateTime
from fhirkit.elements import (
BackboneElement,
CodeableConcept,
Coding,
AbstractCoding,
Narrative,
UsageContext,
)
from fhirkit.Resource import CanonicalResource
class VSDesignation(BaseModel):
language: Optional[str]
use: Optional[Coding]
value: str
class VSConcept(BaseModel):
code: str
display: Optional[str]
    designation: List[VSDesignation] = Field(default_factory=list)
class VSFilter(BackboneElement):
property: str
op: Literal[
"=",
"is-a",
"descendent-of",
"is-not-a",
"regex",
"in",
"not-in",
"generalizes",
"child-of",
"descendent-leaf",
"exists",
]
value: str
class VSInclude(BackboneElement):
system: Optional[URI] = None
version: Optional[str] = None
    concept: Sequence[VSConcept] = Field(default_factory=list)
    filter: Sequence[VSFilter] = Field(default_factory=list)
    valueSet: Sequence[URI] = Field(default_factory=list)
class VSCompose(BaseModel):
include: Sequence[VSInclude] = []
exclude: Sequence[VSInclude] = []
property: Sequence[str] = []
lockedDate: Optional[date]
inactive: Optional[bool]
class VSCodingProperty(BackboneElement):
code: str
valueCode: Optional[str]
valueCoding: Optional[Coding]
valueString: Optional[str]
valueInteger: Optional[int]
valueBoolean: Optional[bool]
valueDateTime: Optional[dateTime]
valueDecimal: Optional[float]
class VSCodingWithDesignation(AbstractCoding):
designation: Sequence[VSDesignation] = Field(default_factory=list)
abstract: Optional[bool] = None
inactive: Optional[bool] = None
property: Sequence[VSCodingProperty] = Field(default_factory=list)
class VSExpansion(BackboneElement):
offset: Optional[int] = None
total: Optional[int] = None
contains: Sequence[VSCodingWithDesignation] = Field(default_factory=list)
identifier: Optional[URI] = None
timestamp: datetime = Field(default_factory=datetime.now)
class ValueSet(CanonicalResource):
resourceType: Literal["ValueSet"] = Field("ValueSet", const=True)
url: Optional[URI]
name: Optional[str]
compose: Optional[VSCompose]
expansion: Optional[VSExpansion]
useContext: Sequence[UsageContext] = Field(default_factory=list, repr=True)
@property
def has_expanded(self):
return self.expansion is not None
def expand(self):
"""Override this method to implement expansion logic.
This method should fill ValueSet.expansion.contains with concepts.
Implementing this method enables you to iterate over the ValueSet in a for-loop.
```python
class MyCustomValueSet(ValueSet)
def expand(self):
# some expansion logic
vs_example = MyCustomValueSet()
for coding in vs:
print(coding)
"
"""
raise NotImplementedError()
def validate_code(self, code: Union[Coding, CodeableConcept]):
raise NotImplementedError()
def init_expansion(self):
self.expansion = VSExpansion()
class SimpleValueSet(ValueSet):
status: Literal["active"] = Field("active", const=True)
expansion: VSExpansion
def __init__(self, *args: VSCodingWithDesignation, **kwargs):
if len(args) > 0:
assert "expansion" not in kwargs, "When passing an iterable with concepts, `expansion` should be None."
super().__init__(
expansion=VSExpansion.parse_obj({"contains":[c.dict() for c in args], "total":len(args)}),
text=Narrative(
status="generated",
div="""
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>code</th>
<th>display</th>
<th>system</th>
<th>version</th>
</tr>
</thead>
<tbody>"""
+ "".join(
[
f"<tr><th>{c.code}</th><td>{c.display}</td><td>{c.system}</td><td>{c.version}</td></tr>"
for c in args
]
)
+ """
</tbody>
</table>
</div>""",
),
**kwargs,
)
else:
super().__init__(**kwargs)
def append(
self,
code: VSCodingWithDesignation,
):
assert self.expansion is not None, "`self.expansion` is None after initialisation with `self.init_expansion`"
self.expansion.contains.append(code)
def extend(
self,
codes: Iterable[VSCodingWithDesignation],
):
assert self.expansion is not None, "`self.expansion` is None after initialisation with `self.init_expansion`"
self.expansion.contains.extend(codes)
def validate_code(self, code: Union[Coding, CodeableConcept]):
if isinstance(code, CodeableConcept):
return any(self.validate_code(c) for c in code.coding)
elif isinstance(code, Coding):
return any(c == code for c in self)
else:
return False
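# Minimal usage sketch (illustrative only; the concrete field values, and the
# assumption that AbstractCoding exposes code/display/system, are not verified
# against fhirkit.elements):
#
#   coding = VSCodingWithDesignation(
#       code="73211009",
#       display="Diabetes mellitus",
#       system="http://snomed.info/sct",
#   )
#   vs = SimpleValueSet(coding)  # expansion and narrative are generated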
ValueSet.update_forward_refs() | PypiClean |
/DI_engine-0.4.9-py3-none-any.whl/dizoo/classic_control/cartpole/entry/cartpole_ppo_offpolicy_main.py | import os
import gym
from tensorboardX import SummaryWriter
from easydict import EasyDict
from ding.config import compile_config
from ding.worker import BaseLearner, SampleSerialCollector, InteractionSerialEvaluator, NaiveReplayBuffer
from ding.envs import BaseEnvManager, DingEnvWrapper
from ding.policy import PPOOffPolicy
from ding.model import VAC
from ding.utils import set_pkg_seed
from dizoo.classic_control.cartpole.config.cartpole_offppo_config import cartpole_offppo_config
def wrapped_cartpole_env():
return DingEnvWrapper(
gym.make('CartPole-v0'),
EasyDict(env_wrapper='default'),
)
def main(cfg, seed=0, max_iterations=int(1e10)):
cfg = compile_config(
cfg,
BaseEnvManager,
PPOOffPolicy,
BaseLearner,
SampleSerialCollector,
InteractionSerialEvaluator,
NaiveReplayBuffer,
save_cfg=True
)
collector_env_num, evaluator_env_num = cfg.env.collector_env_num, cfg.env.evaluator_env_num
collector_env = BaseEnvManager(env_fn=[wrapped_cartpole_env for _ in range(collector_env_num)], cfg=cfg.env.manager)
evaluator_env = BaseEnvManager(env_fn=[wrapped_cartpole_env for _ in range(evaluator_env_num)], cfg=cfg.env.manager)
collector_env.seed(seed)
evaluator_env.seed(seed, dynamic_seed=False)
set_pkg_seed(seed, use_cuda=cfg.policy.cuda)
model = VAC(**cfg.policy.model)
policy = PPOOffPolicy(cfg.policy, model=model)
tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
collector = SampleSerialCollector(
cfg.policy.collect.collector, collector_env, policy.collect_mode, tb_logger, exp_name=cfg.exp_name
)
evaluator = InteractionSerialEvaluator(
cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name
)
replay_buffer = NaiveReplayBuffer(cfg.policy.other.replay_buffer, exp_name=cfg.exp_name)
for _ in range(max_iterations):
if evaluator.should_eval(learner.train_iter):
stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)
if stop:
break
new_data = collector.collect(train_iter=learner.train_iter)
replay_buffer.push(new_data, cur_collector_envstep=collector.envstep)
for i in range(cfg.policy.learn.update_per_collect):
train_data = replay_buffer.sample(learner.policy.get_attribute('batch_size'), learner.train_iter)
if train_data is not None:
learner.train(train_data, collector.envstep)
if __name__ == "__main__":
main(cartpole_offppo_config) | PypiClean |
/FunID-0.3.16.2.tar.gz/FunID-0.3.16.2/funid/external/mmseqs_Windows/README.md | # MMseqs2: ultra fast and sensitive sequence search and clustering suite
MMseqs2 (Many-against-Many sequence searching) is a software suite to search and cluster huge protein and nucleotide sequence sets. MMseqs2 is open source GPL-licensed software implemented in C++ for Linux, MacOS, and (as beta version, via cygwin) Windows. The software is designed to run on multiple cores and servers and exhibits very good scalability. MMseqs2 can run 10000 times faster than BLAST. At 100 times its speed it achieves almost the same sensitivity. It can perform profile searches with the same sensitivity as PSI-BLAST at over 400 times its speed.
## Publications
[Steinegger M and Soeding J. MMseqs2 enables sensitive protein sequence searching for the analysis of massive data sets. Nature Biotechnology, doi: 10.1038/nbt.3988 (2017)](https://www.nature.com/articles/nbt.3988).
[Steinegger M and Soeding J. Clustering huge protein sequence sets in linear time. Nature Communications, doi: 10.1038/s41467-018-04964-5 (2018)](https://www.nature.com/articles/s41467-018-04964-5).
[Mirdita M, Steinegger M and Soeding J. MMseqs2 desktop and local web server app for fast, interactive sequence searches. Bioinformatics, doi: 10.1093/bioinformatics/bty1057 (2019)](https://academic.oup.com/bioinformatics/article/35/16/2856/5280135).
[Mirdita M, Steinegger M, Breitwieser F, Soding J, Levy Karin E: Fast and sensitive taxonomic assignment to metagenomic contigs. Bioinformatics, doi: 10.1093/bioinformatics/btab184 (2021)](https://doi.org/10.1093/bioinformatics/btab184).
[](https://anaconda.org/bioconda/mmseqs2) [](https://github.com/soedinglab/mmseqs2/releases/latest) [](https://biocontainers.pro/#/tools/mmseqs2) [](https://dev.azure.com/themartinsteinegger/mmseqs2/_build/latest?definitionId=2&branchName=master) <a href="https://chat.mmseqs.com/"><img src="https://chat.mmseqs.com/api/v1/shield.svg?type=online&name=chat&icon=false" /></a>
<p align="center"><img src="https://raw.githubusercontent.com/soedinglab/mmseqs2/master/.github/mmseqs2_logo.png" height="256" /></p>
## Documentation
The MMseqs2 user guide is available in our [GitHub Wiki](https://github.com/soedinglab/mmseqs2/wiki) or as a [PDF file](https://mmseqs.com/latest/userguide.pdf) (Thanks to [pandoc](https://github.com/jgm/pandoc)!). The wiki also contains [tutorials](https://github.com/soedinglab/MMseqs2/wiki/Tutorials) to learn how to use MMseqs2 with real data. For questions please open an issue on [GitHub](https://github.com/soedinglab/MMseqs2/issues) or ask in our [chat](https://chat.mmseqs.com).
Keep posted about MMseqs2/Linclust updates by following Martin on [Twitter](https://twitter.com/thesteinegger).
## Installation
MMseqs2 can be used by [compiling from source](https://github.com/soedinglab/MMseqs2/wiki#installation), downloading a statically compiled binary, using [Homebrew](https://github.com/Homebrew/brew), [conda](https://github.com/conda/conda) or [Docker](https://github.com/moby/moby).
# install by brew
brew install mmseqs2
# install via conda
conda install -c conda-forge -c bioconda mmseqs2
# install docker
docker pull ghcr.io/soedinglab/mmseqs2
# static build with AVX2 (fastest)
wget https://mmseqs.com/latest/mmseqs-linux-avx2.tar.gz; tar xvfz mmseqs-linux-avx2.tar.gz; export PATH=$(pwd)/mmseqs/bin/:$PATH
# static build with SSE4.1
wget https://mmseqs.com/latest/mmseqs-linux-sse41.tar.gz; tar xvfz mmseqs-linux-sse41.tar.gz; export PATH=$(pwd)/mmseqs/bin/:$PATH
# static build with SSE2 (slowest, for very old systems)
wget https://mmseqs.com/latest/mmseqs-linux-sse2.tar.gz; tar xvfz mmseqs-linux-sse2.tar.gz; export PATH=$(pwd)/mmseqs/bin/:$PATH
MMseqs2 requires an AMD or Intel 64-bit system (check with `uname -a | grep x86_64`). We recommend using a system with at least the SSE4.1 instruction set (check by executing `cat /proc/cpuinfo | grep sse4_1` on Linux or `sysctl -a | grep machdep.cpu.features | grep SSE4.1` on MacOS). The AVX2 version is faster than SSE4.1; check whether AVX2 is supported by executing `cat /proc/cpuinfo | grep avx2` on Linux and `sysctl -a | grep machdep.cpu.leaf7_features | grep AVX2` on MacOS. An SSE2 version is also available for very old systems.
MMseqs2 also works on ARM64 systems and on PPC64LE systems with POWER8 ISA or newer.
We provide static binaries for all supported platforms at [mmseqs.com/latest](https://mmseqs.com/latest).
MMseqs2 comes with a bash command and parameter auto completion, which can be activated by adding the following lines to your $HOME/.bash_profile:
<pre>
if [ -f /<b>Path to MMseqs2</b>/util/bash-completion.sh ]; then
source /<b>Path to MMseqs2</b>/util/bash-completion.sh
fi
</pre>
## Getting started
We provide `easy` workflows to cluster, search and assign taxonomy. These `easy` workflows are a shorthand to deal directly with FASTA/FASTQ files as input and output. MMseqs2 provides many modules to transform, filter, execute external programs and search. However, these modules use the MMseqs2 database formats, instead of the FASTA/FASTQ format. For maximum flexibility, we recommend using MMseqs2 workflows and modules directly. Please read more about this in the [documentation](https://github.com/soedinglab/mmseqs2/wiki).
### Cluster
For clustering, MMseqs2 `easy-cluster` and `easy-linclust` are available.
`easy-cluster` by default clusters the entries of a FASTA/FASTQ file using a cascaded clustering algorithm.
mmseqs easy-cluster examples/DB.fasta clusterRes tmp --min-seq-id 0.5 -c 0.8 --cov-mode 1
`easy-linclust` clusters the entries of a FASTA/FASTQ file. The runtime scales linearly with input size. This mode is recommended for huge datasets.
mmseqs easy-linclust examples/DB.fasta clusterRes tmp
Read more about the [clustering format](https://github.com/soedinglab/mmseqs2/wiki#clustering-format) in our user guide.
Please adjust the [clustering criteria](https://github.com/soedinglab/MMseqs2/wiki#clustering-criteria) and check that the temporary directory provides enough free space. For disk space requirements, see the user guide.
### Search
The `easy-search` workflow searches directly with a FASTA/FASTQ files against either another FASTA/FASTQ file or an already existing MMseqs2 database.
mmseqs easy-search examples/QUERY.fasta examples/DB.fasta alnRes.m8 tmp
It is also possible to pre-compute the index for the target database. This reduces overhead when searching repeatedly against the same database.
mmseqs createdb examples/DB.fasta targetDB
mmseqs createindex targetDB tmp
mmseqs easy-search examples/QUERY.fasta targetDB alnRes.m8 tmp
The `databases` workflow provides download and setup procedures for many public reference databases, such as the Uniref, NR, NT, PFAM and many more (see [Downloading databases](https://github.com/soedinglab/mmseqs2/wiki#downloading-databases)). For example, to download and search against a database containing the Swiss-Prot reference proteins run:
mmseqs databases UniProtKB/Swiss-Prot swissprot tmp
mmseqs easy-search examples/QUERY.fasta swissprot alnRes.m8 tmp
The speed and sensitivity of the `search` can be adjusted with the `-s` parameter and should be adapted based on your use case (see [setting sensitivity -s parameter](https://github.com/soedinglab/mmseqs2/wiki#set-sensitivity--s-parameter)). A very fast search would use a sensitivity of `-s 1.0`, while a very sensitive search would use a sensitivity of up to `-s 7.0`. A detailed guide on how to speed up searches is available [here](https://github.com/soedinglab/MMseqs2/wiki#how-to-control-the-speed-of-the-search).
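For example (the two sensitivity values below are just the illustrative endpoints of that range):

    # fast, less sensitive search
    mmseqs easy-search examples/QUERY.fasta targetDB alnRes.m8 tmp -s 1.0
    # slow, highly sensitive search
    mmseqs easy-search examples/QUERY.fasta targetDB alnRes.m8 tmp -s 7.0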
The output can be customized with the `--format-output` option, e.g. `--format-output "query,target,qaln,taln"` returns the query and target accessions and the pairwise alignments in tab-separated format. You can choose many different [output columns](https://github.com/soedinglab/mmseqs2/wiki#custom-alignment-format-with-convertalis).
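For example, rerunning the search above with that custom format:

    mmseqs easy-search examples/QUERY.fasta targetDB alnRes.m8 tmp --format-output "query,target,qaln,taln"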
:exclamation: `easy-search` by default computes the sequence identity by dividing the number of identical residues by the alignment length (`numIdentical/alnLen`). However, `search` [estimates](https://github.com/soedinglab/MMseqs2/wiki#how-does-mmseqs2-compute-the-sequence-identity) the identity by default. To output the real sequence identity, use `--alignment-mode 3` or `-a`.
### Taxonomy
The `easy-taxonomy` workflow can be used to assign taxonomic labels to sequences. It performs a search against a sequence database with taxonomy information (seqTaxDb), chooses the most representative sets of aligned target sequences according to different strategies (controlled by `--lca-mode`) and computes the lowest common ancestor among those.
mmseqs createdb examples/DB.fasta targetDB
mmseqs createtaxdb targetDB tmp
mmseqs createindex targetDB tmp
mmseqs easy-taxonomy examples/QUERY.fasta targetDB alnRes tmp
By default, `createtaxdb` maps every sequence to a taxonomic identifier through its UniProt accession and downloads the NCBI taxonomy. We also support [BLAST](https://github.com/soedinglab/MMseqs2/wiki#create-a-sequence-database-with-taxonomic-information-from-an-existing-blast-database), [SILVA](https://github.com/soedinglab/MMseqs2/wiki#create-a-sequence-database-with-taxonomic-information-for-silva) or [custom taxonomical](https://github.com/soedinglab/MMseqs2/wiki#manually-annotate-a-sequence-database-with-taxonomic-information) databases. Many common taxonomic reference databases can be easily downloaded and set up by the [`databases` workflow](https://github.com/soedinglab/mmseqs2/wiki#downloading-databases).
Read more about the [taxonomy format](https://github.com/soedinglab/MMseqs2/wiki#taxonomy-format) and the [classification](https://github.com/soedinglab/MMseqs2/wiki#taxonomy-assignment-using-mmseqs-taxonomy) in our user guide.
### Supported search modes
MMseqs2 provides many additional search modes:
* Iterative sequences-profile searches (like PSI-BLAST) with the `--num-iterations` parameter
* [Translated searches](https://github.com/soedinglab/MMseqs2/wiki#translated-sequence-searching) of nucleotides against proteins (blastx), proteins against nucleotides (tblastn) or nucleotide against nucleotide (tblastx)
* [Iterative increasing sensitivity searches](https://github.com/soedinglab/MMseqs2/wiki#how-to-find-the-best-hit-the-fastest-way) to find only the best hits faster
* [Taxonomic assignment](https://github.com/soedinglab/MMseqs2/wiki#taxonomy-assignment-using-mmseqs-taxonomy) using 2bLCA or LCA
* Fast ungapped alignment searches to find [very similar sequence matches](https://github.com/soedinglab/MMseqs2/wiki#mapping-very-similar-sequences-using-mmseqs-map)
* Very fast and sensitive searches against [profile databases such as the PFAM](https://github.com/soedinglab/MMseqs2/wiki#how-to-create-a-target-profile-database-from-pfam)
* [Reciprocal best hits search](https://github.com/soedinglab/MMseqs2/wiki#reciprocal-best-hit-using-mmseqs-rbh)
* [Web search API and user interface](https://github.com/soedinglab/MMseqs2-App)
Many modes can also be combined. You can, for example, do a translated nucleotide against protein profile search.
### Memory requirements
MMseqs2's minimum memory requirement for `cluster` or `linclust` is 1 byte per sequence residue; `search` needs 1 byte per target residue. Sequence databases can be compressed using the `--compress` flag; DNA sequences can be reduced by a factor of `~3.5` and proteins by `~1.7`.
MMseqs2 checks the available system memory and automatically divides the target database into parts that fit into memory. Splitting the database will increase the runtime slightly. It is possible to control the memory usage using `--split-memory-limit`.
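For example, to cap each database split at roughly 8 GB (the exact value, and the `8G` suffix form, are illustrative; pick a limit that fits your machine):

    mmseqs search queryDB targetDB resultDB tmp --split-memory-limit 8G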
### How to run MMseqs2 on multiple servers using MPI
MMseqs2 can run on multiple cores and servers using OpenMP and Message Passing Interface (MPI).
MPI assigns database splits to each compute node, which are then computed with multiple cores (OpenMP).
Make sure that MMseqs2 was compiled with MPI by using the `-DHAVE_MPI=1` flag (`cmake -DHAVE_MPI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=. ..`). Our precompiled static version of MMseqs2 cannot use MPI. The version string of MMseqs2 will have a `-MPI` suffix, if it was built successfully with MPI support.
To search with multiple servers, call the `search` or `cluster` workflow with the MPI command exported in the RUNNER environment variable. The databases and temporary folder have to be shared between all nodes (e.g. through NFS):
RUNNER="mpirun -pernode -np 42" mmseqs search queryDB targetDB resultDB tmp
## Contributors
MMseqs2 exists thanks to all the people who contribute.
<a href="https://github.com/soedinglab/mmseqs2/graphs/contributors">
<img src="https://contributors-img.firebaseapp.com/image?repo=soedinglab/mmseqs2" />
</a>
| PypiClean |
/NREL_reV-0.8.1-py3-none-any.whl/reV/bespoke/plotting_functions.py | import numpy as np
import matplotlib.pyplot as plt
def get_xy(A):
"""separate polygon exterior coordinates to x and y
Parameters
----------
A : Polygon.exteroir.coords
Exterior coordinates from a shapely Polygon
Outputs
----------
x, y : array
Boundary polygon x and y coordinates
"""
x = np.zeros(len(A))
y = np.zeros(len(A))
for i, _ in enumerate(A):
x[i] = A[i][0]
y[i] = A[i][1]
return x, y
def plot_poly(geom, ax=None, color="black", linestyle="--", linewidth=0.5):
"""plot the wind plant boundaries
Parameters
----------
geom : Polygon | MultiPolygon
The shapely.Polygon or shapely.MultiPolygon that define the wind
plant boundary(ies).
ax : :py:class:`matplotlib.pyplot.axes`, optional
The figure axes on which the wind rose is plotted.
Defaults to :obj:`None`.
color : string, optional
The color for the wind plant boundaries
linestyle : string, optional
Style to plot the boundary lines
linewidth : float, optional
The width of the boundary lines
"""
if ax is None:
_, ax = plt.subplots()
if geom.type == 'Polygon':
exterior_coords = geom.exterior.coords[:]
x, y = get_xy(exterior_coords)
ax.fill(x, y, color="C0", alpha=0.25)
ax.plot(x, y, color=color, linestyle=linestyle, linewidth=linewidth)
for interior in geom.interiors:
interior_coords = interior.coords[:]
x, y = get_xy(interior_coords)
ax.fill(x, y, color="white", alpha=1.0)
ax.plot(x, y, "--k", linewidth=0.5)
elif geom.type == 'MultiPolygon':
        for part in geom.geoms:
exterior_coords = part.exterior.coords[:]
x, y = get_xy(exterior_coords)
ax.fill(x, y, color="C0", alpha=0.25)
ax.plot(x, y, color=color, linestyle=linestyle,
linewidth=linewidth)
for interior in part.interiors:
interior_coords = interior.coords[:]
x, y = get_xy(interior_coords)
ax.fill(x, y, color="white", alpha=1.0)
ax.plot(x, y, "--k", linewidth=0.5)
return ax
def plot_turbines(x, y, r, ax=None, color="C0", nums=False):
"""plot wind turbine locations
Parameters
----------
x, y : array
Wind turbine x and y locations
r : float
Wind turbine radius
ax :py:class:`matplotlib.pyplot.axes`, optional
The figure axes on which the wind rose is plotted.
Defaults to :obj:`None`.
color : string, optional
The color for the wind plant boundaries
nums : bool, optional
Option to show the turbine numbers next to each turbine
"""
# Set up figure
if ax is None:
_, ax = plt.subplots()
n = len(x)
for i in range(n):
t = plt.Circle((x[i], y[i]), r, color=color)
ax.add_patch(t)
if nums is True:
ax.text(x[i], y[i], "%s" % (i + 1))
return ax
def plot_windrose(wind_directions, wind_speeds, wind_frequencies, ax=None,
colors=None):
"""plot windrose
Parameters
----------
wind_directions : 1D array
Wind direction samples
wind_speeds : 1D array
Wind speed samples
wind_frequencies : 2D array
Frequency of wind direction and speed samples
ax :py:class:`matplotlib.pyplot.axes`, optional
The figure axes on which the wind rose is plotted.
Defaults to :obj:`None`.
    colors : array, optional
        The colors for the different wind speed bins
"""
if ax is None:
_, ax = plt.subplots(subplot_kw=dict(polar=True))
ndirs = len(wind_directions)
nspeeds = len(wind_speeds)
if colors is None:
colors = []
for i in range(nspeeds):
colors = np.append(colors, "C%s" % i)
    # convert compass bearings to math-convention radians without mutating
    # the caller's array
    wind_directions = np.deg2rad(90.0 - np.asarray(wind_directions, dtype=float))
width = 0.8 * 2 * np.pi / len(wind_directions)
for i in range(ndirs):
bottom = 0.0
for j in range(nspeeds):
if i == 0:
if j < nspeeds - 1:
ax.bar(wind_directions[i], wind_frequencies[j, i],
bottom=bottom, width=width, edgecolor="black",
color=[colors[j]],
label="%s-%s m/s" % (int(wind_speeds[j]),
int(wind_speeds[j + 1]))
)
else:
ax.bar(wind_directions[i], wind_frequencies[j, i],
bottom=bottom, width=width, edgecolor="black",
color=[colors[j]],
label="%s+ m/s" % int(wind_speeds[j])
)
else:
ax.bar(wind_directions[i], wind_frequencies[j, i],
bottom=bottom, width=width, edgecolor="black",
color=[colors[j]])
bottom = bottom + wind_frequencies[j, i]
ax.legend(bbox_to_anchor=(1.3, 1), fontsize=10)
pi = np.pi
ax.set_xticks((0, pi / 4, pi / 2, 3 * pi / 4, pi, 5 * pi / 4,
3 * pi / 2, 7 * pi / 4))
ax.set_xticklabels(("E", "NE", "N", "NW", "W", "SW", "S", "SE"),
fontsize=10)
plt.yticks(fontsize=10)
plt.subplots_adjust(left=0.0, right=1.0, top=0.9, bottom=0.1)
return ax | PypiClean |
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/flatpages/views.py | from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import get_current_site
from django.core.xheaders import populate_xheaders
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.template import loader, RequestContext
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
DEFAULT_TEMPLATE = 'flatpages/default.html'
# This view is called from FlatpageFallbackMiddleware.process_response
# when a 404 is raised, which often means CsrfViewMiddleware.process_view
# has not been called even if CsrfViewMiddleware is installed. So we need
# to use @csrf_protect, in case the template needs {% csrf_token %}.
# However, we can't just wrap this view; if no matching flatpage exists,
# or a redirect is required for authentication, the 404 needs to be returned
# without any CSRF checks. Therefore, we only
# CSRF protect the internal implementation.
def flatpage(request, url):
"""
Public interface to the flat page view.
Models: `flatpages.flatpages`
Templates: Uses the template defined by the ``template_name`` field,
or :template:`flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object
"""
if not url.startswith('/'):
url = '/' + url
site_id = get_current_site(request).id
try:
f = get_object_or_404(FlatPage,
url__exact=url, sites__id__exact=site_id)
except Http404:
if not url.endswith('/') and settings.APPEND_SLASH:
url += '/'
f = get_object_or_404(FlatPage,
url__exact=url, sites__id__exact=site_id)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render_flatpage(request, f)
@csrf_protect
def render_flatpage(request, f):
"""
Internal interface to the flat page view.
"""
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
c = RequestContext(request, {
'flatpage': f,
})
response = HttpResponse(t.render(c))
populate_xheaders(request, response, FlatPage, f.id)
return response | PypiClean |
/ExpertOp4Grid-0.1.5.post2.tar.gz/ExpertOp4Grid-0.1.5.post2/docs/README.rst | Mentions
========
Quick Overview
--------------
This is an Expert System which tries to solve a security issue on a power grid, namely an overload over a power line, when it happens. It uses cheap but non-linear topological actions to do so, and does not require any training. For any new overloaded situation, it computes an influence graph around the overload of interest and ranks the substations and topologies to explore to find a solution. It then simulates the top-ranked topologies and gives each a score of success:

- 4 - it solves all overloads
- 3 - it solves only the overload of interest
- 2 - it partially solves the overload of interest
- 1 - it solves the overload of interest but worsens other overloads
- 0 - it fails

The expert system is an implementation of the paper: "Expert system for topological action discovery in smart grids" - https://hal.archives-ouvertes.fr/hal-01897931/file/_LARGE__bf_Expert_System_for_topological_remedial_action_discovery_in_smart_grids.pdf
.. image:: ../alphaDeesp/ressources/g_over_grid2op_ltc9.PNG
Influence Graph example for overloaded line 4->5. The electrical paths highlighted there will help us identify interesting topologies to reroute the flows.
Features
--------
- Analyse a power network when a line is in overflow
- Run simulations to understand the network constraints
- Return a ranking of topological actions that would solve the overflow, or reduce it
- If run manually (through the command line), can also output a series of graphs to help visualise the state of the network
Contribute
----------
- Issue Tracker: https://github.com/marota/ExpertOp4Grid/issues
- Source Code: https://github.com/marota/ExpertOp4Grid
Support
-------
If you are having issues, please let us know.
We have a discord located at: $discordlink
License
-------
Copyright 2019-2020 RTE France
RTE: http://www.rte-france.com
This Source Code is subject to the terms of the Mozilla Public License (MPL) v2.
| PypiClean |
/Ageas-0.0.1a6.tar.gz/Ageas-0.0.1a6/ageas/_main.py | import re
import os
import sys
import copy
import time
import threading
import warnings
from pkg_resources import resource_filename
import ageas
import ageas.tool.json as json
import ageas.lib.psgrn_caster as psgrn
import ageas.lib.meta_grn_caster as meta_grn
import ageas.lib.config_maker as config_maker
import ageas.lib.atlas_extractor as extractor
import ageas.database_setup.binary_class as binary_db
GRP_TYPES = ['Standard', 'Outer', 'Bridge', 'Mix']
class Launch:
"""
Main function to launch AGEAS
Args:
class1_path: <str> Default = None
Path to file or folder being considered as type class 1 data
class2_path: <str> Default = None
Path to file or folder being considered as type class 2 data
clf_keep_ratio: <float> Default = 0.5
            Portion of classifier models to keep after each model selection
iteration.
.. note::
When performing SHA based model selection, this value is
set as lower bound to keep models
clf_accuracy_thread: <float> Default = 0.8
Filter thread of classifier's accuracy in local test performed at
each model selection iteration
.. note::
When performing SHA based model selection, this value is
only used at last iteration
correlation_thread: <float> Default = 0.2
Gene expression correlation thread value of GRPs
Potential GRPs failed to reach this value will be dropped
cpu_mode: <bool> Default = False
            Whether to force using CPU only or not
database_path: <str> Default = None
Database header. If specified, class1_path and class2_path will be
rooted here.
database_type: <str> Default = 'gem_files'
            Type of data class1_path and class2_path are pointing to
Supporting:
'gem_files': Each path is directing to a GEM file.
Pseudo samples will be generated with sliding window algo
'gem_folders': Each path is directing to a GEM folder. Files in
each folder will be used to generate pseudo samples
                'mex_folders': Each path is directing to a folder consisting of MEX
                    files (***matrix.mtx***, ***genes.tsv***, ***barcodes.tsv***).
                    Pseudo samples will be generated with the sliding window technique
factor_name_type: <str> Default = 'gene_name'
What type of ID name to use for each gene.
Supporting:
'gene_name': Gene Symbols/Names
'ens_id': Ensembl ID
.. note::
If using BioGRID as interaction database, factor_name_type
must be set to 'gene_name' for now.
# TODO: Find a way to map gene names with Ensembl IDs
feature_dropout_ratio: <float> Default = 0.1
Portion of features(GRPs) to be dropped out after each iteration of
feature selection.
feature_select_iteration: <int> Default = 1
            Number of iterations for feature (GRP) selection before
key GRP extraction
interaction_database: <str> Default = 'gtrd'
Which interaction database to use for confirming a GRP has a high
possibility to exist.
Supporting:
None: No database will be used. As long as a GRP can pass all
related filters, it's good to go.
'gtrd': Using GTRD as regulatory pathway reference
https://gtrd.biouml.org/
'biogrid': Using BioGRID as regulatory pathway reference
https://thebiogrid.org/
impact_depth: <int> Default = 3
When assessing a TF's regulatory impact on other genes, how far the
distance between TF and potential regulatory source can be.
.. note::
                The stepped correlation strength between the TF and the gene
                still needs to be greater than correlation_thread.
top_grp_amount: <int> Default = 100
Amount of GRPs an AGEAS unit would extract.
.. note::
If outlier_thread is set, since outlier GRPs are extracted
during feature selection part and will also be considered as
key GRPs, actual amount of key GRPs would be greater.
grp_changing_thread: <float> Default = 0.05
            If the portion of key GRPs changing between two stabilization
            iterations of an AGEAS unit is lower than this thread, the two
            iterations will be considered as having consistent results.
log2fc_thread: <float> Default = None
            Log2 fold change thread to filter out non-differentially expressing genes.
.. note::
It's generally not encouraged to set up this filter since it can
                result in losing key TFs that show little change in overall
                expression volume but do change their expression pattern.
                If local computational power is relatively limited, setting up
                this thread can help a lot to keep the program runnable.
link_step_allowrance: <int> Default = 1
During key atlas extraction, when finding bridge GRPs to link 2
separate regulons, how many steps will be allowed.
            link_step_allowrance == 1 means no intermediate gene can be used,
            and a potential regulatory source must be able to interact with a
            gene from another regulon.
meta_load_path: <str> Default = None
Path to load meta_GRN
meta_save_path: <str> Default = None
Path to save meta_GRN
model_config_path: <str> Default = None
Path to load model config file which will be used to initialize
classifiers
model_select_iteration: <int> Default = 2
            Number of iterations for classification model selection before
the mandatory filter.
        unit_silent: <bool> Default = True
            Whether AGEAS units are silenced (no log printout) while running.
.. note::
It's not mandatory but encouraged to remain True especially
when using multi protocol
        mww_p_val_thread: <float> Default = 0.05
            Gene expression Mann–Whitney–Wilcoxon test p-value thread.
            To make sure one gene's expression profile is not constant among
            different classes.
outlier_thread: <float> Default = 3.0
            The lower bound of the Z-score scaled importance value to consider
            a GRP as an outlier that needs to be retained.
protocol: <str> Default = 'solo'
AGEAS unit launching protocol.
Supporting:
'solo': All units will run separately
                'multi': All units will run in parallel via multithreading
patient: <int> Default = 3
            If this many stabilization iterations in a row produce consistent
            results, an early stop on result stabilization will be executed.
psgrn_load_path: <str> Default = None
Path to load pseudo-sample GRNs.
psgrn_save_path: <str> Default = None
Path to save pseudo-sample GRNs.
prediction_thread: <str> or <float> Default = 'auto'
The importance thread for a GRP predicted with GRNBoost2-like algo
to be included.
Supporting:
                'auto': Automatically set the thread value to the minimum
                    importance value of an interaction-database-recorded GRP
                    of the TF having the most GRPs. If no interaction database
                    is used, it will be set to (1 / amount of genes)
float type: Value will be set as thread directly
report_folder_path: <str> Default = None
Path to create folder for saving AGEAS report files.
save_unit_reports: <bool> Default = False
            Whether to save key GRPs extracted by each AGEAS Unit or not.
If True, reports will be saved in report_folder_path under folders
named 'no_{}'.format(unit_num) starting from 0.
specie: <str> Default = 'mouse'
            Specify which species' interaction database shall be used.
Supporting:
'mouse'
'human'
sliding_window_size: <int> Default = 10
Number of samples a pseudo-sample generated with
sliding window technique contains.
sliding_window_stride: <int> Default = None
Stride of sliding window when generating pseudo-samples.
std_value_thread: <float> Default = None
Set up gene expression standard deviation thread by value.
To rule out genes having relatively constant expression in each type
class.
std_ratio_thread: <float> Default = None
Set up gene expression standard deviation thread by portion.
Only genes reaching top portion will be kept in each type class.
stabilize_iteration: <int> Default = 10
            Number of iterations for an AGEAS unit to repeat key GRP extraction
            after model and feature selections in order to find key GRPs
            consistently being important.
max_train_size: <float> Default = 0.95
            The largest portion of available data that can be used to train models.
At the mandatory model filter, this portion of data will be given to
each model to train.
unit_num: <int> Default = 2
Number of AGEAS units to launch.
warning_filter: <str> Default = 'ignore'
How warnings should be filtered.
For other options, please check 'The Warnings Filter' section in:
https://docs.python.org/3/library/warnings.html#warning-filter
z_score_extract_thread: <float> Default = 0.0
The lower bound of Z-score scaled importance value to extract a GRP.
Inputs: None
Outputs: None
Attributes:
Examples::
            >>> easy = ageas.Launch(
            ...     class1_path = 'Odysseia/2kTest/ips.csv',
            ...     class2_path = 'Odysseia/2kTest/mef.csv',
            ... )
"""
def __init__(self,
class1_path:str = None,
class2_path:str = None,
clf_keep_ratio:float = 0.5,
clf_accuracy_thread:float = 0.8,
correlation_thread:float = 0.2,
cpu_mode:bool = False,
database_path:str = None,
database_type:str = 'gem_files',
factor_name_type:str = 'gene_name',
feature_dropout_ratio:float = 0.1,
feature_select_iteration:int = 1,
interaction_database:str = 'gtrd',
impact_depth:int = 3,
top_grp_amount:int = 100,
grp_changing_thread:float = 0.05,
log2fc_thread:float = None,
link_step_allowrance:int = 1,
meta_load_path:str = None,
meta_save_path:str = None,
model_config_path:str= None,
model_select_iteration:int = 2,
mww_p_val_thread:str = 0.05,
outlier_thread:float = 3.0,
protocol:str = 'solo',
patient:int = 3,
psgrn_load_path:str = None,
psgrn_save_path:str = None,
prediction_thread = 'auto',
report_folder_path:str = None,
save_unit_reports:bool = False,
specie:str = 'mouse',
sliding_window_size:int = 10,
sliding_window_stride:int = None,
std_value_thread:float = None,
std_ratio_thread:float = None,
stabilize_iteration:int = 10,
max_train_size:float = 0.95,
unit_num:int = 2,
unit_silent:bool = True,
warning_filter:str = 'ignore',
z_score_extract_thread:float = 0.0,
):
super(Launch, self).__init__()
""" Initialization """
print('Launching Ageas')
warnings.filterwarnings(warning_filter)
start = time.time()
self.reports = list()
self.protocol = protocol
self.unit_num = unit_num
self.silent = unit_silent
self.impact_depth = impact_depth
# Get database information
self.database_info = binary_db.Setup(
database_path,
database_type,
class1_path,
class2_path,
specie,
factor_name_type,
interaction_database,
sliding_window_size,
sliding_window_stride
)
# Get model configs
if model_config_path is None:
path = resource_filename(__name__, 'data/config/list_config.js')
self.model_config = config_maker.List_Config_Reader(path)
else:
self.model_config = json.decode(model_config_path)
# Prepare report folder
self.report_folder_path = report_folder_path
if self.report_folder_path is not None:
if self.report_folder_path[-1] != '/':
self.report_folder_path += '/'
if not os.path.exists(self.report_folder_path):
os.makedirs(self.report_folder_path)
self.save_unit_reports = save_unit_reports
if self.save_unit_reports and self.report_folder_path is None:
raise Exception('Report Path must be given to save unit reports!')
print('Time to Boot: ', time.time() - start)
# Make or load psGRNs and meta GRN
start = time.time()
if meta_load_path is not None and psgrn_load_path is not None:
self.meta = meta_grn.Cast(load_path = meta_load_path)
self.pseudo_grns = psgrn.Make(load_path = psgrn_load_path)
else:
self.meta, self.pseudo_grns = self.get_pseudo_grns(
database_info = self.database_info,
std_value_thread = std_value_thread,
std_ratio_thread = std_ratio_thread,
mww_p_val_thread = mww_p_val_thread,
log2fc_thread = log2fc_thread,
prediction_thread = prediction_thread,
correlation_thread = correlation_thread,
meta_load_path = meta_load_path,
)
# Meta GRN Analysis
self.meta_report = meta_grn.Analysis(self.meta.grn)
# Save docs if specified path
if self.report_folder_path is not None:
self.meta_report.save(self.report_folder_path + 'meta_report.csv')
if psgrn_save_path is not None:
self.pseudo_grns.save(psgrn_save_path)
if meta_save_path is not None:
self.meta.grn.save_json(meta_save_path)
print('Time to cast or load Pseudo-Sample GRNs : ', time.time() - start)
print('\nDeck Ready')
start = time.time()
# Initialize a basic unit
self.basic_unit = ageas.Unit(
meta = self.meta,
pseudo_grns = self.pseudo_grns,
model_config = self.model_config,
database_info = self.database_info,
cpu_mode = cpu_mode,
correlation_thread = correlation_thread,
top_grp_amount = top_grp_amount,
z_score_extract_thread = z_score_extract_thread,
max_train_size = max_train_size,
clf_keep_ratio = clf_keep_ratio,
clf_accuracy_thread = clf_accuracy_thread,
model_select_iteration = model_select_iteration,
outlier_thread = outlier_thread,
feature_dropout_ratio = feature_dropout_ratio,
feature_select_iteration = feature_select_iteration,
patient = patient,
grp_changing_thread = grp_changing_thread,
stabilize_iteration = stabilize_iteration,
impact_depth = impact_depth,
link_step_allowrance = link_step_allowrance,
)
self.lockon = threading.Lock()
print('Protocol:', self.protocol)
print('Silent:', self.silent)
# Do everything unit by unit
if self.protocol == 'solo':
self.proto_solo()
# Multithreading protocol
elif self.protocol == 'multi':
self.proto_multi()
self.atlas = self.combine_unit_reports()
print('Operation Time: ', time.time() - start)
if self.report_folder_path is not None:
print('Generating Report Files')
self._save_atlas_as_json(
self.atlas.regulons,
self.report_folder_path + 'key_atlas.js'
)
self.atlas.report(self.meta.grn).to_csv(
self.report_folder_path + 'report.csv',
index = False
)
print('\nComplete\n')
# Protocol SOLO
def proto_solo(self):
for i in range(self.unit_num):
id = 'RN_' + str(i)
new_unit = copy.deepcopy(self.basic_unit)
print('Unit', id, 'Ready')
print('\nSending Unit', id, '\n')
if self.silent: sys.stdout = open(os.devnull, 'w')
new_unit.select_models()
new_unit.launch()
new_unit.generate_regulons()
self.reports.append(new_unit.atlas)
if self.silent: sys.stdout = sys.__stdout__
print(id, 'RTB\n')
# Protocol MULTI
def proto_multi(self):
units = []
for i in range(self.unit_num):
id = 'RN_' + str(i)
units.append(threading.Thread(target=self.multi_unit, name=id))
print('Unit', id, 'Ready')
# Time to work
print('\nSending All Units\n')
if self.silent: sys.stdout = open(os.devnull, 'w')
# Start each unit
for unit in units: unit.start()
# Wait till all thread terminates
for unit in units: unit.join()
if self.silent: sys.stdout = sys.__stdout__
print('Units RTB\n')
    # Model selection and regulon construction parts run in parallel
    def multi_unit(self):
new_unit = copy.deepcopy(self.basic_unit)
new_unit.select_models()
        # acquire the lock here since concurrent SHAP calls would raise an error
self.lockon.acquire()
new_unit.launch()
self.lockon.release()
new_unit.generate_regulons()
self.reports.append(new_unit.atlas)
del new_unit
# Combine information from reports returned by each unit
def combine_unit_reports(self):
all_grps = dict()
for index, atlas in enumerate(self.reports):
# save unit report if asking
if self.save_unit_reports:
report_path = self.report_folder_path + 'no_' + str(index) + '/'
if not os.path.exists(report_path): os.makedirs(report_path)
atlas.grps.save(report_path + 'grps_importances.txt')
json.encode(atlas.outlier_grps, report_path+'outlier_grps.js')
for regulon in atlas.regulons.values():
for id, record in regulon.grps.items():
if id not in all_grps:
all_grps[id] = record
elif id in all_grps:
all_grps[id] = self._combine_grp_records(
record_1 = all_grps[id],
record_2 = record
)
# now we build regulons
regulon = extractor.Extract()
for id, grp in all_grps.items():
regulon.update_regulon_with_grp(
grp = grp,
meta_grn = self.meta.grn
)
regulon.find_bridges(meta_grn = self.meta.grn)
regulon.update_genes(impact_depth = self.impact_depth)
regulon.change_regulon_list_to_dict()
return regulon
# get pseudo-cGRNs from GEMs or GRNs
def get_pseudo_grns(self,
database_info = None,
std_value_thread = 100,
std_ratio_thread = None,
mww_p_val_thread = 0.05,
log2fc_thread = 0.1,
prediction_thread = 'auto',
correlation_thread = 0.2,
meta_load_path = None
):
meta = None
# if reading in GEMs, we need to construct pseudo-cGRNs first
# or if we are reading in MEX, make GEM first and then mimic GEM mode
if (re.search(r'gem' , database_info.type) or
re.search(r'mex' , database_info.type)):
gem_data = binary_db.Load_GEM(
database_info,
mww_p_val_thread,
log2fc_thread,
std_value_thread
)
start1 = time.time()
# Let kirke casts GRN construction guidance first
meta = meta_grn.Cast(
gem_data = gem_data,
prediction_thread = prediction_thread,
correlation_thread = correlation_thread,
load_path = meta_load_path
)
print('Time to cast Meta GRN : ', time.time() - start1)
psGRNs = psgrn.Make(
database_info = database_info,
std_value_thread = std_value_thread,
std_ratio_thread = std_ratio_thread,
correlation_thread = correlation_thread,
gem_data = gem_data,
meta_grn = meta.grn
)
# if we are reading in GRNs directly, just process them
elif re.search(r'grn' , database_info.type):
psGRNs = None
            print('trainer.py: mode GRN needs to be revised here')
        else:
            raise Exception('Unrecognized database type: ' + str(database_info.type))
return meta, psGRNs
# combine information of same GRP form different reports
def _combine_grp_records(self, record_1, record_2):
answer = copy.deepcopy(record_1)
if answer.type != record_2.type:
if answer.type == GRP_TYPES[2]:
assert answer.score == 0
if record_2.type != GRP_TYPES[2]:
answer.type = record_2.type
answer.score = record_2.score
else:
if record_2.type != GRP_TYPES[2]:
answer.type = GRP_TYPES[3]
answer.score = max(answer.score, record_2.score)
else:
answer.score = max(answer.score, record_2.score)
return answer
# change class objects to dicts and save regulons in JSON format
def _save_atlas_as_json(self, regulons, path):
json.encode({k:v.as_dict() for k,v in regulons.items()}, path) | PypiClean |
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/savings_meter_bill_daily_response_py3.py |
from msrest.serialization import Model
class SavingsMeterBillDailyResponse(Model):
"""SavingsMeterBillDailyResponseDTO - This class is the DTO for a meter's
bill's Cost Avoidance savings in daily format
It provides the meter, bill, use unit, and then an array of daily savings
values
Each daily value contains date that represents this day, non-weather use,
weather use, BATCC Cost, average daily unit cost,
whether any of the four adjustments (special, area, weather, other) were
performed
and a list of messages generated by the Savings Processor for this day.
:param meter_id: Unique meter identifier
:type meter_id: int
:param meter_code: Meter Code
:type meter_code: str
:param meter_info: Meter Name
:type meter_info: str
:param bill_id: Unique bill identifier
:type bill_id: int
:param period_name: Calendar Period Name (e.g. Jan)
:type period_name: str
:param calendar_period: Calendar period Number (e.g. 1)
:type calendar_period: int
:param calendar_year: Calendar year (e.g. 2019)
:type calendar_year: int
:param fiscal_period: Fiscal period number
:type fiscal_period: int
:param fiscal_year: Fiscal year
:type fiscal_year: int
:param native_use_unit:
:type native_use_unit: ~energycap.sdk.models.UnitChild
:param daily: Cost avoidance savings for this meter, day by day
:type daily: list[~energycap.sdk.models.SavingsDailyData]
"""
_attribute_map = {
'meter_id': {'key': 'meterId', 'type': 'int'},
'meter_code': {'key': 'meterCode', 'type': 'str'},
'meter_info': {'key': 'meterInfo', 'type': 'str'},
'bill_id': {'key': 'billId', 'type': 'int'},
'period_name': {'key': 'periodName', 'type': 'str'},
'calendar_period': {'key': 'calendarPeriod', 'type': 'int'},
'calendar_year': {'key': 'calendarYear', 'type': 'int'},
'fiscal_period': {'key': 'fiscalPeriod', 'type': 'int'},
'fiscal_year': {'key': 'fiscalYear', 'type': 'int'},
'native_use_unit': {'key': 'nativeUseUnit', 'type': 'UnitChild'},
'daily': {'key': 'daily', 'type': '[SavingsDailyData]'},
}
def __init__(self, *, meter_id: int=None, meter_code: str=None, meter_info: str=None, bill_id: int=None, period_name: str=None, calendar_period: int=None, calendar_year: int=None, fiscal_period: int=None, fiscal_year: int=None, native_use_unit=None, daily=None, **kwargs) -> None:
super(SavingsMeterBillDailyResponse, self).__init__(**kwargs)
self.meter_id = meter_id
self.meter_code = meter_code
self.meter_info = meter_info
self.bill_id = bill_id
self.period_name = period_name
self.calendar_period = calendar_period
self.calendar_year = calendar_year
self.fiscal_period = fiscal_period
self.fiscal_year = fiscal_year
self.native_use_unit = native_use_unit
self.daily = daily | PypiClean |
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/lib_srcs/node.js/0.12/util.js | var util = {};
/**
* Deprecated predecessor of console.error.
* @param string
*/
util.debug = function(string) {}
/**
* Inherit the prototype methods from one constructor into another. The
* prototype of constructor will be set to a new object created from
* superConstructor.
* @param constructor
* @param superConstructor
*/
util.inherits = function(constructor, superConstructor) {}
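/*
 * Usage sketch (illustrative; standard Node.js semantics assumed):
 *
 *   function Base() {}
 *   function Derived() { Base.call(this); }
 *   util.inherits(Derived, Base);
 *   // (new Derived()) instanceof Base === true
 */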
/**
* Deprecated predecessor of stream.pipe().
* @param readableStream
* @param writableStream
* @param callback
*/
util.pump = function(readableStream, writableStream, callback) {}
/**
* Return a string representation of object, which is useful for debugging.
* @param object
* @param options
*/
util.inspect = function(object, options) {}
/**
* Output with timestamp on stdout.
* @param string
*/
util.log = function(string) {}
/**
* This is used to create a function which conditionally writes to stderr
* based on the existence of a NODE_DEBUG environment variable. If the
* section name appears in that environment variable, then the returned
* function will be similar to console.error(). If not, then the returned
* function is a no-op.
* @param section {String}
* @returns The logging function
*/
util.debuglog = function(section) {}
/**
* Marks that a method should not be used any more.
 * @param fn
 * @param string
 */
util.deprecate = function(fn, string) {}
/**
* Deprecated predecessor of console.error.
*/
util.error = function() {}
/**
* Returns a formatted string using the first argument as a printf-like
* format.
* @param format
* @returns a formatted string using the first argument as a printf-like format
*/
util.format = function(format) {}
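/*
 * Usage sketch (illustrative; %s, %d and %j are the documented
 * printf-like placeholders):
 *
 *   util.format('%s has %d items', 'cart', 3); // => 'cart has 3 items'
 */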
/**
* Internal alias for Array.isArray.
* @param object
*/
util.isArray = function(object) {}
/**
* Returns true if the given "object" is a Date. false otherwise.
* @param object
* @returns true if the given "object" is a Date
*/
util.isDate = function(object) {}
/**
* Returns true if the given "object" is an Error. false otherwise.
* @param object
* @returns true if the given "object" is an Error
*/
util.isError = function(object) {}
/**
* Returns true if the given "object" is a RegExp. false otherwise.
* @param object
* @returns true if the given "object" is a RegExp
*/
util.isRegExp = function(object) {}
/**
* Deprecated predecessor of console.log.
*/
util.print = function() {}
/**
* Deprecated predecessor of console.log.
*/
util.puts = function() {}
exports = util; | PypiClean |
/Adafruit_Blinka-8.20.1-py3-none-any.whl/adafruit_blinka/microcontroller/mcp2221/mcp2221.py | """Chip Definition for MCP2221"""
import os
import time
import atexit
import hid
# Here if you need it
MCP2221_HID_DELAY = float(os.environ.get("BLINKA_MCP2221_HID_DELAY", 0))
# Use to set delay between reset and device reopen. if negative, don't reset at all
MCP2221_RESET_DELAY = float(os.environ.get("BLINKA_MCP2221_RESET_DELAY", 0.5))
# from the C driver
# http://ww1.microchip.com/downloads/en/DeviceDoc/mcp2221_0_1.tar.gz
# others (???) determined during driver development
RESP_ERR_NOERR = 0x00
RESP_ADDR_NACK = 0x25
RESP_READ_ERR = 0x7F
RESP_READ_COMPL = 0x55
RESP_READ_PARTIAL = 0x54 # ???
RESP_I2C_IDLE = 0x00
RESP_I2C_START_TOUT = 0x12
RESP_I2C_RSTART_TOUT = 0x17
RESP_I2C_WRADDRL_TOUT = 0x23
RESP_I2C_WRADDRL_WSEND = 0x21
RESP_I2C_WRADDRL_NACK = 0x25
RESP_I2C_WRDATA_TOUT = 0x44
RESP_I2C_RDDATA_TOUT = 0x52
RESP_I2C_STOP_TOUT = 0x62
RESP_I2C_MOREDATA = 0x43 # ???
RESP_I2C_PARTIALDATA = 0x41 # ???
RESP_I2C_WRITINGNOSTOP = 0x45 # ???
MCP2221_RETRY_MAX = 50
MCP2221_MAX_I2C_DATA_LEN = 60
MASK_ADDR_NACK = 0x40
class MCP2221:
"""MCP2221 Device Class Definition"""
VID = 0x04D8
PID = 0x00DD
GP_GPIO = 0b000
GP_DEDICATED = 0b001
GP_ALT0 = 0b010
GP_ALT1 = 0b011
GP_ALT2 = 0b100
def __init__(self):
self._hid = hid.device()
self._hid.open(MCP2221.VID, MCP2221.PID)
# make sure the device gets closed before exit
atexit.register(self.close)
if MCP2221_RESET_DELAY >= 0:
self._reset()
self._gp_config = [0x07] * 4 # "don't care" initial value
for pin in range(4):
self.gp_set_mode(pin, self.GP_GPIO) # set to GPIO mode
self.gpio_set_direction(pin, 1) # set to INPUT
def close(self):
"""Close the hid device. Does nothing if the device is not open."""
self._hid.close()
def __del__(self):
# try to close the device before destroying the instance
self.close()
def _hid_xfer(self, report, response=True):
"""Perform HID Transfer"""
# first byte is report ID, which =0 for MCP2221
        # remaining bytes = 64 byte report data
# https://github.com/libusb/hidapi/blob/083223e77952e1ef57e6b77796536a3359c1b2a3/hidapi/hidapi.h#L185
self._hid.write(b"\0" + report + b"\0" * (64 - len(report)))
time.sleep(MCP2221_HID_DELAY)
if response:
# return is 64 byte response report
return self._hid.read(64)
return None
# ----------------------------------------------------------------
# MISC
# ----------------------------------------------------------------
def gp_get_mode(self, pin):
"""Get Current Pin Mode"""
return self._hid_xfer(b"\x61")[22 + pin] & 0x07
def gp_set_mode(self, pin, mode):
"""Set Current Pin Mode"""
# already set to that mode?
mode &= 0x07
if mode == (self._gp_config[pin] & 0x07):
return
# update GP mode for pin
self._gp_config[pin] = mode
# empty report, this is safe since 0's = no change
report = bytearray(b"\x60" + b"\x00" * 63)
# set the alter GP flag byte
report[7] = 0xFF
        # add GP settings
report[8] = self._gp_config[0]
report[9] = self._gp_config[1]
report[10] = self._gp_config[2]
report[11] = self._gp_config[3]
# and make it so
self._hid_xfer(report)
def _pretty_report(self, register):
report = self._hid_xfer(register)
print(" 0 1 2 3 4 5 6 7 8 9")
index = 0
for row in range(7):
print("{} : ".format(row), end="")
for _ in range(10):
print("{:02x} ".format(report[index]), end="")
index += 1
if index > 63:
break
print()
def _status_dump(self):
self._pretty_report(b"\x10")
def _sram_dump(self):
self._pretty_report(b"\x61")
def _reset(self):
self._hid_xfer(b"\x70\xAB\xCD\xEF", response=False)
self._hid.close()
time.sleep(MCP2221_RESET_DELAY)
start = time.monotonic()
while time.monotonic() - start < 5:
try:
self._hid.open(MCP2221.VID, MCP2221.PID)
except OSError:
# try again
time.sleep(0.1)
continue
return
raise OSError("open failed")
# ----------------------------------------------------------------
# GPIO
# ----------------------------------------------------------------
def gpio_set_direction(self, pin, mode):
"""Set Current GPIO Pin Direction"""
if mode:
# set bit 3 for INPUT
self._gp_config[pin] |= 1 << 3
else:
# clear bit 3 for OUTPUT
self._gp_config[pin] &= ~(1 << 3)
report = bytearray(b"\x50" + b"\x00" * 63) # empty set GPIO report
offset = 4 * (pin + 1)
report[offset] = 0x01 # set pin direction
report[offset + 1] = mode # to this
self._hid_xfer(report)
def gpio_set_pin(self, pin, value):
"""Set Current GPIO Pin Value"""
if value:
# set bit 4
self._gp_config[pin] |= 1 << 4
else:
# clear bit 4
self._gp_config[pin] &= ~(1 << 4)
report = bytearray(b"\x50" + b"\x00" * 63) # empty set GPIO report
offset = 2 + 4 * pin
report[offset] = 0x01 # set pin value
report[offset + 1] = value # to this
self._hid_xfer(report)
def gpio_get_pin(self, pin):
"""Get Current GPIO Pin Value"""
resp = self._hid_xfer(b"\x51")
offset = 2 + 2 * pin
if resp[offset] == 0xEE:
raise RuntimeError("Pin is not set for GPIO operation.")
return resp[offset]
# ----------------------------------------------------------------
# I2C
# ----------------------------------------------------------------
def _i2c_status(self):
resp = self._hid_xfer(b"\x10")
if resp[1] != 0:
raise RuntimeError("Couldn't get I2C status")
return resp
def _i2c_state(self):
return self._i2c_status()[8]
def _i2c_cancel(self):
resp = self._hid_xfer(b"\x10\x00\x10")
if resp[1] != 0x00:
raise RuntimeError("Couldn't cancel I2C")
if resp[2] == 0x10:
# bus release will need "a few hundred microseconds"
time.sleep(0.001)
# pylint: disable=too-many-arguments,too-many-branches
def _i2c_write(self, cmd, address, buffer, start=0, end=None):
if self._i2c_state() != 0x00:
self._i2c_cancel()
end = end if end else len(buffer)
length = end - start
retries = 0
while (end - start) > 0 or not buffer:
chunk = min(end - start, MCP2221_MAX_I2C_DATA_LEN)
# write out current chunk
resp = self._hid_xfer(
bytes([cmd, length & 0xFF, (length >> 8) & 0xFF, address << 1])
+ buffer[start : (start + chunk)]
)
# check for success
if resp[1] != 0x00:
if resp[2] in (
RESP_I2C_START_TOUT,
RESP_I2C_WRADDRL_TOUT,
RESP_I2C_WRADDRL_NACK,
RESP_I2C_WRDATA_TOUT,
RESP_I2C_STOP_TOUT,
):
raise RuntimeError("Unrecoverable I2C state failure")
retries += 1
if retries >= MCP2221_RETRY_MAX:
raise RuntimeError("I2C write error, max retries reached.")
time.sleep(0.001)
continue # try again
# yay chunk sent!
while self._i2c_state() == RESP_I2C_PARTIALDATA:
time.sleep(0.001)
if not buffer:
break
start += chunk
retries = 0
# check status in another loop
for _ in range(MCP2221_RETRY_MAX):
status = self._i2c_status()
if status[20] & MASK_ADDR_NACK:
raise RuntimeError("I2C slave address was NACK'd")
usb_cmd_status = status[8]
if usb_cmd_status == 0:
break
if usb_cmd_status == RESP_I2C_WRITINGNOSTOP and cmd == 0x94:
break # this is OK too!
if usb_cmd_status in (
RESP_I2C_START_TOUT,
RESP_I2C_WRADDRL_TOUT,
RESP_I2C_WRADDRL_NACK,
RESP_I2C_WRDATA_TOUT,
RESP_I2C_STOP_TOUT,
):
raise RuntimeError("Unrecoverable I2C state failure")
time.sleep(0.001)
else:
raise RuntimeError("I2C write error: max retries reached.")
# whew success!
def _i2c_read(self, cmd, address, buffer, start=0, end=None):
if self._i2c_state() not in (RESP_I2C_WRITINGNOSTOP, 0):
self._i2c_cancel()
end = end if end else len(buffer)
length = end - start
# tell it we want to read
resp = self._hid_xfer(
bytes([cmd, length & 0xFF, (length >> 8) & 0xFF, (address << 1) | 0x01])
)
# check for success
if resp[1] != 0x00:
raise RuntimeError("Unrecoverable I2C read failure")
# and now the read part
while (end - start) > 0:
for _ in range(MCP2221_RETRY_MAX):
# the actual read
resp = self._hid_xfer(b"\x40")
# check for success
if resp[1] == RESP_I2C_PARTIALDATA:
time.sleep(0.001)
continue
if resp[1] != 0x00:
raise RuntimeError("Unrecoverable I2C read failure")
if resp[2] == RESP_ADDR_NACK:
raise RuntimeError("I2C NACK")
if resp[3] == 0x00 and resp[2] == 0x00:
break
if resp[3] == RESP_READ_ERR:
time.sleep(0.001)
continue
if resp[2] in (RESP_READ_COMPL, RESP_READ_PARTIAL):
break
else:
raise RuntimeError("I2C read error: max retries reached.")
# move data into buffer
chunk = min(end - start, 60)
for i, k in enumerate(range(start, start + chunk)):
buffer[k] = resp[4 + i]
start += chunk
# pylint: enable=too-many-arguments
def _i2c_configure(self, baudrate=100000):
"""Configure I2C"""
self._hid_xfer(
bytes(
[
0x10, # set parameters
0x00, # don't care
0x00, # no effect
0x20, # next byte is clock divider
12000000 // baudrate - 3,
]
)
)
def i2c_writeto(self, address, buffer, *, start=0, end=None):
"""Write data from the buffer to an address"""
self._i2c_write(0x90, address, buffer, start, end)
def i2c_readfrom_into(self, address, buffer, *, start=0, end=None):
"""Read data from an address and into the buffer"""
self._i2c_read(0x91, address, buffer, start, end)
def i2c_writeto_then_readfrom(
self,
address,
out_buffer,
in_buffer,
*,
out_start=0,
out_end=None,
in_start=0,
in_end=None,
):
"""Write data from buffer_out to an address and then
read data from an address and into buffer_in
"""
self._i2c_write(0x94, address, out_buffer, out_start, out_end)
self._i2c_read(0x93, address, in_buffer, in_start, in_end)
def i2c_scan(self, *, start=0, end=0x79):
"""Perform an I2C Device Scan"""
found = []
for addr in range(start, end + 1):
# try a write
try:
self.i2c_writeto(addr, b"\x00")
except RuntimeError: # no reply!
continue
# store if success
found.append(addr)
return found
# ----------------------------------------------------------------
# ADC
# ----------------------------------------------------------------
def adc_configure(self, vref=0):
"""Configure the Analog-to-Digital Converter"""
report = bytearray(b"\x60" + b"\x00" * 63)
report[5] = 1 << 7 | (vref & 0b111)
self._hid_xfer(report)
def adc_read(self, pin):
"""Read from the Analog-to-Digital Converter"""
resp = self._hid_xfer(b"\x10")
return resp[49 + 2 * pin] << 8 | resp[48 + 2 * pin]
# ----------------------------------------------------------------
# DAC
# ----------------------------------------------------------------
def dac_configure(self, vref=0):
"""Configure the Digital-to-Analog Converter"""
report = bytearray(b"\x60" + b"\x00" * 63)
report[3] = 1 << 7 | (vref & 0b111)
self._hid_xfer(report)
# pylint: disable=unused-argument
def dac_write(self, pin, value):
"""Write to the Digital-to-Analog Converter"""
report = bytearray(b"\x60" + b"\x00" * 63)
report[4] = 1 << 7 | (value & 0b11111)
self._hid_xfer(report)
# pylint: enable=unused-argument
mcp2221 = MCP2221() | PypiClean |
/ObjectListView2-1.0.0.tar.gz/ObjectListView2-1.0.0/Examples/BatchedUpdateExample.py | import datetime
import os
import os.path
import threading
import time
import wx
# Where can we find the ObjectListView module?
import sys
sys.path.append("..")
from ObjectListView import FastObjectListView, ObjectListView, ColumnDefn, BatchedUpdate
# We store our images as python code
import ExampleImages
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
wx.Frame.__init__(self, *args, **kwds)
self.Init()
def Init(self):
self.InitWidgets()
self.InitObjectListView()
wx.CallLater(1, self.InitModel)
def InitWidgets(self):
# Widget creations
self.statusbar = self.CreateStatusBar(1, 0)
panel1 = wx.Panel(self, -1)
panel12 = wx.Panel(panel1, -1)
self.olv = FastObjectListView(panel1, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
rootText = wx.StaticText(panel12, -1, "&Tree walk from:")
self.tcRoot = wx.DirPickerCtrl(panel12, style=wx.DIRP_USE_TEXTCTRL)
self.btnStart = wx.Button(panel12, -1, "&Start")
secondsText = wx.StaticText(panel12, -1, "Seconds &between updates:")
self.scSeconds = wx.SpinCtrl(panel12, -1, "")
# Layout
sizer_3 = wx.FlexGridSizer(2, 3, 4, 4)
sizer_3.AddGrowableCol(1)
sizer_3.Add(rootText, 1, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_3.Add(self.tcRoot, 1, wx.ALL|wx.EXPAND, 0)
sizer_3.Add(self.btnStart, 1, wx.ALL|wx.EXPAND, 0)
sizer_3.Add(secondsText, 1, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_3.Add(self.scSeconds, 1)
panel12.SetSizer(sizer_3)
panel12.Layout()
sizer_2 = wx.FlexGridSizer(3, 1, 4, 4)
sizer_2.Add(panel12, 1, wx.ALL|wx.EXPAND, 4)
sizer_2.Add(self.olv, 1, wx.ALL|wx.EXPAND, 4)
sizer_2.AddGrowableCol(0)
sizer_2.AddGrowableRow(1)
panel1.SetSizer(sizer_2)
panel1.Layout()
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add(panel1, 1, wx.EXPAND)
self.SetSizer(sizer_1)
self.Layout()
# Event handling
self.Bind(wx.EVT_CLOSE, self.HandleClose)
self.btnStart.Bind(wx.EVT_BUTTON, self.HandleStart)
self.tcRoot.Bind(wx.EVT_DIRPICKER_CHANGED, self.HandleRootText)
# Widget initialization
self.btnStart.SetDefault()
self.scSeconds.SetRange(0, 15)
self.scSeconds.SetValue(2)
self.tcRoot.SetPath(wx.StandardPaths.Get().GetDocumentsDir())
# OK, This is the whole point of the example. Wrap the ObjectListView in a batch updater
self.olv = BatchedUpdate(self.olv, 2)
def InitModel(self):
self.backgroundProcess = None
def InitObjectListView(self):
def sizeToNiceString(byteCount):
"""
Convert the given byteCount into a string like: 9.9bytes/KB/MB/GB
"""
for (cutoff, label) in [(1024*1024*1024, "GB"), (1024*1024, "MB"), (1024, "KB")]:
if byteCount >= cutoff:
return "%.1f %s" % (byteCount * 1.0 / cutoff, label)
if byteCount == 1:
return "1 byte"
else:
return "%d bytes" % byteCount
self.olv.SetColumns([
ColumnDefn("Path", "left", 150, "GetPath"),
ColumnDefn("Files", "left", 100, "countFiles"),
ColumnDefn("File Size", "left", 100, "sizeFiles"),
ColumnDefn("Total Directories", "left", 100, "CountAllDirectories"),
ColumnDefn("Total Files", "left", 100, "CountAllFiles"),
ColumnDefn("Total File Size", "left", 100, "SizeAllFiles", stringConverter=sizeToNiceString),
])
self.olv.SetSortColumn(0)
def HandleClose(self, evt):
if self.backgroundProcess:
self.backgroundProcess.cancel()
self.Destroy()
return True
def HandleStart(self, evt):
if self.backgroundProcess:
self.backgroundProcess.cancel()
else:
self.btnStart.SetLabel("&Stop")
self.olv.SetObjects(None)
self.olv.SetEmptyListMsg("Scanning...")
self.statusbar.SetStatusText("Scanning...")
# Configure the update period. 0 means unbatched
if self.scSeconds.GetValue():
if isinstance(self.olv, BatchedUpdate):
self.olv.updatePeriod = self.scSeconds.GetValue()
else:
                self.olv = BatchedUpdate(self.olv, self.scSeconds.GetValue())
else:
if isinstance(self.olv, BatchedUpdate):
self.olv = self.olv.objectListView
self.backgroundProcess = BackgroundProcess(work=self.Walker, done=self.DoneWalking)
self.backgroundProcess.path = self.tcRoot.GetPath()
self.backgroundProcess.runAsync()
def HandleRootText(self, evt):
pass
#if os.path.isdir(self.tcRoot.GetValue()):
# self.tcRoot.SetBackgroundColour(wx.WHITE)
#else:
# self.tcRoot.SetBackgroundColour(wx.Colour(255, 255, 0))
def Walker(self, backgroundProcess):
        backgroundProcess.start = time.perf_counter()  # time.clock() was removed in Python 3.8
backgroundProcess.stats = list()
stats = [DirectoryStats(None, backgroundProcess.path)]
wx.CallAfter(self.olv.SetObjects, stats)
for stat in stats:
if backgroundProcess.isCancelled():
return
            stat.startScan = time.perf_counter()
            names = os.listdir(stat.GetPath())
            names.sort(key=str.lower)
for name in names:
if backgroundProcess.isCancelled():
return
subPath = os.path.join(stat.GetPath(), name)
if os.path.isdir(subPath):
stats.append(DirectoryStats(stat, name))
else:
stat.countFiles += 1
try:
stat.sizeFiles += os.path.getsize(subPath)
                    except OSError:  # WindowsError is Windows-only; OSError is portable
pass
            stat.endScan = time.perf_counter()
if not backgroundProcess.isCancelled():
wx.CallAfter(self.olv.AddObjects, stat.children)
wx.CallAfter(self.olv.RefreshObjects, stat.SelfPlusAncestors())
#wx.SafeYield()
#for x in stats:
# print x.GetPath(), x.CountAllDirectories(), x.CountAllFiles(), x.SizeAllFiles(), x.ElapsedScanTime()
backgroundProcess.stats = stats
def DoneWalking(self, backgroundProcess):
self.btnStart.SetLabel("&Start")
if backgroundProcess.isCancelled():
self.statusbar.SetStatusText("Tree walk was cancelled")
else:
            backgroundProcess.end = time.perf_counter()
self.olv.SetObjects(backgroundProcess.stats)
self.statusbar.SetStatusText("%d directories scanned in %.2f seconds" %
(len(backgroundProcess.stats), backgroundProcess.end - backgroundProcess.start))
self.backgroundProcess = None
class DirectoryStats(object):
"""
"""
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.children = list()
self.countFiles = 0
self.sizeFiles = 0
if self.parent:
self.parent.children.append(self)
self.startScan = None
self.endScan = None
def GetName(self):
return self.name
def GetPath(self):
if self.parent:
return os.path.join(self.parent.GetPath(), self.name)
else:
return self.name
def SelfPlusAncestors(self):
"""
Return a collection containing this object plus all its ancestors
"""
if self.parent:
return self.parent.SelfPlusAncestors() + [self]
else:
return [self]
def CountAllDirectories(self):
"""
Return the total number of directories in this directory, recursively
"""
if self.children:
return len(self.children) + sum(x.CountAllDirectories() for x in self.children)
else:
return 0
def CountAllFiles(self):
"""
Return the total number of files in this directory, recursively
"""
if self.children:
return self.countFiles + sum(x.CountAllFiles() for x in self.children)
else:
return self.countFiles
def SizeAllFiles(self):
"""
Return the total number of byes of all files in this directory, recursively
"""
if self.children:
return self.sizeFiles + sum(x.SizeAllFiles() for x in self.children)
else:
return self.sizeFiles
def ElapsedScanTime(self):
"""
Return the number of seconds it took to scan just this directory (not its descendents)
"""
if self.endScan and self.startScan:
return self.endScan - self.startScan
else:
return 0
class BackgroundProcess(object):
"""
A BackgroundProcess is a long-running, cancellable thread that can
report progress and done events.
This object can be used by:
1) subclassing and overriding 'doWork' method
2) passing a callable as the "work" parameter to the constructor
"""
__author__ = "Phillip Piper"
__date__ = "19 April 2008"
__version__ = "0.1"
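    # Illustrative usage (not part of the original example): run a cancellable
    # task asynchronously with a completion callback.
    #
    #   def work(process):
    #       for i in range(100):
    #           if process.isCancelled():
    #               return
    #           process.reportProgress(i)
    #
    #   bp = BackgroundProcess(work=work, done=lambda p: print("done"))
    #   bp.runAsync()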
def __init__(self, work=None, progress=None, done=None):
"""
Initialize a background process.
Parameters:
work
A callable that accepts a single parameter: the process itself. This
callable executes the long running process. This should periodically check
to see if the process has been cancelled (using the isCancelled method),
as well as reporting its progress (using the notifyProgress method). If
this is None, the process will do nothing. Subclasses that override the
"doWork" method should not use this parameter
progress
A callable that accepts two parameters: the process itself, and a value
given to the notifyProgress method (often an int representing percentage done).
done
A callable that accepts a single parameter: the process itself. If not None,
this is called when the process finishes.
"""
self.thread = None
self.abortEvent = threading.Event()
self.workCallback = work
self.progressCallback = progress
self.doneCallback = done
#----------------------------------------------------------------------------
# Commands
def run(self):
"""
Run the process synchronously
"""
self.runAsync()
self.wait()
def runAsync(self):
"""
Start a process to run asynchronously
"""
if self.isRunning():
return
self.abortEvent.clear()
self.thread = threading.Thread(target=self._worker)
        self.thread.daemon = True
self.thread.start()
def wait(self):
"""
Wait until the process is finished
"""
self.thread.join()
def cancel(self):
"""
Cancel the process
"""
self.abortEvent.set()
def isCancelled(self):
"""
Has this process been cancelled?
"""
        return self.abortEvent.is_set()
def isRunning(self):
"""
Return true if the process is still running
"""
        return self.thread is not None and self.thread.is_alive()
#----------------------------------------------------------------------------
# Implementation
def _worker(self):
"""
This is the actual thread process
"""
self.doWork()
self.reportDone()
def doWork(self):
"""
Do the real work of the thread.
Subclasses should override this method to perform the long-running task.
That task should call "isCancelled" regularly and "reportProgress" periodically.
"""
if self.workCallback:
self.workCallback(self)
def reportProgress(self, value):
"""
Report that some progress has been made
"""
time.sleep(0.001) # Force a switch to other threads
if self.progressCallback and not self.isCancelled():
self.progressCallback(self, value)
def reportDone(self):
"""
Report that the thread has finished
"""
if self.doneCallback:
self.doneCallback(self)
if __name__ == '__main__':
#walker("c:\\temp")
    app = wx.App(False)  # wx.PySimpleApp was removed from modern wxPython
wx.InitAllImageHandlers()
frame_1 = MyFrame(None, -1, "BatchedUpdate Example")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop() | PypiClean |
/Axelrod-4.13.0.tar.gz/Axelrod-4.13.0/docs/how-to/contributing/strategy/instructions.rst | Instructions
============
Here is the file structure for the Axelrod repository::
.
├── axelrod
│ └── __init__.py
│ └── ecosystem.py
│ └── game.py
│ └── player.py
│ └── plot.py
│ └── result_set.py
│ └── round_robin.py
│ └── tournament.py
│ └── /strategies/
│ └── __init__.py
│ └── _strategies.py
│ └── cooperator.py
│ └── defector.py
│ └── grudger.py
│ └── titfortat.py
│ └── gobymajority.py
│ └── ...
│ └── /tests/
│ └── integration
│ └── strategies
│ └── unit
│ └── test_*.py
└── README.md
To contribute a strategy you need to follow as many of the following steps as possible:
1. Fork the `github repository <https://github.com/Axelrod-Python/Axelrod>`_.
2. Add a :code:`<strategy>.py` file to the strategies directory or add a
   strategy to a pre-existing :code:`<strategy>.py` file (see the sketch after this list).
3. Update the :code:`./axelrod/strategies/_strategies.py` file.
4. If you created a new :code:`<strategy>.py` file add it to
:code:`.docs/reference/all_strategies.rst`.
5. Write some unit tests in the :code:`./axelrod/tests/strategies/` directory.
6. This one is also optional: ping us a message and we'll add you to the
Contributors team. This would add an Axelrod-Python organisation badge to
your profile.
7. Send us a pull request.
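
As an illustrative sketch only (exact import paths and any classifier
metadata vary between Axelrod versions), a minimal new strategy might look
like this::

    from axelrod.action import Action
    from axelrod.player import Player

    C, D = Action.C, Action.D

    class MirrorFirstMove(Player):
        """Cooperates on the first round, then mirrors the opponent."""

        name = "Mirror First Move"

        def strategy(self, opponent):
            if not opponent.history:
                return C
            return opponent.history[-1]
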
**If you would like a hand with any of the above please do get in touch: we're
always delighted to have new strategies.**
| PypiClean |
/Django_patch-2.2.19-py3-none-any.whl/django/db/models/functions/text.py | from django.db.models.expressions import Func, Value
from django.db.models.fields import IntegerField
from django.db.models.functions import Coalesce
from django.db.models.lookups import Transform
class BytesToCharFieldConversionMixin:
"""
Convert CharField results from bytes to str.
MySQL returns long data types (bytes) instead of chars when it can't
determine the length of the result string. For example:
LPAD(column1, CHAR_LENGTH(column2), ' ')
returns the LONGTEXT (bytes) instead of VARCHAR.
"""
def convert_value(self, value, expression, connection):
if connection.features.db_functions_convert_bytes_to_str:
if self.output_field.get_internal_type() == 'CharField' and isinstance(value, bytes):
return value.decode()
return super().convert_value(value, expression, connection)
class Chr(Transform):
function = 'CHR'
lookup_name = 'chr'
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(
compiler, connection, function='CHAR',
template='%(function)s(%(expressions)s USING utf16)',
**extra_context
)
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(
compiler, connection,
template='%(function)s(%(expressions)s USING NCHAR_CS)',
**extra_context
)
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='CHAR', **extra_context)
class ConcatPair(Func):
"""
Concatenate two arguments together. This is used by `Concat` because not
all backend databases support more than two arguments.
"""
function = 'CONCAT'
def as_sqlite(self, compiler, connection, **extra_context):
coalesced = self.coalesce()
return super(ConcatPair, coalesced).as_sql(
compiler, connection, template='%(expressions)s', arg_joiner=' || ',
**extra_context
)
def as_mysql(self, compiler, connection, **extra_context):
# Use CONCAT_WS with an empty separator so that NULLs are ignored.
return super().as_sql(
compiler, connection, function='CONCAT_WS',
template="%(function)s('', %(expressions)s)",
**extra_context
)
def coalesce(self):
        # a null on either side results in a null expression, so wrap each side with coalesce
c = self.copy()
c.set_source_expressions([
Coalesce(expression, Value('')) for expression in c.get_source_expressions()
])
return c
class Concat(Func):
"""
Concatenate text fields together. Backends that result in an entire
null expression when any arguments are null will wrap each argument in
coalesce functions to ensure a non-null result.
"""
function = None
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError('Concat must take at least two expressions')
paired = self._paired(expressions)
super().__init__(paired, **extra)
def _paired(self, expressions):
# wrap pairs of expressions in successive concat functions
# exp = [a, b, c, d]
        # -> ConcatPair(a, ConcatPair(b, ConcatPair(c, d)))
if len(expressions) == 2:
return ConcatPair(*expressions)
return ConcatPair(expressions[0], self._paired(expressions[1:]))
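# Illustrative usage (model and field names assumed, not part of this module):
#   from django.db.models import Value
#   Author.objects.annotate(
#       screen_name=Concat('name', Value(' <'), 'email', Value('>')),
#   )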
class Left(Func):
function = 'LEFT'
arity = 2
def __init__(self, expression, length, **extra):
"""
expression: the name of a field, or an expression returning a string
length: the number of characters to return from the start of the string
"""
if not hasattr(length, 'resolve_expression'):
if length < 1:
raise ValueError("'length' must be greater than 0.")
super().__init__(expression, length, **extra)
def get_substr(self):
return Substr(self.source_expressions[0], Value(1), self.source_expressions[1])
def as_oracle(self, compiler, connection, **extra_context):
return self.get_substr().as_oracle(compiler, connection, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
return self.get_substr().as_sqlite(compiler, connection, **extra_context)
class Length(Transform):
"""Return the number of characters in the expression."""
function = 'LENGTH'
lookup_name = 'length'
output_field = IntegerField()
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='CHAR_LENGTH', **extra_context)
class Lower(Transform):
function = 'LOWER'
lookup_name = 'lower'
class LPad(BytesToCharFieldConversionMixin, Func):
function = 'LPAD'
def __init__(self, expression, length, fill_text=Value(' '), **extra):
if not hasattr(length, 'resolve_expression') and length is not None and length < 0:
raise ValueError("'length' must be greater or equal to 0.")
super().__init__(expression, length, fill_text, **extra)
class LTrim(Transform):
function = 'LTRIM'
lookup_name = 'ltrim'
class Ord(Transform):
function = 'ASCII'
lookup_name = 'ord'
output_field = IntegerField()
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='ORD', **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='UNICODE', **extra_context)
class Repeat(BytesToCharFieldConversionMixin, Func):
function = 'REPEAT'
def __init__(self, expression, number, **extra):
if not hasattr(number, 'resolve_expression') and number is not None and number < 0:
raise ValueError("'number' must be greater or equal to 0.")
super().__init__(expression, number, **extra)
def as_oracle(self, compiler, connection, **extra_context):
expression, number = self.source_expressions
length = None if number is None else Length(expression) * number
rpad = RPad(expression, length, expression)
return rpad.as_sql(compiler, connection, **extra_context)
class Replace(Func):
function = 'REPLACE'
def __init__(self, expression, text, replacement=Value(''), **extra):
super().__init__(expression, text, replacement, **extra)
class Reverse(Transform):
function = 'REVERSE'
lookup_name = 'reverse'
def as_oracle(self, compiler, connection, **extra_context):
# REVERSE in Oracle is undocumented and doesn't support multi-byte
# strings. Use a special subquery instead.
return super().as_sql(
compiler, connection,
template=(
'(SELECT LISTAGG(s) WITHIN GROUP (ORDER BY n DESC) FROM '
'(SELECT LEVEL n, SUBSTR(%(expressions)s, LEVEL, 1) s '
'FROM DUAL CONNECT BY LEVEL <= LENGTH(%(expressions)s)) '
'GROUP BY %(expressions)s)'
),
**extra_context
)
class Right(Left):
function = 'RIGHT'
def get_substr(self):
return Substr(self.source_expressions[0], self.source_expressions[1] * Value(-1))
class RPad(LPad):
function = 'RPAD'
class RTrim(Transform):
function = 'RTRIM'
lookup_name = 'rtrim'
class StrIndex(Func):
"""
Return a positive integer corresponding to the 1-indexed position of the
first occurrence of a substring inside another string, or 0 if the
substring is not found.
"""
function = 'INSTR'
arity = 2
output_field = IntegerField()
def as_postgresql(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='STRPOS', **extra_context)
class Substr(Func):
function = 'SUBSTRING'
def __init__(self, expression, pos, length=None, **extra):
"""
expression: the name of a field, or an expression returning a string
pos: an integer > 0, or an expression returning an integer
length: an optional number of characters to return
"""
if not hasattr(pos, 'resolve_expression'):
if pos < 1:
raise ValueError("'pos' must be greater than 0")
expressions = [expression, pos]
if length is not None:
expressions.append(length)
super().__init__(*expressions, **extra)
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='SUBSTR', **extra_context)
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='SUBSTR', **extra_context)
class Trim(Transform):
function = 'TRIM'
lookup_name = 'trim'
class Upper(Transform):
function = 'UPPER'
lookup_name = 'upper' | PypiClean |
/NeodroidVision-0.3.0-py36-none-any.whl/neodroidvision/utilities/skimage_utilities/line_peaks.py |
__author__ = "heider"
__doc__ = r"""
Created on 5/5/22
"""
from pathlib import Path
import numpy
from matplotlib import cm, pyplot
from skimage import color, io
from skimage.draw import line
from skimage.transform import hough_line, hough_line_peaks
if __name__ == "__main__":
def aushd():
"""description"""
file = "3.jpg"
# file = "NdNLO.jpg"
# image = cv2.imread(str(Path.home() / "Pictures" / file))
# Constructing test image
image = color.rgb2gray(io.imread(str(Path.home() / "Pictures" / file)))
# Classic straight-line Hough transform
# Set a precision of 0.05 degree.
tested_angles = numpy.linspace(-numpy.pi / 2, numpy.pi / 2, 3600)
h, theta, d = hough_line(image, theta=tested_angles)
hpeaks = hough_line_peaks(h, theta, d, threshold=0.2 * h.max())
fig, ax = pyplot.subplots()
ax.imshow(image, cmap=cm.gray)
for _, angle, dist in zip(*hpeaks):
(x0, y0) = dist * numpy.array([numpy.cos(angle), numpy.sin(angle)])
ax.axline((x0, y0), slope=numpy.tan(angle + numpy.pi / 2))
pyplot.show()
def auishd():
"""description"""
# Constructing test image
image = numpy.zeros((200, 200))
idx = numpy.arange(25, 175)
image[idx, idx] = 255
image[line(45, 25, 25, 175)] = 255
image[line(25, 135, 175, 155)] = 255
# Classic straight-line Hough transform
# Set a precision of 0.5 degree.
tested_angles = numpy.linspace(-numpy.pi / 2, numpy.pi / 2, 360, endpoint=False)
h, theta, d = hough_line(image, theta=tested_angles)
# Generating figure 1
fig, axes = pyplot.subplots(1, 3, figsize=(15, 6))
ax = axes.ravel()
ax[0].imshow(image, cmap=cm.gray)
ax[0].set_title("Input image")
ax[0].set_axis_off()
angle_step = 0.5 * numpy.diff(theta).mean()
d_step = 0.5 * numpy.diff(d).mean()
bounds = [
numpy.rad2deg(theta[0] - angle_step),
numpy.rad2deg(theta[-1] + angle_step),
d[-1] + d_step,
d[0] - d_step,
]
ax[1].imshow(numpy.log(1 + h), extent=bounds, cmap=cm.gray, aspect=1 / 1.5)
ax[1].set_title("Hough transform")
ax[1].set_xlabel("Angles (degrees)")
ax[1].set_ylabel("Distance (pixels)")
ax[1].axis("image")
ax[2].imshow(image, cmap=cm.gray)
ax[2].set_ylim((image.shape[0], 0))
ax[2].set_axis_off()
ax[2].set_title("Detected lines")
for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
(x0, y0) = dist * numpy.array([numpy.cos(angle), numpy.sin(angle)])
ax[2].axline((x0, y0), slope=numpy.tan(angle + numpy.pi / 2))
pyplot.tight_layout()
pyplot.show()
auishd() | PypiClean |
/DjangoDav-0.0.1b16.tar.gz/DjangoDav-0.0.1b16/djangodav/utils.py |
import datetime, time, calendar
from wsgiref.handlers import format_date_time
from django.utils.feedgenerator import rfc2822_date
try:
from email.utils import parsedate_tz
except ImportError:
from email.Utils import parsedate_tz
import lxml.builder as lb
# Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
FORMAT_RFC_822 = '%a, %d %b %Y %H:%M:%S GMT'
# Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
FORMAT_RFC_850 = '%A %d-%b-%y %H:%M:%S GMT'
# Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
FORMAT_ASC = '%a %b %d %H:%M:%S %Y'
WEBDAV_NS = "DAV:"
WEBDAV_NSMAP = {'D': WEBDAV_NS}
D = lb.ElementMaker(namespace=WEBDAV_NS, nsmap=WEBDAV_NSMAP)
def get_property_tag_list(res, *names):
props = []
for name in names:
tag = get_property_tag(res, name)
if tag is None:
continue
props.append(tag)
return props
def get_property_tag(res, name):
if name == 'resourcetype':
if res.is_collection:
return D(name, D.collection)
return D(name)
try:
if hasattr(res, name):
            return D(name, str(getattr(res, name)))
except AttributeError:
return
def safe_join(root, *paths):
"""The provided os.path.join() does not work as desired. Any path starting with /
will simply be returned rather than actually being joined with the other elements."""
if not root.startswith('/'):
root = '/' + root
for path in paths:
while root.endswith('/'):
root = root[:-1]
while path.startswith('/'):
path = path[1:]
root += '/' + path
return root
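# Illustrative behaviour (traced from the code above):
#   safe_join('root', 'a/', '/b') == '/root/a/b'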
def url_join(base, *paths):
"""Assuming base is the scheme and host (and perhaps path) we will join the remaining
path elements to it."""
paths = safe_join(*paths) if paths else ""
while base.endswith('/'):
base = base[:-1]
return base + paths
def ns_split(tag):
"""Splits the namespace and property name from a clark notation property name."""
if tag.startswith("{") and "}" in tag:
ns, name = tag.split("}", 1)
return ns[1:-1], name
return "", tag
def ns_join(ns, name):
"""Joins a namespace and property name into clark notation."""
return '{%s:}%s' % (ns, name)
def rfc3339_date(dt):
if not dt:
return ''
return dt.strftime('%Y-%m-%dT%H:%M:%SZ')
def rfc1123_date(dt):
if not dt:
return ''
return rfc2822_date(dt)
def parse_time(timestring):
value = None
for fmt in (FORMAT_RFC_822, FORMAT_RFC_850, FORMAT_ASC):
try:
value = time.strptime(timestring, fmt)
except ValueError:
pass
if value is None:
try:
# Sun Nov 6 08:49:37 1994 +0100 ; ANSI C's asctime() format with timezone
value = parsedate_tz(timestring)
except ValueError:
pass
if value is None:
return
return calendar.timegm(value) | PypiClean |
/GNN4LP-0.1.0-py3-none-any.whl/src/graph_att_gan/train.py | import scipy.sparse as sp
import numpy as np
import torch
import time
import os
from configparser import ConfigParser
import sys
sys.path.append('/home/shiyan/project/gnn4lp/')
from src.util.load_data import load_data_with_features, load_data_without_features, sparse_to_tuple, mask_test_edges, preprocess_graph
from src.util.loss import arga_loss_function, varga_loss_function
from src.util.metrics import get_roc_score
from src.util import define_optimizer
from src.graph_att_gan.model import GATModelGAN
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Train():
def __init__(self):
pass
def train_model(self, config_path):
if os.path.exists(config_path) and (os.path.split(config_path)[1].split('.')[0] == 'config') and (
os.path.splitext(config_path)[1].split('.')[1] == 'cfg'):
# load config file
config = ConfigParser()
config.read(config_path)
section = config.sections()[0]
# data catalog path
data_catalog = config.get(section, "data_catalog")
# node cites path
node_cites_path = config.get(section, "node_cites_path")
node_cites_path = os.path.join(data_catalog, node_cites_path)
# node features path
node_features_path = config.get(section, 'node_features_path')
node_features_path = os.path.join(data_catalog, node_features_path)
# model save/load path
model_path = config.get(section, "model_path")
# model param config
            with_feats = config.getboolean(section, 'with_feats')  # whether the dataset provides node features
hidden_dim1 = config.getint(section, "hidden_dim1")
hidden_dim2 = config.getint(section, "hidden_dim2")
hidden_dim3 = config.getint(section, 'hidden_dim3')
dropout = config.getfloat(section, "dropout")
vae_bool = config.getboolean(section, 'vae_bool')
alpha = config.getfloat(section, 'alpha')
lr = config.getfloat(section, "lr")
lr_decay = config.getfloat(section, 'lr_decay')
weight_decay = config.getfloat(section, "weight_decay")
gamma = config.getfloat(section, "gamma")
momentum = config.getfloat(section, "momentum")
eps = config.getfloat(section, "eps")
clip = config.getfloat(section, "clip")
epochs = config.getint(section, "epochs")
optimizer_name = config.get(section, "optimizer")
if with_feats:
                # load the dataset together with node features
adj, features = load_data_with_features(node_cites_path, node_features_path)
else:
                # load the dataset without node features; use identity features instead
adj = load_data_without_features(node_cites_path)
features = sp.identity(adj.shape[0])
num_nodes = adj.shape[0]
num_edges = adj.sum()
features = sparse_to_tuple(features)
num_features = features[2][1]
            # remove the diagonal elements
            # the right-hand side below takes the diagonal of adj_orig (1-D), adds a new
            # axis, and builds a diagonal matrix containing only those diagonal entries
adj_orig = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
adj_orig.eliminate_zeros()
adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj_orig)
adj = adj_train
            # returns the coords, data and shape of D^{-0.5} S D^{-0.5}, where S = A + I
adj_norm = preprocess_graph(adj)
adj_label = adj_train + sp.eye(adj_train.shape[0])
# adj_label = sparse_to_tuple(adj_label)
adj_label = torch.FloatTensor(adj_label.toarray()).to(DEVICE)
            '''
            Note that every element of adj is either 0 or 1. pos_weight is the ratio of
            negative sample edges (edges that do not exist) to positive sample edges in the
            training adjacency matrix; this value is used in the binary cross-entropy loss.
            If the proportions of positive and negative edges are imbalanced (e.g. many
            positive edges and few negative ones), a weight argument can be supplied when
            computing the loss, giving positive edges a smaller weight and negative edges a
            larger one; this balances the two classes' contributions to the loss and can
            further improve results. Reference: https://www.zhihu.com/question/383567632
            Negative sample edges all get weight 1, positive sample edges all get weight pos_weight.
            '''
pos_weight = float(adj.shape[0] * adj.shape[0] - num_edges) / num_edges
norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
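            # Worked example (illustrative): for a 100-node graph with 50 stored edges,
            # pos_weight = (100*100 - 50) / 50 = 199.0 and
            # norm = 100*100 / (2 * (100*100 - 50)) ~= 0.5025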
# create model
print('create model ...')
model = GATModelGAN(num_features, hidden_dim1=hidden_dim1, hidden_dim2=hidden_dim2, hidden_dim3=hidden_dim3, dropout=dropout, alpha=alpha, vae_bool=vae_bool)
# define optimizer
if optimizer_name == 'adam':
optimizer = define_optimizer.define_optimizer_adam(model, lr=lr, weight_decay=weight_decay)
elif optimizer_name == 'adamw':
optimizer = define_optimizer.define_optimizer_adamw(model, lr=lr, weight_decay=weight_decay)
elif optimizer_name == 'sgd':
optimizer = define_optimizer.define_optimizer_sgd(model, lr=lr, momentum=momentum,
weight_decay=weight_decay)
elif optimizer_name == 'adagrad':
optimizer = define_optimizer.define_optimizer_adagrad(model, lr=lr, lr_decay=lr_decay,
weight_decay=weight_decay)
elif optimizer_name == 'rmsprop':
optimizer = define_optimizer.define_optimizer_rmsprop(model, lr=lr, weight_decay=weight_decay,
momentum=momentum)
elif optimizer_name == 'adadelta':
optimizer = define_optimizer.define_optimizer_adadelta(model, lr=lr, weight_decay=weight_decay)
else:
raise NameError('No define optimization function name!')
model = model.to(DEVICE)
            # a sparse tensor is represented as a pair of dense tensors: a 1-D value tensor
            # and a 2-D index tensor; providing these two tensors constructs the sparse tensor
adj_norm = torch.sparse.FloatTensor(torch.LongTensor(adj_norm[0].T),
torch.FloatTensor(adj_norm[1]),
torch.Size(adj_norm[2]))
features = torch.sparse.FloatTensor(torch.LongTensor(features[0].T),
torch.FloatTensor(features[1]),
torch.Size(features[2])).to_dense()
adj_norm = adj_norm.to(DEVICE)
features = features.to(DEVICE)
norm = torch.FloatTensor(np.array(norm)).to(DEVICE)
pos_weight = torch.tensor(pos_weight).to(DEVICE)
num_nodes = torch.tensor(num_nodes).to(DEVICE)
print('start training...')
best_valid_roc_score = float('-inf')
hidden_emb = None
model.train()
for epoch in range(epochs):
t = time.time()
optimizer.zero_grad()
                # decoded adjacency matrix and discriminator outputs
recovered, dis_real, dis_fake, mu, logvar = model(features, adj_norm)
if vae_bool:
loss = varga_loss_function(preds=recovered, labels=adj_label,
mu=mu, logvar=logvar,
dis_real=dis_real, dis_fake=dis_fake,
n_nodes=num_nodes,
norm=norm, pos_weight=pos_weight)
else:
loss = arga_loss_function(preds=recovered, labels=adj_label,
dis_real=dis_real, dis_fake=dis_fake,
norm=norm, pos_weight=pos_weight)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
cur_loss = loss.item()
optimizer.step()
hidden_emb = mu.data.cpu().numpy()
                # evaluate on the validation set
roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, val_edges, val_edges_false)
                # keep the best roc score
if roc_score > best_valid_roc_score:
best_valid_roc_score = roc_score
                    # no need to save the whole model, only hidden_emb: later decoding
                    # does inference via inner products of hidden_emb
np.save(model_path, hidden_emb)
print("Epoch:", '%04d' % (epoch + 1), "train_loss = ", "{:.5f}".format(cur_loss),
"val_roc_score = ", "{:.5f}".format(roc_score),
"average_precision_score = ", "{:.5f}".format(ap_score),
"time=", "{:.5f}".format(time.time() - t)
)
print("Optimization Finished!")
            # evaluate on the test set
roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, test_edges, test_edges_false)
print('test roc score: {}'.format(roc_score))
print('test ap score: {}'.format(ap_score))
else:
raise FileNotFoundError('File config.cfg not found : ' + config_path)
if __name__ == '__main__':
config_path = os.path.join(os.getcwd(), 'config.cfg')
train = Train()
train.train_model(config_path) | PypiClean |
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_rnn_mnist.py | from __future__ import print_function
import numpy as np
np.random.seed(1337)
# production environment
# from aa_deep_learning.aadeeplearning.aadeeplearning_old import AADeepLearning
from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
# test environment
# from aadeeplearning import aadeeplearning as aa
# 10 classes
nb_classes = 10
# the keras mnist dataset is already split into 60,000 training and 10,000 test samples; load it as follows
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_test = x_test[:64]
y_test = y_test[:64]
# print(x_test[0])
# plot a mnist digit
# import matplotlib.pyplot as plt
# fig = plt.figure()
# plt.imshow(x_test[0], cmap='binary')  # grayscale display
# plt.show()
# with the tensorflow backend, i.e. in tf mode,
# 100 RGB three-channel 16*32 colour images are represented as (100, 16, 32, 3):
# the first dimension is the sample dimension, giving the number of samples,
# the second and third dimensions are the height and width,
# and the last dimension is the channel dimension, giving the number of colour channels
# x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
# x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
# input_shape = (img_rows, img_cols, 1)
# convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# normalize: map values into the [0, 1] range
x_train /= 255
x_test /= 255
# map class vectors (integer vectors from 0 to nb_classes) to binary class matrices,
# i.e. re-encode the labels as one-hot vectors
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
# print shape information
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)
# network configuration
config = {
    # initial learning rate
    "learning_rate": 0.001,
    # learning rate decay: usually set to 0.99
    "learning_rate_decay": 0.9999,
    # optimization strategy: sgd/momentum/rmsprop
    "optimizer": "momentum",
    # momentum coefficient when optimizing with momentum gradient descent;
    # the default is 0.9 and it rarely needs tuning
    "momentum_coefficient": 0.95,
    # decay coefficient of the rmsprop optimizer
    "rmsprop_decay": 0.95,
    # regularization coefficient
    "reg_coefficient": 0,
    # number of training iterations
    "number_iteration": 1000,
    # number of samples per training batch
    "batch_size": 64,
    # path of the pre-trained parameter model
    "pre_train_model": "./iter5.gordonmodel"
}
net = [
    {
        # layer name
        "name": "rnn_1",
        # layer type
        "type": "rnn",
        # number of neurons
        "neurons_number": 60,
        # weight initialization method: msra/xavier/gaussian
        "weight_init": "xavier"
    },
    {
        # layer name
        "name": "relu_1",
        # layer type
        "type": "relu"
    },
    {
        # layer name
        "name": "fully_connected_1",
        # layer type
        "type": "fully_connected",
        # number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # weight initialization method: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # layer name
        "name": "softmax",
        # layer type
        "type": "softmax"
    }
]
AA = AADeepLearning(net=net, config=config)
# train the model
AA.train(x_train=x_train, y_train=y_train)
# predict on the test set; returns the probability distribution and the accuracy.
# score: per-class probabilities for each sample, accuracy: classification accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy)
"""
# report the trained model's performance on the test set
print('Test score:', score[0])
print('Test accuracy:', score[1])
# Test score: 0.032927570413
# Test accuracy: 0.9892
"""
/Bottlechest-0.7.1-cp34-cp34m-macosx_10_9_x86_64.whl/bottlechest/src/template/func/nanequal.w3d.py | "nanequal template"
from copy import deepcopy
import bottlechest as bn
__all__ = ["nanequal"]
FLOAT_DTYPES = [x for x in bn.dtypes if 'float' in x]
INT_DTYPES = [x for x in bn.dtypes if 'int' in x]
# Float dtypes (not axis=None) ----------------------------------------------
floats = {}
floats['dtypes'] = FLOAT_DTYPES
floats['axisNone'] = False
floats['force_output_dtype'] = 'bool'
floats['reuse_non_nan_func'] = False
floats['top'] = """
@cython.boundscheck(False)
@cython.wraparound(False)
def NAME_NDIMd_DTYPE_axisAXIS(np.ndarray[np.DTYPE_t, ndim=NDIM] a,
np.ndarray[np.DTYPE_t, ndim=NDIM] b):
"Check whether two arrays are equal, ignoring NaNs, in NDIMd array with dtype=DTYPE along axis=AXIS."
cdef int f = 1
cdef np.DTYPE_t ai
"""
loop = {}
loop[2] = """\
for iINDEX0 in range(nINDEX0):
f = 1
for iINDEX1 in range(nINDEX1):
ai = a[INDEXALL]
bi = b[INDEXALL]
if ai != bi and ai == ai and bi == bi:
y[INDEXPOP] = 0
f = 0
break
if f == 1:
y[INDEXPOP] = 1
return y
"""
loop[3] = """\
for iINDEX0 in range(nINDEX0):
for iINDEX1 in range(nINDEX1):
f = 1
for iINDEX2 in range(nINDEX2):
ai = a[INDEXALL]
bi = b[INDEXALL]
if ai != bi and ai == ai and bi == bi:
y[INDEXPOP] = 0
f = 0
break
if f == 1:
y[INDEXPOP] = 1
return y
"""
floats['loop'] = loop
# Float dtypes (axis=None) --------------------------------------------------
floats_None = deepcopy(floats)
floats_None['axisNone'] = True
loop = {}
loop[1] = """\
for iINDEX0 in range(nINDEX0):
ai = a[INDEXALL]
bi = b[INDEXALL]
if ai != bi and ai == ai and bi == bi:
return np.bool_(False)
return np.bool_(True)
"""
loop[2] = """\
for iINDEX0 in range(nINDEX0):
for iINDEX1 in range(nINDEX1):
ai = a[INDEXALL]
bi = b[INDEXALL]
if ai != bi and ai == ai and bi == bi:
return np.bool_(False)
return np.bool_(True)
"""
loop[3] = """\
for iINDEX0 in range(nINDEX0):
for iINDEX1 in range(nINDEX1):
for iINDEX2 in range(nINDEX2):
ai = a[INDEXALL]
bi = b[INDEXALL]
if ai != bi and ai == ai and bi == bi:
return np.bool_(False)
return np.bool_(True)
"""
floats_None['loop'] = loop
# Int dtypes (not axis=None) ------------------------------------------------
ints = deepcopy(floats)
ints['dtypes'] = INT_DTYPES
loop = {}
loop[2] = """\
for iINDEX0 in range(nINDEX0):
f = 1
for iINDEX1 in range(nINDEX1):
ai = a[INDEXALL]
bi = b[INDEXALL]
if ai != bi:
y[INDEXPOP] = 0
f = 0
break
if f == 1:
y[INDEXPOP] = 1
return y
"""
loop[3] = """\
for iINDEX0 in range(nINDEX0):
for iINDEX1 in range(nINDEX1):
f = 1
for iINDEX2 in range(nINDEX2):
ai = a[INDEXALL]
bi = b[INDEXALL]
if ai != bi:
y[INDEXPOP] = 0
f = 0
break
if f == 1:
y[INDEXPOP] = 1
return y
"""
ints['loop'] = loop
# Slow, unaccelerated ndim/dtype --------------------------------------------
slow = {}
slow['name'] = "nanequal"
slow['signature'] = "arr1, arr2"
slow['func'] = "bn.slow.nanequal(arr1, arr2, axis=AXIS)"
# Template ------------------------------------------------------------------
nanequal = {}
nanequal['name'] = 'nanequal'
nanequal['is_reducing_function'] = True
nanequal['cdef_output'] = True
nanequal['slow'] = slow
nanequal['templates'] = {}
nanequal['templates']['float'] = floats
nanequal['templates']['float_None'] = floats_None
nanequal['templates']['int'] = ints
nanequal['pyx_file'] = 'func/%sbit/nanequal.pyx'
nanequal['main'] = '''"nanequal auto-generated from template"
def nanequal(arr1, arr2, axis=None):
"""
Test whether two array are equal along a given axis, ignoring NaNs.
Returns single boolean unless `axis` is not ``None``.
Parameters
----------
arr1 : array_like
First input array. If `arr` is not an array, a conversion is attempted.
arr2 : array_like
Second input array
axis : {int, None}, optional
Axis along which arrays are compared. The default (`axis` = ``None``)
is to compare flattened arrays. `axis` may be
negative, in which case it counts from the last to the first axis.
Returns
-------
y : bool or ndarray
A new boolean or `ndarray` is returned.
See also
--------
bottlechest.nancmp: Compare two arrays, ignoring NaNs
    Examples
    --------
    >>> bn.nanequal([1, np.nan], [1, np.nan])
    True
    >>> bn.nanequal([1, 2], [1, 3])
    False
    >>> a = np.array([[1, 4], [1, np.nan]])
    >>> b = np.array([[1, 4], [2, np.nan]])
    >>> bn.nanequal(a, b)
    False
    >>> bn.nanequal(a, b, axis=0)
    array([False,  True], dtype=bool)
"""
func, arr1, arr2 = nanequal_selector(arr1, arr2, axis)
return func(arr1, arr2)
def nanequal_selector(arr1, arr2, axis):
"""
Return nanequal function and array that matches `arr` and `axis`.
Under the hood Bottleneck uses a separate Cython function for each
combination of ndim, dtype, and axis. A lot of the overhead in bn.nanequal()
is in checking that `axis` is within range, converting `arr` into an
array (if it is not already an array), and selecting the function to use.
You can get rid of the overhead by doing all this before you, for example,
enter an inner loop, by using the this function.
Parameters
----------
arr1 : array_like
First input array. If `arr` is not an array, a conversion is attempted.
arr2 : array_like
Second input array
axis : {int, None}, optional
Axis along which arrays are compared. The default (`axis` = ``None``)
is to compare flattened arrays. `axis` may be
negative, in which case it counts from the last to the first axis.
Returns
-------
func : function
The nanequal function that matches the number of dimensions and
dtype of the input array and the axis.
a1 : ndarray
If the input array `arr1` is not a ndarray, then `a1` will contain the
result of converting `arr1` into a ndarray.
a2 : ndarray
Equivalent for arr2.
    Examples
    --------
    Create two numpy arrays:
    >>> arr1 = np.array([1.0, 2.0, 3.0])
    >>> arr2 = np.array([1.0, 2.0, np.nan])
    Obtain the function needed to compare `arr1` and `arr2`:
    >>> func, a1, a2 = bn.func.nanequal_selector(arr1, arr2, axis=None)
    >>> func
    <function nanequal_1d_float64_axisNone>
    Use the returned function and arrays to compare them, ignoring NaNs:
    >>> func(a1, a2)
    True
"""
cdef np.ndarray a1, a2
if type(arr1) is np.ndarray:
a1 = arr1
else:
a1 = np.array(arr1, copy=False)
if type(arr2) is np.ndarray:
a2 = arr2
else:
a2 = np.array(arr2, copy=False)
cdef int ndim = PyArray_NDIM(a1)
cdef int ndim2 = PyArray_NDIM(a1)
if ndim != ndim2:
raise ValueError("arrays have different dimensions, %i != %i" %
(ndim, ndim2))
cdef int dtype = PyArray_TYPE(a1)
cdef np.npy_intp *dim1, *dim2
cdef int i
cdef tuple key = (ndim, dtype, axis)
if dtype == PyArray_TYPE(a2):
dim1 = PyArray_DIMS(a1)
dim2 = PyArray_DIMS(a2)
for i in range(ndim):
if dim1[i] != dim2[i]:
raise ValueError("shape mismatch");
if (axis is not None) and (axis < 0):
axis += ndim
try:
func = nanequal_dict[key]
return func, a1, a2
except KeyError:
pass
if axis is not None:
if (axis < 0) or (axis >= ndim):
raise ValueError("axis(=%d) out of bounds" % axis)
try:
func = nanequal_slow_dict[axis]
except KeyError:
tup = (str(ndim), str(a1.dtype), str(axis))
raise TypeError("Unsupported ndim/dtype/axis (%s/%s/%s)." % tup)
return func, a1, a2
''' | PypiClean |
/DendroPy-4.6.1.tar.gz/DendroPy-4.6.1/src/dendropy/legacy/seqsim.py |
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
DEPRECATED IN DENDROPY 4: USE `dendropy.simulate.charsim`.
"""
import dendropy
from dendropy.simulate import charsim
from dendropy.utility import deprecate
def generate_hky_dataset(seq_len,
tree_model,
mutation_rate=1.0,
kappa=1.0,
base_freqs=[0.25, 0.25, 0.25, 0.25],
root_states=None,
dataset=None,
rng=None):
deprecate.dendropy_deprecation_warning(
preamble="Deprecated since DendroPy 4: The 'dendropy.seqsim.generate_hky_dataset()' function has been replaced with 'dendropy.simulate.charsim.hky85_chars()'.",
old_construct="from dendropy import seqsim\ndataset = seqsim.generate_hky_dataset(...)",
new_construct="import dendropy\nfrom dendropy.simulate import charsim\ndataset = dendropy.DataSet()\nchar_matrix = charsim.hky85_chars(...)\ndataset.add_char_matrix(char_matrix)")
if dataset is None:
dataset = dendropy.DataSet()
char_matrix = dataset.new_char_matrix(char_matrix_type="dna", taxon_namespace=tree_model.taxon_namespace)
charsim.hky85_chars(
seq_len=seq_len,
tree_model=tree_model,
mutation_rate=mutation_rate,
kappa=kappa,
base_freqs=base_freqs,
root_states=root_states,
char_matrix=char_matrix,
rng=rng)
return dataset
def generate_hky_characters(seq_len,
tree_model,
mutation_rate=1.0,
kappa=1.0,
base_freqs=[0.25, 0.25, 0.25, 0.25],
root_states=None,
char_matrix=None,
rng=None):
deprecate.dendropy_deprecation_warning(
preamble="Deprecated since DendroPy 4: The 'dendropy.seqsim.generate_hky_characters()' function has been replaced with 'dendropy.simulate.charsim.hky85_chars()'.",
old_construct="from dendropy import seqsim\nchar_matrix = seqsim.generate_hky_characters(...)",
new_construct="from dendropy.simulate import charsim\nchar_matrix = charsim.hky85_chars(...)")
return charsim.hky85_chars(
seq_len=seq_len,
tree_model=tree_model,
mutation_rate=mutation_rate,
kappa=kappa,
base_freqs=base_freqs,
root_states=root_states,
char_matrix=char_matrix,
rng=rng)
# def generate_dataset(seq_len,
# tree_model,
# seq_model,
# mutation_rate=1.0,
# root_states=None,
# dataset=None,
# rng=None):
# deprecate.dendropy_deprecation_warning(
# preamble="Deprecated since DendroPy 4: The 'dendropy.seqsim.generate_hky_characters()' function has been replaced with 'dendropy.simulate.charsim.hky85_chars()'.",
# old_construct="from dendropy import seqsim\nchar_matrix = seqsim.generate_hky_characters(...)",
# new_construct="from dendropy.simulate import charsim\nchar_matrix = discrete.hky85_chars(...)")
# def generate_char_matrix(seq_len,
# tree_model,
# seq_model,
# mutation_rate=1.0,
# root_states=None,
# char_matrix=None,
# rng=None):
# pass
# class SeqEvolver(object):
# def __init__(self,
# seq_model=None,
# mutation_rate=None,
# seq_attr='sequences',
# seq_model_attr="seq_model",
# edge_length_attr="length",
# edge_rate_attr="mutation_rate",
# seq_label_attr='taxon'):
# pass | PypiClean |
/GaitAnalysisToolKit-0.2.0.tar.gz/GaitAnalysisToolKit-0.2.0/gaitanalysis/markers.py |
# builtin
from distutils.version import LooseVersion
# external
import numpy as np
from numpy.core.umath_tests import matrix_multiply
def det3(ar):
"""Returns the determinants of an array of 3 x 3 matrices.
Parameters
----------
ar : array_like, shape(n, 3, 3)
A array of 3 x 3 arrays.
Returns
-------
tot : ndarray, shape(n, )
An array of determinants.
Notes
-----
This is extremely faster than calling numpy.linalg.det for 3 x 3
matrices and is adopted from:
http://mail.scipy.org/pipermail/numpy-discussion/2008-August/036928.html
"""
a = ar[..., 0, 0]; b = ar[..., 0, 1]; c = ar[..., 0, 2]
d = ar[..., 1, 0]; e = ar[..., 1, 1]; f = ar[..., 1, 2]
g = ar[..., 2, 0]; h = ar[..., 2, 1]; i = ar[..., 2, 2]
t = a.copy(); t *= e; t *= i; tot = t
t = b.copy(); t *= f; t *= g; tot += t
t = c.copy(); t *= d; t *= h; tot += t
t = g.copy(); t *= e; t *= c; tot -= t
t = h.copy(); t *= f; t *= a; tot -= t
t = i.copy(); t *= d; t *= b; tot -= t
return tot
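# Illustrative check (not part of the original module): det3 agrees with
# numpy.linalg.det on stacks of 3 x 3 matrices, e.g.
#   det3(np.eye(3)[np.newaxis]) -> array([ 1.])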
def soederkvist(first_positions, second_positions):
"""Returns the rotation matrix and translation vector that relates two
sets of markers in 3D space that are assumed to be attached to the same
rigid body in two different positions and orientations given noisy
measurements of the marker sets' global positions.
Parameters
----------
first_positions : array_like, shape(n, m, 3) or shape(1, m, 3)
The x, y, and z coordinates of m markers in n positions in a global
reference frame.
second_positions : array_like, shape(n, m, 3)
The x, y, and z coordinates of the same m markers in n positions in
a global reference frame.
Returns
-------
rotation : ndarray, shape(n, 3, 3)
        These rotation matrices are defined such that v1 = R * v2, where v1 is
the vector, v, expressed in a reference frame associated with the
first position and v2 is the same vector expressed in a reference
frame associated with the second position.
translation : ndarray, shape(n, 3)
The translation vector from the first position to the second
position expressed in the same frame as the x and y values.
Notes
-----
The relationship between x, y, R and d is defined as:
yi = R * xi + d
    This algorithm is explicitly taken from:
I. Soederkvist and P.A. Wedin (1993) Determining the movement of the
skeleton using well-configured markers. J. Biomech. 26:1473-1477.
But the same algorithm is described in:
    J.H. Challis (1995) A procedure for determining rigid body transformation
    parameters, J. Biomech. 28, 733-737.
    where the latter also includes possibilities for scaling, reflection, and
    weighting of marker data.
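
    Examples
    --------
    A minimal usage sketch with synthetic marker data (the coordinates
    below are random placeholders, not a real motion capture trial)::

        import numpy as np
        first = np.random.random((10, 4, 3))   # 10 frames, 4 markers
        second = np.random.random((10, 4, 3))
        R, d = soederkvist(first, second)
        # R.shape == (10, 3, 3) and d.shape == (10, 3); for noise-free
        # data second[k, i] is approximately R[k].dot(first[k, i]) + d[k]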
"""
num_frames, num_markers, num_coordinates = first_positions.shape
    # TODO : This may be an unnecessary memory increase and broadcasting may
# deal with this properly without having to do this explicitly.
if num_frames == 1:
first_positions = np.repeat(first_positions,
second_positions.shape[0], 0)
num_frames = first_positions.shape[0]
if num_markers != first_positions.shape[1]:
raise ValueError('The first and second positions must have the ' +
'same number of markers.')
if num_coordinates != 3 or second_positions.shape[2] != 3:
raise ValueError('You must have three coordinates for each marker.')
if num_frames != second_positions.shape[0]:
raise ValueError('The first and second positions must have the ' +
'same number of frames.')
# This is the mean location of the markers at each position.
# n x 3
mx = first_positions.mean(1)
my = second_positions.mean(1)
# Subtract the mean location of the markers to remove the translation
# and leave the markers that have only been rotated with respect to one
# another (about their mean).
# n x m x 3 = n x m x 3 - n x 1 x 3
A = first_positions - np.expand_dims(mx, 1)
B = second_positions - np.expand_dims(my, 1)
# n x 3 x m
B_T = B.transpose((0, 2, 1))
# n x 3 x 3 = n x 3 x m * n x m x 3
C = matrix_multiply(B_T, A)
# TODO : The svd of a 3 x 3 may involve simple math and it would be more
# efficient to hard code it like the `det3` function for determinants.
# Note that svd in NumPy svd returns the transpose of Q as compared to
# Matlab/Octave.
# n x 3 x 3, n x 3, n x 3 x 3 = svd(n x 3 x 3)
if LooseVersion(np.__version__) < LooseVersion('1.8.0'):
P = np.zeros_like(C)
Q = np.zeros_like(C)
for i, c in enumerate(C):
P[i], T, Q[i] = np.linalg.svd(c)
else:
P, T, Q = np.linalg.svd(C)
# n x 3 x 3 = n x 3 x 3 * n x 3 x 3
rotations = matrix_multiply(P, Q)
# n determinants
det_P_Q = det3(rotations)
    # This construction of an identity matrix is here because the
    # determinants can sometimes be -1 instead of 1, which represents a
    # reflection; plugging the determinant into the identity matrix deals
    # with that. If the determinants are all positive 1's the correction is
    # a no-op, so it only needs to run when a determinant deviates from 1.
    if not (np.abs(det_P_Q - 1.0) < 1e-16).all():
# n x 3 x 3
I = np.zeros((num_frames, 3, 3))
I[:, 0, 0] = 1.0
I[:, 1, 1] = 1.0
I[:, 2, 2] = det_P_Q
# n x 3 x 3
rotations = matrix_multiply(matrix_multiply(P, I), Q)
# n x 3 = squeeze(n x 3 x 1 - n x 3 x 3 * n x 3 x 1)
translations = np.squeeze(np.expand_dims(my, 2) -
matrix_multiply(rotations, np.expand_dims(mx, 2)))
return rotations, translations | PypiClean |
/Netfoll_TL-2.0.1-py3-none-any.whl/netfoll_tl/tl/types/storage.py | from ...tl.tlobject import TLObject
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
class FileGif(TLObject):
CONSTRUCTOR_ID = 0xcae1aadf
SUBCLASS_OF_ID = 0xf3a1e6f3
def to_dict(self):
return {
'_': 'FileGif'
}
def _bytes(self):
return b''.join((
b'\xdf\xaa\xe1\xca',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FileJpeg(TLObject):
CONSTRUCTOR_ID = 0x7efe0e
SUBCLASS_OF_ID = 0xf3a1e6f3
def to_dict(self):
return {
'_': 'FileJpeg'
}
def _bytes(self):
return b''.join((
b'\x0e\xfe~\x00',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FileMov(TLObject):
CONSTRUCTOR_ID = 0x4b09ebbc
SUBCLASS_OF_ID = 0xf3a1e6f3
def to_dict(self):
return {
'_': 'FileMov'
}
def _bytes(self):
return b''.join((
b'\xbc\xeb\tK',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FileMp3(TLObject):
CONSTRUCTOR_ID = 0x528a0677
SUBCLASS_OF_ID = 0xf3a1e6f3
def to_dict(self):
return {
'_': 'FileMp3'
}
def _bytes(self):
return b''.join((
b'w\x06\x8aR',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FileMp4(TLObject):
CONSTRUCTOR_ID = 0xb3cea0e4
SUBCLASS_OF_ID = 0xf3a1e6f3
def to_dict(self):
return {
'_': 'FileMp4'
}
def _bytes(self):
return b''.join((
b'\xe4\xa0\xce\xb3',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FilePartial(TLObject):
CONSTRUCTOR_ID = 0x40bc6f52
SUBCLASS_OF_ID = 0xf3a1e6f3
def to_dict(self):
return {
'_': 'FilePartial'
}
def _bytes(self):
return b''.join((
b'Ro\xbc@',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FilePdf(TLObject):
CONSTRUCTOR_ID = 0xae1e508d
SUBCLASS_OF_ID = 0xf3a1e6f3
def to_dict(self):
return {
'_': 'FilePdf'
}
def _bytes(self):
return b''.join((
b'\x8dP\x1e\xae',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FilePng(TLObject):
CONSTRUCTOR_ID = 0xa4f63c0
SUBCLASS_OF_ID = 0xf3a1e6f3
def to_dict(self):
return {
'_': 'FilePng'
}
def _bytes(self):
return b''.join((
b'\xc0cO\n',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FileUnknown(TLObject):
CONSTRUCTOR_ID = 0xaa963b05
SUBCLASS_OF_ID = 0xf3a1e6f3
def to_dict(self):
return {
'_': 'FileUnknown'
}
def _bytes(self):
return b''.join((
b'\x05;\x96\xaa',
))
@classmethod
def from_reader(cls, reader):
return cls()
class FileWebp(TLObject):
CONSTRUCTOR_ID = 0x1081464c
SUBCLASS_OF_ID = 0xf3a1e6f3
def to_dict(self):
return {
'_': 'FileWebp'
}
def _bytes(self):
return b''.join((
b'LF\x81\x10',
))
@classmethod
def from_reader(cls, reader):
return cls() | PypiClean |
/DynamicistToolKit-0.5.3.tar.gz/DynamicistToolKit-0.5.3/dtk/process.py |
# standard library
from distutils.version import LooseVersion
# external dependencies
import numpy as np
from numpy.fft import fft, fftfreq
from scipy import __version__ as scipy_version
from scipy.integrate import trapz, cumtrapz
from scipy.interpolate import UnivariateSpline
from scipy.optimize import fmin
from scipy.signal import butter, filtfilt
try:
from scipy.stats import nanmean
except ImportError: # NOTE : nanmean was removed from SciPy in version 0.18.0.
from numpy import nanmean
from scipy import sparse
import matplotlib.pyplot as plt
def sync_error(tau, signal1, signal2, time, plot=False):
'''Returns the error between two signal time histories given a time
shift, tau.
Parameters
----------
tau : float
The time shift.
signal1 : ndarray, shape(n,)
The signal that will be interpolated. This signal is
typically "cleaner" that signal2 and/or has a higher sample rate.
signal2 : ndarray, shape(n,)
        The signal that will be shifted to synchronize with signal 1.
time : ndarray, shape(n,)
The time vector for the two signals
plot : boolean, optional, default=False
If true a plot will be shown of the resulting signals.
Returns
-------
error : float
Error between the two signals for the given tau.
'''
# make sure tau isn't too large
if np.abs(tau) >= time[-1]:
raise ValueError(('abs(tau), {0}, must be less than or equal to ' +
'{1}').format(str(np.abs(tau)), str(time[-1])))
# this is the time for the second signal which is assumed to lag the first
# signal
shiftedTime = time + tau
# create time vector where the two signals overlap
if tau > 0:
intervalTime = shiftedTime[np.nonzero(shiftedTime < time[-1])]
else:
intervalTime = shiftedTime[np.nonzero(shiftedTime > time[0])]
# interpolate between signal 1 samples to find points that correspond in
# time to signal 2 on the shifted time
sig1OnInterval = np.interp(intervalTime, time, signal1)
# truncate signal 2 to the time interval
if tau > 0:
sig2OnInterval = signal2[np.nonzero(shiftedTime <= intervalTime[-1])]
else:
sig2OnInterval = signal2[np.nonzero(shiftedTime >= intervalTime[0])]
if plot is True:
fig, axes = plt.subplots(2, 1)
axes[0].plot(time, signal1, time, signal2)
axes[0].legend(('Signal 1', 'Signal 2'))
axes[0].set_title("Before shifting.")
axes[1].plot(intervalTime, sig1OnInterval, intervalTime,
sig2OnInterval)
axes[1].set_title("After shifting.")
axes[1].legend(('Signal 1', 'Signal 2'))
plt.show()
# calculate the error between the two signals
error = np.linalg.norm(sig1OnInterval - sig2OnInterval)
return error
def find_timeshift(signal1, signal2, sample_rate, guess=None, plot=False):
'''Returns the timeshift, tau, of the second signal relative to the
first signal.
Parameters
----------
signal1 : array_like, shape(n, )
The base signal.
signal2 : array_like, shape(n, )
A signal shifted relative to the first signal. The second signal
should be leading the first signal.
sample_rate : integer or float
Sample rate of the signals. This should be the same for each signal.
guess : float, optional, default=None
If you've got a good guess for the time shift then supply it here.
    plot : boolean, optional, default=False
If true, a plot of the error landscape will be shown.
Returns
-------
tau : float
The timeshift between the two signals.
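
    Examples
    --------
    A short sketch with a known shift (the signals below are made up)::

        import numpy as np
        sample_rate = 100
        t = np.linspace(0.0, 10.0, 1001)
        s1 = np.sin(2.0 * np.pi * t)
        s2 = np.sin(2.0 * np.pi * (t + 0.25))  # leads s1 by 0.25 seconds
        tau = find_timeshift(s1, s2, sample_rate)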
'''
# raise an error if the signals are not the same length
if len(signal1) != len(signal2):
raise ValueError('Signals are not the same length!')
# subtract the mean and normalize both signals
signal1 = normalize(subtract_mean(signal1))
signal2 = normalize(subtract_mean(signal2))
time = time_vector(len(signal1), sample_rate)
if guess is None:
# set up the error landscape, error vs tau
        # We assume the time shift is no more than a quarter of the total
        # record length.
tau_range = np.linspace(-time[len(time) // 4], time[len(time) // 4],
num=len(time) // 10)
# TODO : Can I vectorize this?
error = np.zeros_like(tau_range)
for i, val in enumerate(tau_range):
error[i] = sync_error(val, signal1, signal2, time)
if plot is True:
plt.figure()
plt.plot(tau_range, error)
plt.xlabel('tau')
plt.ylabel('error')
plt.show()
# find initial condition from landscape
tau0 = tau_range[np.argmin(error)]
else:
tau0 = guess
print("The minimun of the error landscape is {}.".format(tau0))
tau, fval = fmin(sync_error, tau0, args=(signal1, signal2, time),
full_output=True, disp=True)[0:2]
return tau
def truncate_data(tau, signal1, signal2, sample_rate):
'''Returns the truncated vectors with respect to the time shift tau. It
    assumes you've found the time shift between the two signals with
    find_timeshift or something similar.
Parameters
----------
tau : float
The time shift.
signal1 : array_like, shape(n, )
A time series.
signal2 : array_like, shape(n, )
A time series.
sample_rate : integer
The sample rate of the two signals.
Returns
-------
truncated1 : ndarray, shape(m, )
The truncated time series.
truncated2 : ndarray, shape(m, )
The truncated time series.
'''
t = time_vector(len(signal1), sample_rate)
# shift the first signal
t1 = t - tau
t2 = t
# make the common time interval
common_interval = t2[np.nonzero(t2 < t1[-1])]
truncated1 = np.interp(common_interval, t1, signal1)
truncated2 = signal2[np.nonzero(t2 <= common_interval[-1])]
return truncated1, truncated2
def least_squares_variance(A, sum_of_residuals):
"""Returns the variance in the ordinary least squares fit and the
covariance matrix of the estimated parameters.
Parameters
----------
A : ndarray, shape(n,d)
The left hand side matrix in Ax=B.
sum_of_residuals : float
The sum of the residuals (residual sum of squares).
Returns
-------
variance : float
The variance of the fit.
covariance : ndarray, shape(d,d)
The covariance of x in Ax = b.
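
    Examples
    --------
    A minimal sketch using the residual reported by an ordinary least
    squares solve (the data is random and purely illustrative; the rcond
    argument follows newer NumPy releases)::

        import numpy as np
        A = np.random.random((20, 3))
        b = np.random.random(20)
        x, res, rank, sv = np.linalg.lstsq(A, b, rcond=None)
        variance, covariance = least_squares_variance(A, res[0])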
"""
    # I am pretty sure that the residuals from numpy.linalg.lstsq are the SSE
# (the residual sum of squares).
degrees_of_freedom = (A.shape[0] - A.shape[1])
variance = sum_of_residuals / degrees_of_freedom
# There may be a way to use the pinv here for more efficient
# computations. (A^T A)^-1 A^T = np.linalg.pinv(A) so np.linalg.pinv(A)
# (A^T)^-1 ... or maybe not.
if sparse.issparse(A):
inv = sparse.linalg.inv
prod = A.T * A
else:
inv = np.linalg.inv
prod = np.dot(A.T, A)
covariance = variance * inv(prod)
return variance, covariance
def coefficient_of_determination(measured, predicted):
"""Computes the coefficient of determination with respect to a measured
and predicted array.
Parameters
----------
measured : array_like, shape(n,)
The observed or measured values.
predicted : array_like, shape(n,)
The values predicted by a model.
Returns
-------
r_squared : float
The coefficient of determination.
Notes
-----
The coefficient of determination [also referred to as R^2 and VAF
    (variance accounted for)] is computed in either of these two ways::
sum( [predicted - mean(measured)] ** 2 )
R^2 = ----------------------------------------
sum( [measured - mean(measured)] ** 2 )
or::
sum( [measured - predicted] ** 2 )
R^2 = 1 - ---------------------------------------
sum( [measured - mean(measured)] ** 2 )
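
    Examples
    --------
    A tiny sketch (the numbers are made up)::

        import numpy as np
        measured = np.array([1.0, 2.0, 3.0, 4.0])
        predicted = np.array([1.1, 1.9, 3.2, 3.9])
        r_squared = coefficient_of_determination(measured, predicted)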
"""
# 2-norm => np.sqrt(np.sum(measured - predicted)**2))
numerator = np.linalg.norm(measured - predicted) ** 2
denominator = np.linalg.norm(measured - measured.mean()) ** 2
r_squared = 1.0 - numerator / denominator
return r_squared
def fit_goodness(ym, yp):
'''
Calculate the goodness of fit.
Parameters
----------
ym : ndarray, shape(n,)
The vector of measured values.
yp : ndarry, shape(n,)
The vector of predicted values.
Returns
-------
rsq : float
The r squared value of the fit.
SSE : float
The error sum of squares.
SST : float
The total sum of squares.
SSR : float
The regression sum of squares.
Notes
-----
SST = SSR + SSE
'''
ym_bar = np.mean(ym)
SSR = sum((yp - ym_bar) ** 2)
SST = sum((ym - ym_bar) ** 2)
SSE = SST - SSR
rsq = SSR / SST
return rsq, SSE, SST, SSR
def spline_over_nan(x, y):
"""
Returns a vector of which a cubic spline is used to fill in gaps in the
data from nan values.
Parameters
----------
x : ndarray, shape(n,)
        The x values should not contain nans.
y : ndarray, shape(n,)
The y values may contain nans.
Returns
-------
ySpline : ndarray, shape(n,)
The splined y values. If `y` doesn't contain any nans then `ySpline` is
`y`.
Notes
-----
The splined data is identical to the input data, except that the nan's are
replaced by new data from the spline fit.
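
    Examples
    --------
    A small sketch filling a single gap (the values are arbitrary)::

        import numpy as np
        x = np.linspace(0.0, 1.0, 6)
        y = x ** 3
        y[2] = np.nan
        y_filled = spline_over_nan(x, y)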
"""
# if there are nans in the data then spline away
if np.isnan(y).any():
# remove the values with nans
        xNoNan = x[~np.isnan(y)]
        yNoNan = y[~np.isnan(y)]
# fit a spline through the data
spline = UnivariateSpline(xNoNan, yNoNan, k=3, s=0)
return spline(x)
else:
return y
def curve_area_stats(x, y):
'''
Return the box plot stats of a curve based on area.
Parameters
----------
x : ndarray, shape (n,)
The x values
y : ndarray, shape (n,m)
The y values
n are the time steps
m are the various curves
Returns
-------
A dictionary containing:
median : ndarray, shape (m,)
The x value corresponding to 0.5*area under the curve
lq : ndarray, shape (m,)
lower quartile
uq : ndarray, shape (m,)
upper quartile
98p : ndarray, shape (m,)
98th percentile
2p : ndarray, shape (m,)
2nd percentile
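
    Examples
    --------
    A brief sketch (the curves are arbitrary)::

        import numpy as np
        x = np.linspace(0.0, 1.0, 101)
        y = np.ones((101, 2))  # two flat curves
        stats = curve_area_stats(x, y)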
'''
area = trapz(y, x=x, axis=0) # shape (m,)
percents = np.array([0.02*area, 0.25*area, 0.5*area, 0.75*area, 0.98*area]) # shape (5,m)
CumArea = cumtrapz(y.T, x=x.T) # shape(m,n)
xstats = {'2p':[], 'lq':[], 'median':[], 'uq':[], '98p':[]}
for j, curve in enumerate(CumArea):
flags = [False for flag in range(5)]
for i, val in enumerate(curve):
if val > percents[0][j] and flags[0] == False:
xstats['2p'].append(x[i])
flags[0] = True
elif val > percents[1][j] and flags[1] == False:
xstats['lq'].append(x[i])
flags[1] = True
elif val > percents[2][j] and flags[2] == False:
xstats['median'].append(x[i])
flags[2] = True
elif val > percents[3][j] and flags[3] == False:
xstats['uq'].append(x[i])
flags[3] = True
elif val > percents[4][j] and flags[4] == False:
xstats['98p'].append(x[i])
flags[4] = True
if flags[4] == False:
# this is what happens if it finds none of the above
xstats['2p'].append(0.)
xstats['lq'].append(0.)
xstats['median'].append(0.)
xstats['uq'].append(0.)
xstats['98p'].append(0.)
for k, v in xstats.items():
xstats[k] = np.array(v)
return xstats
def freq_spectrum(data, sampleRate):
"""
Return the frequency spectrum of a data set.
Parameters
----------
data : ndarray, shape (m,) or shape(n,m)
The array of time signals where n is the number of variables and m is
the number of time steps.
sampleRate : int
The signal sampling rate in hertz.
Returns
-------
frequency : ndarray, shape (p,)
The frequencies where p is a power of 2 close to m.
amplitude : ndarray, shape (p,n)
The amplitude at each frequency.
"""
def nextpow2(i):
'''
Return the next power of 2 for the given number.
'''
n = 2
while n < i: n *= 2
return n
time = 1. / sampleRate # sample time
    try:
        L = data.shape[1]  # length of data if (n, m)
    except IndexError:
        L = data.shape[0]  # length of data if (n,)
    # calculate the closest power of 2 for the length of the data
    n = nextpow2(L)
    Y = fft(data, n) / L  # divide by L for scaling
    f = fftfreq(n, d=time)
    # use integer division so the slice indices stay integers
    frequency = f[1:n // 2]
    try:
        # multiply by 2 because we take half the vector
        amplitude = 2 * abs(Y[:, 1:n // 2]).T
    except IndexError:
        amplitude = 2 * abs(Y[1:n // 2])
return frequency, amplitude
def butterworth(data, cutoff, samplerate, order=2, axis=-1, btype='lowpass',
**kwargs):
"""Returns the data filtered by a forward/backward Butterworth filter.
Parameters
----------
data : ndarray, shape(n,) or shape(n,m)
The data to filter. Only handles 1D and 2D arrays.
cutoff : float
The filter cutoff frequency in hertz.
samplerate : float
The sample rate of the data in hertz.
order : int
The order of the Butterworth filter.
axis : int
The axis to filter along.
btype : {'lowpass'|'highpass'|'bandpass'|'bandstop'}
The type of filter. Default is 'lowpass'.
kwargs : keyword value pairs
Any extra arguments to get passed to scipy.signal.filtfilt.
Returns
-------
filtered_data : ndarray
The low pass filtered version of data.
Notes
-----
The provided cutoff frequency is corrected by a multiplicative factor to
ensure the double pass filter cutoff frequency matches that of a single
pass filter, see [Winter2009]_.
References
----------
.. [Winter2009] David A. Winter (2009) Biomechanics and motor control of
human movement. 4th edition. Hoboken: Wiley.
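
    Examples
    --------
    A hedged sketch low pass filtering a noisy sine wave (the signal and
    cutoff are arbitrary choices)::

        import numpy as np
        sample_rate = 1000.0  # hertz
        t = np.linspace(0.0, 1.0, 1001)
        x = np.sin(2.0 * np.pi * 5.0 * t) + np.random.normal(0.0, 0.1, 1001)
        x_filtered = butterworth(x, 50.0, sample_rate, order=2)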
"""
if len(data.shape) > 2:
raise ValueError('This function only works with 1D or 2D arrays.')
nyquist_frequency = 0.5 * samplerate
# Since we use filtfilt below, we correct the cutoff frequency to ensure
# the filter**2 crosses the -3 dB line at the cutoff frequency.
# |H(w)| = sqrt(1 / (1 + (w / wc)**(2n)))
# wc : cutoff frequency
# n : Butterworth filter order
# |H**2(w)| = 1 / (1 + (w / wc)**(2n))
# |H**2(wc)| = 1 / (1 + (wc / wa)**(2n)) = 1 / sqrt(2) = -3 dB
# wa : adjusted cutoff frequency for double filter
    # wa = wc * (np.sqrt(2.0) - 1.0) ** (-1.0 / (2.0 * n))
correction_factor = (np.sqrt(2.0) - 1.0) ** (-1.0 / (2.0 * order))
# Wn is the ratio of the corrected cutoff frequency to the Nyquist
# frequency.
Wn = correction_factor * cutoff / nyquist_frequency
b, a = butter(order, Wn, btype=btype)
# SciPy 0.9.0 has a simple filtfilt, with no optional arguments. SciPy
# 0.10.0 introduced the axis argument. So, to stay compatible with
# 0.9.0, which is the SciPy installed on Ubuntu 12.04 LTS, we check the
# version. The version in SciPy 0.9.0 doesn't have kwargs either.
nine = LooseVersion('0.9.0')
ten = LooseVersion('0.10.0')
current = LooseVersion(scipy_version)
if current >= nine and current < ten:
print('SciPy 0.9.0 only supports 1D filtfilt, ' +
'so you get a slow version.')
if len(data.shape) == 2:
if axis == 0:
data = data.T
filtered = np.zeros_like(data)
for i, vector in enumerate(data):
filtered[i] = filtfilt(b, a, vector)
if axis == 0:
return filtered.T
else:
return filtered
else:
return filtfilt(b, a, data)
elif current >= ten:
return filtfilt(b, a, data, axis=axis, **kwargs)
def subtract_mean(sig, hasNans=False):
'''
Subtracts the mean from a signal with nanmean.
Parameters
----------
sig : ndarray, shape(n,)
hasNans : boolean, optional
If your data has nans use this flag if you want to ignore them.
Returns
-------
ndarray, shape(n,)
sig minus the mean of sig
'''
if hasNans:
return sig - nanmean(sig)
else:
return sig - np.mean(sig)
def normalize(sig, hasNans=False):
'''
Normalizes the vector with respect to the maximum value.
Parameters
----------
sig : ndarray, shape(n,)
hasNans : boolean, optional
If your data has nans use this flag if you want to ignore them.
Returns
-------
normSig : ndarray, shape(n,)
The signal normalized with respect to the maximum value.
'''
# TODO : This could be a try/except statement instead of an optional
# argument.
if hasNans:
normSig = sig / np.nanmax(sig)
else:
normSig = sig / np.max(sig)
return normSig
def derivative(x, y, method='forward', padding=None):
"""Returns the derivative of y with respect to x.
Parameters
----------
x : ndarray, shape(n,)
The monotonically increasing independent variable.
y : ndarray, shape(n,) or shape(n, m)
The dependent variable(s).
method : string, optional
'forward'
Use the forward difference method.
'backward'
Use the backward difference method.
'central'
Use the central difference method.
'combination'
This is equivalent to ``method='central', padding='second
order'`` and is in place for backwards compatibility. Selecting
this method will ignore and user supplied padding settings.
padding : None, float, 'adjacent' or 'second order', optional
The default, None, will result in the derivative vector being n-a in
length where a=1 for forward and backward and a=2 for central. If
you provide a float this value will be used to pad the result so
that len(dydx) == n. If 'adjacent' is used, the nearest neighbor
will be used for padding. If 'second order' is chosen second order
        forward and backward differences are used to pad the end points.
Returns
-------
    dydx : ndarray, shape(n,) or shape(n-a,)
        The derivative of y with respect to x. If padding is None the result
        has shape(n-a,), where a=1 for 'forward' and 'backward' and a=2 for
        'central'; otherwise the result is padded to shape(n,).
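
    Examples
    --------
    A short sketch on y = x**2 (the grid is arbitrary)::

        import numpy as np
        x = np.linspace(0.0, 1.0, 11)
        y = x ** 2
        dydx = derivative(x, y, method='central', padding='second order')
        # dydx has shape (11,) and approximates 2 * x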
"""
x = np.asarray(x)
y = np.asarray(y)
if method == 'combination':
method = 'central'
padding = 'second order'
if len(x.shape) > 1:
        raise ValueError('x must have shape(n,).')
if len(y.shape) > 2:
raise ValueError('y can at most have two dimensions.')
if x.shape[0] != y.shape[0]:
raise ValueError('x and y must have the same first dimension.')
if method == 'forward' or method == 'backward':
if x.shape[0] < 2:
raise ValueError('x must have a length of at least 2.')
if len(y.shape) == 1:
deriv = np.diff(y) / np.diff(x)
else:
deriv = (np.diff(y.T) / np.diff(x)).T
elif method == 'central':
if x.shape[0] < 3:
raise ValueError('x must have a length of at least 3.')
if len(y.shape) == 1:
deriv = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
else:
deriv = ((y[2:] - y[:-2]).T / (x[2:] - x[:-2])).T
else:
msg = ("There is no {} method here! Try 'forward', 'backward', "
"'central', or 'combination'.").format(method)
raise NotImplementedError(msg)
if padding is None:
dydx = deriv
else:
dydx = np.zeros_like(y)
if padding == 'adjacent':
dydx[0] = deriv[0]
dydx[-1] = deriv[-1]
elif padding == 'second order':
dydx[0] = ((-3.0*y[0] + 4.0*y[1] - y[2]) / 2.0 / (x[1] - x[0]))
dydx[-1] = ((3.0*y[-1] - 4.0*y[-2] + y[-3]) / 2.0 /
(x[-1] - x[-2]))
else:
dydx[0] = padding
dydx[-1] = padding
if method == 'forward':
dydx[:-1] = deriv
elif method == 'backward':
dydx[1:] = deriv
elif method == 'central':
dydx[1:-1] = deriv
return dydx
def time_vector(num_samples, sample_rate, start_time=0.0):
'''Returns a time vector starting at zero.
Parameters
----------
num_samples : int
Total number of samples.
sample_rate : float
Sample rate of the signal in hertz.
start_time : float, optional, default=0.0
The start time of the time series.
Returns
-------
time : ndarray, shape(numSamples,)
Time vector starting at zero.
'''
ns = num_samples
sr = float(sample_rate)
return np.linspace(start_time, (ns - 1) / sr + start_time, num=ns) | PypiClean |
/Nano-CAT-0.7.2.tar.gz/Nano-CAT-0.7.2/nanoCAT/recipes/mol_filter.py | from typing import Union, Iterable, Dict, TypeVar, Callable
import numpy as np
from scipy.spatial.distance import cdist
from scm.plams import Molecule, Atom
__all__ = ['get_mol_length', 'filter_mol', 'filter_data']
T = TypeVar('T')
def get_mol_length(mol: Union[np.ndarray, Molecule],
atom: Union[np.ndarray, Atom]) -> float:
"""Return the distance between **atom** and the atom in **mol** which it is furthest removed from.
Examples
--------
    Use a molecule's length for filtering a list of molecules:
.. code:: python
>>> from CAT.recipes import get_mol_length, filter_mol
>>> from scm.plams import Molecule
>>> mol_list = [Molecule(...), ...]
>>> data = [...]
>>> filter = lambda mol: get_mol_length(mol, mol.properties.get('anchor')) < 10
>>> mol_dict = filter_mol(mol_list, data, filter=filter)
Parameters
----------
mol : :class:`~scm.plams.mol.molecule.Molecule` or :class:`numpy.ndarray`
A PLAMS molecule or a 2D numpy array with a molecules Cartesian coordinates.
atom : :class:`~scm.plams.mol.atom.Atom` or :class:`numpy.ndarray`
A PLAMS atom or a 1D numpy array with an atoms Cartesian coordinates.
Returns
-------
:class:`float`
The largest distance between **atom** and all other atoms **mol**.
See Also
--------
:func:`filter_mol`
Filter **mol_list** and **data** based on elements from **mol_list**.
""" # noqa: E501
if isinstance(atom, Atom):
        atom_xyz = np.fromiter(atom.coords, dtype=float, count=3)
atom_xyz.shape = (1, 3)
else:
atom_xyz = np.asarray(atom, dtype=float).reshape((1, 3))
dist = cdist(atom_xyz, mol)
return dist.max()
def filter_mol(mol_list: Iterable[Molecule], data: Iterable[T],
filter: Callable[[Molecule], bool]) -> Dict[Molecule, T]:
"""Filter **mol_list** and **data** based on elements from **mol_list**.
Examples
--------
.. code:: python
>>> from scm.plams import Molecule
>>> from CAT.recipes import filter_mol
>>> mol_list = [Molecule(...), ...]
>>> data = [...]
>>> mol_dict1 = filter_mol(mol_list, data, filter=lambda n: n < 10)
>>> prop1 = [...]
>>> prop2 = [...]
>>> prop3 = [...]
>>> multi_data = zip([prop1, prop2, prop3])
>>> mol_dict2 = filter_mol(mol_list, multi_data, filter=lambda n: n < 10)
>>> keys = mol_dict1.keys()
>>> values = mol_dict1.values()
>>> mol_dict3 = filter_mol(keys, values, filter=lambda n: n < 5)
Parameters
----------
mol_list : :class:`~collections.abc.Iterable` [:class:`~scm.plams.mol.molecule.Molecule`]
An iterable of the, to-be filtered, PLAMS molecules.
data : :class:`Iterable[T]<collections.abc.Iterable>`
An iterable which will be assigned as values to the to-be returned dict.
These parameters will be filtered in conjunction with **mol_list**.
Note that **mol_list** and **data** *should* be of the same length.
filter : :class:`Callable[[Molecule], bool]<collections.abc.Callable>`
A callable for filtering the distance vector.
An example would be :code:`lambda n: max(n) < 10`.
Returns
-------
:class:`dict` [:class:`~scm.plams.mol.molecule.Molecule`, :class:`T<typing.TypeVar>`]
A dictionary with all (filtered) molecules as keys and elements from **data** as values.
See Also
--------
:func:`filter_data`
Filter **mol_list** and **data** based on elements from **data**.
"""
return {mol: item for mol, item in zip(mol_list, data) if filter(mol)}
def filter_data(mol_list: Iterable[Molecule], data: Iterable[T],
filter: Callable[[T], bool]) -> Dict[Molecule, T]:
"""Filter **mol_list** and **data** based on elements from **data**.
Examples
--------
See :func:`filter_mol` for a number of input examples.
Parameters
----------
mol_list : :class:`~collections.abc.Iterable` [:class:`~scm.plams.mol.molecule.Molecule`]
An iterable of the, to-be filtered, PLAMS molecules.
data : :class:`Iterable[T]<collections.abc.Iterable>`
An iterable which will be assigned as values to the to-be returned dict.
These parameters will be filtered in conjunction with **mol_list**.
Note that **mol_list** and **data** *should* be of the same length.
filter : :class:`Callable[[T], bool]<collections.abc.Callable>`
A callable for filtering the elements of **data**.
An example would be :code:`lambda n: n < 10`.
Returns
-------
:class:`dict` [:class:`~scm.plams.mol.molecule.Molecule`, :class:`T<typing.TypeVar>`]
A dictionary with all (filtered) molecules as keys and elements from **data** as values.
See Also
--------
:func:`filter_mol`
Filter **mol_list** and **data** based on elements from **mol_list**.
"""
return {mol: item for mol, item in zip(mol_list, data) if filter(item)} | PypiClean |
/LGNpy-1.0.0-py3-none-any.whl/lgnpy/LinearGaussian.py | import pandas as pd
import numpy as np
import networkx as nx
import copy
from .Graph import Graph
from .logging_config import Logger
class LinearGaussian(Graph):
"""
Implemented Linear Gaussian Algorithm
"""
def __init__(self):
"""
Inherits base graph methods from Graph
"""
super().__init__()
self.log = Logger()
def __get_node_values(self, node):
"""
Get mean and variance of node using Linear Gaussian CPD. Calculated by finding betas
"""
index_to_keep = [self.nodes.index(node)]
index_to_reduce = [self.nodes.index(idx) for idx in list(self.g.pred[node])]
values = self.__get_parent_calculated_means(list(self.g.pred[node]))
val = {n: round(v, 3) for n, v in zip(list(self.g.pred[node]), values)}
mu_j = self.mean[index_to_keep]
mu_i = self.mean[index_to_reduce]
sig_i_j = self.cov[np.ix_(index_to_reduce, index_to_keep)]
sig_j_i = self.cov[np.ix_(index_to_keep, index_to_reduce)]
sig_i_i_inv = np.linalg.inv(self.cov[np.ix_(index_to_reduce, index_to_reduce)])
sig_j_j = self.cov[np.ix_(index_to_keep, index_to_keep)]
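        # Conditional Gaussian update (a standard multivariate normal
        # result, restated here for reference): with parent block i and
        # target node j,
        #   mean_{j|i} = mu_j + Sig_ji . Sig_ii^-1 . (x_i - mu_i)
        #   cov_{j|i}  = Sig_jj - Sig_ji . Sig_ii^-1 . Sig_ij
        # The code folds the mean update into an intercept beta_0 plus a
        # slope vector beta so the node's CPD parameters can be reported.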
covariance = sig_j_j - np.dot(np.dot(sig_j_i, sig_i_i_inv), sig_i_j)
beta_0 = mu_j - np.dot(np.dot(sig_j_i, sig_i_i_inv), mu_i)
beta = np.dot(sig_j_i, sig_i_i_inv)
new_mu = beta_0 + np.dot(beta, values)
node_values = {n: round(v, 3) for n, v in zip(list(self.g.pred[node]), values)}
node_beta = list(np.around(np.array(list(beta_0) + list(beta[0])), 2))
self.parameters[node] = {"node_values": node_values, "node_betas": node_beta}
return new_mu[0], covariance[0][0]
def __get_parent_calculated_means(self, nodes):
"""
Get evidences of parents given node name list
"""
pa_e = []
for node in nodes:
ev = self.calculated_means[node]
if ev is None:
ev = self.mean[self.nodes.index(node)]
pa_e.append(ev)
return pa_e
def get_model_parameters(self):
"""
Get parameters for each node
"""
return self.parameters
def __build_results(self):
"""
Make Pandas dataframe with the results.
"""
self.inf_summary = pd.DataFrame(
index=self.nodes,
columns=[
"Evidence",
"Mean",
"Mean_inferred",
"Variance",
"Variance_inferred",
],
)
self.inf_summary.loc[:, "Mean"] = self.mean
self.inf_summary["Evidence"] = self.inf_summary.index.to_series().map(
self.evidences
)
self.inf_summary.loc[:, "Variance"] = list(np.around(np.diag(self.cov),3))
self.inf_summary["Mean_inferred"] = self.inf_summary.index.to_series().map(
self.calculated_means
)
self.inf_summary["Variance_inferred"] = self.inf_summary.index.to_series().map(
self.calculated_vars
)
self.inf_summary["u_%change"] = (
(self.inf_summary["Mean_inferred"] - self.inf_summary["Mean"])
/ self.inf_summary["Mean"]
) * 100
self.inf_summary = (
self.inf_summary.round(4)
            .replace(np.nan, "", regex=True)
.replace(0, "", regex=True)
)
return self.inf_summary
def __get_pure_root_nodes(self, graph):
root_nodes = [
x
for x in graph.nodes()
if graph.out_degree(x) >= 1 and graph.in_degree(x) == 0
]
children_of_root_nodes = []
for node in root_nodes:
children_of_root_nodes.extend(list(graph.succ[node]))
pure_children = []
for node in children_of_root_nodes:
node_parents = list(graph.pred[node])
flag = False
for parent in node_parents:
if graph.in_degree(parent) != 0:
flag = True
if not flag:
pure_children.append(node)
return list(set(pure_children))
def __remove_pred_edges(self, node, graph):
preds = graph.pred[node]
for parent in list(preds):
graph.remove_edge(parent, node)
def __print_message(self,log_instance,node):
log_instance.debug(f"Calculated:'{node}'= {round(self.calculated_means[node], 3)}")
log_instance.debug(f"Parent nodes used: {self.parameters[node]['node_values']}")
log_instance.debug(f"Beta calculated: {self.parameters[node]['node_betas']}")
def run_inference(self, debug=True, return_results=True):
"""
Run Inference on network with given evidences.
"""
g_temp = copy.deepcopy(self.g)
self._log = self.log.setup_logger(debug=debug)
self._log.debug("Started")
        if all(x is None for x in self.evidences.values()):
self._log.debug("No evidences were set. Proceeding without evidence")
self.parameters = dict.fromkeys(self.nodes)
self.calculated_means = copy.deepcopy(self.evidences)
self.calculated_vars = dict.fromkeys(self.nodes)
self.done_flags = dict.fromkeys(self.nodes)
it=0
while not nx.is_empty(g_temp):
it+=1
pure_children = self.__get_pure_root_nodes(g_temp)
for child in pure_children:
if self.evidences[child] is None:
self.calculated_means[child], self.calculated_vars[child] = self.__get_node_values(child)
self.__print_message(self._log,child)
else:
self._log.debug(f"Skipped Calculating:'{child}' as evidence is available.")
g_temp.remove_nodes_from(list(g_temp.pred[child]))
return self.__build_results()
def get_inference_results(self):
"""Get inference result
Returns
-------
dataframe: Dataframe with inference results.
"""
return self.inf_summary | PypiClean |
/MarkDo-0.3.0.tar.gz/MarkDo-0.3.0/markdo/static/foundation/js/foundation/foundation.magellan.js | ;(function ($, window, document, undefined) {
'use strict';
Foundation.libs.magellan = {
name : 'magellan',
version : '5.0.3',
settings : {
active_class: 'active',
threshold: 0
},
init : function (scope, method, options) {
this.fixed_magellan = $("[data-magellan-expedition]");
this.magellan_placeholder = $('<div></div>').css({
height: this.fixed_magellan.outerHeight(true)
}).hide().insertAfter(this.fixed_magellan);
this.set_threshold();
this.set_active_class(method);
this.last_destination = $('[data-magellan-destination]').last();
this.events();
},
events : function () {
var self = this;
$(this.scope)
.off('.magellan')
.on('arrival.fndtn.magellan', '[data-magellan-arrival]', function (e) {
var $destination = $(this),
$expedition = $destination.closest('[data-magellan-expedition]'),
active_class = $expedition.attr('data-magellan-active-class')
|| self.settings.active_class;
$destination
.closest('[data-magellan-expedition]')
.find('[data-magellan-arrival]')
.not($destination)
.removeClass(active_class);
$destination.addClass(active_class);
});
this.fixed_magellan
.off('.magellan')
.on('update-position.fndtn.magellan', function() {
var $el = $(this);
})
.trigger('update-position');
$(window)
.off('.magellan')
.on('resize.fndtn.magellan', function() {
this.fixed_magellan.trigger('update-position');
}.bind(this))
.on('scroll.fndtn.magellan', function() {
var windowScrollTop = $(window).scrollTop();
self.fixed_magellan.each(function() {
var $expedition = $(this);
if (typeof $expedition.data('magellan-top-offset') === 'undefined') {
$expedition.data('magellan-top-offset', $expedition.offset().top);
}
if (typeof $expedition.data('magellan-fixed-position') === 'undefined') {
$expedition.data('magellan-fixed-position', false);
}
var fixed_position = (windowScrollTop + self.settings.threshold) > $expedition.data("magellan-top-offset");
var attr = $expedition.attr('data-magellan-top-offset');
if ($expedition.data("magellan-fixed-position") != fixed_position) {
$expedition.data("magellan-fixed-position", fixed_position);
if (fixed_position) {
$expedition.addClass('fixed');
$expedition.css({position:"fixed", top:0});
self.magellan_placeholder.show();
} else {
$expedition.removeClass('fixed');
$expedition.css({position:"", top:""});
self.magellan_placeholder.hide();
}
if (fixed_position && typeof attr != 'undefined' && attr != false) {
$expedition.css({position:"fixed", top:attr + "px"});
}
}
});
});
if (this.last_destination.length > 0) {
$(window).on('scroll.fndtn.magellan', function (e) {
var windowScrollTop = $(window).scrollTop(),
scrolltopPlusHeight = windowScrollTop + $(window).height(),
lastDestinationTop = Math.ceil(self.last_destination.offset().top);
$('[data-magellan-destination]').each(function () {
var $destination = $(this),
destination_name = $destination.attr('data-magellan-destination'),
topOffset = $destination.offset().top - $destination.outerHeight(true) - windowScrollTop;
if (topOffset <= self.settings.threshold) {
$("[data-magellan-arrival='" + destination_name + "']").trigger('arrival');
}
          // On large screens we may hit the bottom of the page without reaching the top of the last magellan-destination, so force it
if (scrolltopPlusHeight >= $(self.scope).height() && lastDestinationTop > windowScrollTop && lastDestinationTop < scrolltopPlusHeight) {
$('[data-magellan-arrival]').last().trigger('arrival');
}
});
});
}
},
set_threshold : function () {
if (typeof this.settings.threshold !== 'number') {
this.settings.threshold = (this.fixed_magellan.length > 0) ?
this.fixed_magellan.outerHeight(true) : 0;
}
},
set_active_class : function (options) {
if (options && options.active_class && typeof options.active_class === 'string') {
this.settings.active_class = options.active_class;
}
},
off : function () {
$(this.scope).off('.fndtn.magellan');
$(window).off('.fndtn.magellan');
},
reflow : function () {}
};
}(jQuery, this, this.document)); | PypiClean |
/Flask-Actions-0.6.6.tar.gz/Flask-Actions-0.6.6/flaskext/actions/server_actions.py | import sys,os
from werkzeug import script
def runfcgi(application, before_daemon=None):
def action(
protocol = 'fcgi',
hostname = ('h', ''),
port = ('p', 3001),
socket = '',
method = 'threaded',
daemonize = False,
workdir = '.',
pidfile = '',
maxspare = 5,
minspare = 2,
maxchildren = 50,
maxrequests = 0,
debug = False,
outlog = '/dev/null',
errlog = '/dev/null',
            umask = 0o22,
):
"""run application use flup
you can choose these arguments:
protocol : scgi, fcgi or ajp
method : threaded or fork
socket : Unix domain socket
children : number of threads or processes"""
from .fastcgi import runfastcgi
runfastcgi(
application = application,
protocol = protocol,
host = hostname,
port = port,
socket = socket,
method = method,
daemonize = daemonize,
workdir = workdir,
pidfile = pidfile,
maxspare = maxspare,
minspare = minspare,
maxchildren = maxchildren,
maxrequests = maxrequests,
debug = debug,
outlog = outlog,
errlog = errlog,
umask = umask,
)
return action
def run_twisted_server(app):
def action(host=('h','127.0.0.1'),port=('p', 8000)):
"""run application use twisted http server
"""
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, app))
        reactor.listenTCP(int(port), factory, interface=host)
reactor.run()
return action
def run_appengine_server(app):
def action(host=('h','127.0.0.1'),port=('p', 8000)):
"""run application use appengine http server
"""
from google.appengine.ext.webapp import util
util.run_wsgi_app(app)
return action
def run_gunicorn_server(app):
def action(bind=('b','127.0.0.1:8000'),workers=('w',4),pid=('p','tmp/flask.pid'),log_file='tmp/flask.log',log_level='info'):
"""run application use gunicorn http server
"""
from gunicorn import version_info
if version_info < (0, 9, 0):
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config
            arbiter = Arbiter(Config({'bind': bind, 'workers': workers,
                                      'pidfile': pid, 'logfile': log_file}), app)
arbiter.run()
else:
from gunicorn.app.base import Application
class FlaskApplication(Application):
def init(self, parser, opts, args):
return {
'bind': bind,
'workers': workers,
'pidfile':pid,
'logfile':log_file,
'loglevel':log_level,
}
def load(self):
return app
FlaskApplication().run()
return action
def run_tornado_server(app):
"""run application use tornado http server
"""
def action(port=('p', 8000)):
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
container = tornado.wsgi.WSGIContainer(app)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=port)
tornado.ioloop.IOLoop.instance().start()
return action
def run_fapws_server(app):
def action(host=('h','127.0.0.1'),port=('p', '8000')):
"""run application use fapws http server
"""
import fapws._evwsgi as evwsgi
from fapws import base
evwsgi.start(host, port)
evwsgi.set_base_module(base)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
return action
def run_meinheld_server(app):
def action(host=('h','127.0.0.1'),port=('p', 8000)):
"""run application use Meinheld http server
"""
from meinheld import server
server.listen((host, port))
server.run(app)
return action
def run_cherrypy_server(app):
def action(host=('h','127.0.0.1'),port=('p', 8000)):
"""run application use CherryPy http server
"""
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer((host, port), app)
server.start()
return action
def run_paste_server(app):
def action(host=('h','127.0.0.1'),port=('p', '8000')):
"""run application use Paste http server
"""
from paste import httpserver
httpserver.serve(app, host=host, port=port)
return action
def run_diesel_server(app):
def action(host=('h','127.0.0.1'),port=('p', 8000)):
"""run application use diesel http server
"""
from diesel.protocols.wsgi import WSGIApplication
        application = WSGIApplication(app, port=port)
application.run()
return action
def run_gevent_server(app):
def action(host=('h','127.0.0.1'),port=('p', 8000)):
"""run application use gevent http server
"""
from gevent import wsgi
wsgi.WSGIServer((host, port), app).serve_forever()
return action
def run_eventlet_server(app):
def action(host=('h','127.0.0.1'),port=('p', 8000)):
"""run application use eventlet http server
"""
from eventlet import wsgi, listen
wsgi.server(listen((host, port)), app)
return action
def run_eurasia_server(app):
def action(hostname=('h', '0.0.0.0'), port=('p', 8000)):
"""run application use eurasia http server"""
try:
from eurasia import WSGIServer
except ImportError:
print "You need to install eurasia"
sys.exit()
server = WSGIServer(app, bindAddress=(hostname, port))
server.run()
return action
def run_rocket_server(app):
def action(host=('h','127.0.0.1'),port=('p', 8000)):
"""run application use rocket http server
"""
from rocket import Rocket
server = Rocket((host, port), 'wsgi', { 'wsgi_app' : app })
server.start()
return action
server_actionnames = {
'runfcgi':runfcgi,
'runtwisted':run_twisted_server,
'run_appengine':run_appengine_server,
'run_gevent':run_gevent_server,
'run_eventlet':run_eventlet_server,
'run_gunicorn':run_gunicorn_server,
'run_rocket':run_rocket_server,
'run_eurasia':run_eurasia_server,
'run_tornado':run_tornado_server,
'run_fapws':run_fapws_server,
'run_meinheld':run_meinheld_server,
'run_cherrypy':run_cherrypy_server,
'run_paste_server':run_paste_server,
'run_diesel':run_diesel_server,
} | PypiClean |
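# A minimal, hypothetical usage sketch (the `app` object and the chosen
# action below are illustrative assumptions, not part of this module):
#
#     from flaskext.actions.server_actions import server_actionnames
#     actions = dict((name, factory(app))
#                    for name, factory in server_actionnames.items())
#     actions['run_tornado'](port=8000)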
/Congo-0.0.1.tar.gz/Congo-0.0.1/portfolio/component/static/portfolio/js/portfolio.js | var Portfolio = {
/**
Google Analytics tracking
**/
trackEvent: function(category, action, label, value) {
if (typeof ga !== 'undefined') {
ga("send", "event", category, action, label, value)
}
},
//------
/**
BASIC Login
**/
basic_login: function() {
var that = this
$("#portfolio-login-login-form").submit(function(e){
e.preventDefault();
that.trackEvent("User", "LOGIN", "Email")
this.submit()
})
$("#portfolio-login-signup-form").submit(function(e){
e.preventDefault();
that.trackEvent("User", "SIGNUP", "Email")
this.submit()
})
$("#portfolio-login-lostpassword-form").submit(function(e){
e.preventDefault();
that.trackEvent("User", "LOSTPASSWORD", "Email")
this.submit()
})
},
//-------
/**
OAUTH Login
Requires hello.js for front end authentication
**/
oauth_login: function(config, redirect) {
var that = this
hello.init(config, {redirect_uri: redirect, scope: "email"})
$("[portfolio\\:oauth-login]").click(function(){
var el = $(this)
var form = el.closest("form")
var status = form.find(".status-message")
            var provider = el.attr("portfolio:oauth-login")
if (provider == "google-plus") {
provider = "google"
}
hello(provider).login({"force": true}).then( function(p){
hello(provider).api( '/me' ).then( function(r){
var msg = form.data("success-message") || "Signing in..."
status.removeClass("alert alert-danger")
.addClass("alert alert-success").html(msg)
var image_url = r.thumbnail
switch(provider) {
case "facebook":
image_url += "?type=large"
break
case "google":
image_url = image_url.split("?")[0]
}
form.find("[name='provider']").val(provider)
form.find("[name='provider_user_id']").val(r.id)
form.find("[name='name']").val(r.name)
form.find("[name='email']").val(r.email)
form.find("[name='image_url']").val(image_url)
that.trackEvent("User", "LOGIN", "SOCIAL:" + provider)
form.submit()
});
}, function( e ){
var msg = form.data("error-message") || "Unable to signin..."
status.removeClass("alert alert-success")
.addClass("alert alert-danger").html(msg)
});
})
}
} | PypiClean |
/Leytonium-10.tar.gz/Leytonium-10/diffuse/viewer/undo.py |
# This file is part of Leytonium.
#
# Leytonium is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Leytonium is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Leytonium. If not, see <http://www.gnu.org/licenses/>.
# This file incorporates work covered by the following copyright and
# permission notice:
# Copyright (C) 2006-2019 Derrick Moser <[email protected]>
# Copyright (C) 2015-2020 Romain Failliot <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the license, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. You may also obtain a copy of the GNU General Public License
# from the Free Software Foundation by visiting their web site
# (http://www.fsf.org/) or by writing to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# Undo for changes to the cached line ending style
class SetFormatUndo:
def __init__(self, f, format, old_format):
self.data = (f, format, old_format)
def undo(self, viewer):
f, _, old_format = self.data
viewer.setFormat(f, old_format)
def redo(self, viewer):
f, format, _ = self.data
viewer.setFormat(f, format)
# Undo for the creation of Line objects
class InstanceLineUndo:
def __init__(self, f, i, reverse):
self.data = (f, i, reverse)
def undo(self, viewer):
f, i, reverse = self.data
viewer.instanceLine(f, i, not reverse)
def redo(self, viewer):
f, i, reverse = self.data
viewer.instanceLine(f, i, reverse)
# Undo for changing the text for a Line object
class UpdateLineTextUndo:
def __init__(self, f, i, old_is_modified, old_text, is_modified, text):
self.data = (f, i, old_is_modified, old_text, is_modified, text)
def undo(self, viewer):
f, i, old_is_modified, old_text, _, _ = self.data
viewer.updateLineText(f, i, old_is_modified, old_text)
def redo(self, viewer):
f, i, _, _, is_modified, text = self.data
viewer.updateLineText(f, i, is_modified, text)
# Undo for inserting a spacing line in a single pane
class InsertNullUndo:
def __init__(self, f, i, reverse):
self.data = (f, i, reverse)
def undo(self, viewer):
f, i, reverse = self.data
viewer.insertNull(f, i, not reverse)
def redo(self, viewer):
f, i, reverse = self.data
viewer.insertNull(f, i, reverse)
# Undo for manipulating a section of the line matching data
class InvalidateLineMatchingUndo:
def __init__(self, i, n, new_n):
self.data = (i, n, new_n)
def undo(self, viewer):
i, n, new_n = self.data
viewer.invalidateLineMatching(i, new_n, n)
def redo(self, viewer):
i, n, new_n = self.data
viewer.invalidateLineMatching(i, n, new_n)
# Undo for alignment changes
class AlignmentChangeUndo:
def __init__(self, finished):
self.data = finished
def undo(self, viewer):
finished = self.data
viewer.alignmentChange(not finished)
def redo(self, viewer):
finished = self.data
viewer.alignmentChange(finished)
# Undo for changing how lines are cut into blocks for alignment
class UpdateBlocksUndo:
def __init__(self, old_blocks, blocks):
self.data = (old_blocks, blocks)
def undo(self, viewer):
old_blocks, _ = self.data
viewer.updateBlocks(old_blocks)
def redo(self, viewer):
_, blocks = self.data
viewer.updateBlocks(blocks)
# Undo for replacing the lines for a single pane with a new set
class ReplaceLinesUndo:
def __init__(self, f, lines, new_lines, max_num, new_max_num):
self.data = (f, lines, new_lines, max_num, new_max_num)
def undo(self, viewer):
f, lines, new_lines, max_num, new_max_num = self.data
viewer.replaceLines(f, new_lines, lines, new_max_num, max_num)
def redo(self, viewer):
f, lines, new_lines, max_num, new_max_num = self.data
viewer.replaceLines(f, lines, new_lines, max_num, new_max_num)
# Undo for changing the selection mode and range
class EditModeUndo:
def __init__(self, mode, current_pane, current_line, current_char, selection_line, selection_char, cursor_column):
self.data = (mode, current_pane, current_line, current_char, selection_line, selection_char, cursor_column)
def undo(self, viewer):
mode, current_pane, current_line, current_char, selection_line, selection_char, cursor_column = self.data
viewer.setEditMode(mode, current_pane, current_line, current_char, selection_line, selection_char, cursor_column)
def redo(self, viewer):
self.undo(viewer)
# Undo for changes to the pane ordering
class SwapPanesUndo:
def __init__(self, f_dst, f_src):
self.data = (f_dst, f_src)
def undo(self, viewer):
f_dst, f_src = self.data
viewer.swapPanes(f_src, f_dst)
def redo(self, viewer):
f_dst, f_src = self.data
viewer.swapPanes(f_dst, f_src) | PypiClean |
/CGF-0.0.7-py3-none-any.whl/CGFCore/opengameframework/deterministic.py | import time
import matplotlib.pyplot as plt
import numpy as np
class OpenGame:
def __init__(self, players):
self.players = players
self.strategies = {player: None for player in players}
self.history = []
self.future = []
def set_strategy(self, player, strategy):
self.strategies[player] = strategy
def play(self, history):
raise NotImplementedError("Each game must implement its own play function.")
def coplay(self, future, outcome):
raise NotImplementedError("Each game must implement its own coplay function.")
def utility(self, history, future, outcome, player):
raise NotImplementedError("Each game must implement its own utility function.")
class PrisonersDilemma(OpenGame):
def __init__(self, agents):
player_names = [f"player{i+1}" for i in range(len(agents))]
super().__init__(player_names)
self.agents = agents
self.payoffs = {("cooperate", "cooperate"): {player: 2 + np.random.normal(0, 0.1) for player in player_names},
("cooperate", "defect"): {player: 0 if player == "player1" else 3 for player in player_names},
("defect", "cooperate"): {player: 3 if player == "player1" else 0 for player in player_names},
("defect", "defect"): {player: 1 for player in player_names}
}
def play(self, history):
state = tuple(history)
actions = [agent.choose_action(state) for agent in self.agents]
return tuple(actions)
def coplay(self, future, outcome):
return future
def feedback(self, outcome):
rewards = [self.payoffs[outcome][player] for player in self.players]
return rewards
    def choose_action(self, state):
        # As an example, increase the probability of cooperation over time,
        # clamped so the pair stays a valid distribution for np.random.choice.
        prob_cooperate = min(1.0, 0.5 + 0.01 * len(self.history))
        return np.random.choice(["cooperate", "defect"],
                                p=[prob_cooperate, 1 - prob_cooperate])
def utility(self, history, future, outcome, player):
return self.payoffs[outcome][player]
class CompositeOpenGame(OpenGame):
def __init__(self, game1, game2, composition_type="sequential"):
super().__init__(game1.players + game2.players)
self.game1 = game1
self.game2 = game2
self.composition_type = composition_type
def play(self, history):
if np.random.rand() > 0.5:
self.composition_type = "sequential"
else:
self.composition_type = "parallel"
if self.composition_type == "sequential":
outcome1 = self.game1.play(history)
outcome2 = self.game2.play(history + [outcome1])
return outcome1, outcome2
elif self.composition_type == "parallel":
outcome1 = self.game1.play(history)
outcome2 = self.game2.play(history)
return outcome1, outcome2
else:
raise ValueError("Invalid composition type")
def coplay(self, future, outcome):
future1, future2 = future
outcome1, outcome2 = outcome
coplayed_future1 = self.game1.coplay(future1, outcome1)
coplayed_future2 = self.game2.coplay(future2, outcome2)
return coplayed_future1, coplayed_future2
def utility(self, history, future, outcome, player):
outcome1, outcome2 = outcome
if player in self.game1.players:
coplayed_future1, _ = self.coplay(future, outcome)
return self.game1.utility(history, coplayed_future1, outcome1, player)
elif player in self.game2.players:
_, coplayed_future2 = self.coplay(future, outcome)
return self.game2.utility(history, coplayed_future2, outcome2, player)
else:
raise ValueError(f"{player} is not a player in this composite game")
class RLAgent:
def __init__(self, actions):
self.actions = actions
self.q_table = {}
self.learning_rate = 0.1
self.discount_factor = 0.9
self.epsilon = 0.1
def choose_action(self, state):
if np.random.uniform(0, 1) < self.epsilon:
return np.random.choice(self.actions)
q_values = [self.get_q_value(state, action) for action in self.actions]
return self.actions[np.argmax(q_values)]
def get_q_value(self, state, action):
return self.q_table.get((state, action), 0)
def learn(self, state, action, reward, next_state):
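        # Tabular Q-learning update (Watkins):
        #   Q(s, a) <- Q(s, a) + alpha * [r + gamma * max_a' Q(s', a') - Q(s, a)]
        # `predict` is the current estimate, `target` the bootstrapped return.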
predict = self.get_q_value(state, action)
target = reward + self.discount_factor * np.max([self.get_q_value(next_state, next_action) for next_action in self.actions])
self.q_table[(state, action)] = predict + self.learning_rate * (target - predict)
self.learning_rate = 0.1 + np.random.normal(0, 0.01)
def train_rl_agents(episodes=None):
if not episodes:
episodes = 1000 + np.random.randint(-100, 100)
agents = [RLAgent(actions=["cooperate", "defect"]) for _ in range(2)]
game = PrisonersDilemma(agents)
rewards_over_time = [] # Store average rewards for each episode
action_counts = {"cooperate": 0, "defect": 0}
for episode in range(episodes):
history = ["previous_play"]
outcome = game.play(history)
rewards = game.feedback(outcome)
avg_reward = np.mean(rewards) # Calculate average reward for the episode
rewards_over_time.append(avg_reward) # Append to the list
for agent, reward in zip(agents, rewards):
state = tuple(history)
action = agent.choose_action(state)
action_counts[action] += 1
next_history = history + list(outcome)
next_state = tuple(next_history)
agent.learn(state, action, reward, next_state)
history = next_history
return agents, rewards_over_time, action_counts
trained_agents, rewards_over_time, action_counts = train_rl_agents()
def test_open_games():
# Create RLAgent instances
agent1 = RLAgent(actions=["cooperate", "defect"])
agent2 = RLAgent(actions=["cooperate", "defect"])
# Test PrisonersDilemma with RLAgent instances
game = PrisonersDilemma([agent1, agent2])
history = ["previous_play"]
outcome = game.play(history)
future = game.coplay(["potential_future"], outcome)
utilities = {player: game.utility(history, future, outcome, player) for player in game.players}
print(f"Outcome: {outcome}")
print(f"Utilities: {utilities}")
test_open_games()
def test_composite_open_games():
# Create RLAgent instances
agent1 = RLAgent(actions=["cooperate", "defect"])
agent2 = RLAgent(actions=["cooperate", "defect"])
agent3 = RLAgent(actions=["cooperate", "defect"])
agent4 = RLAgent(actions=["cooperate", "defect"])
game1 = PrisonersDilemma([agent1, agent2])
game2 = PrisonersDilemma([agent3, agent4])
composite_game = CompositeOpenGame(game1, game2, "parallel")
history = ["previous_play"]
outcome = composite_game.play(history)
future = composite_game.coplay(["potential_future1", "potential_future2"], outcome)
utilities = {player: composite_game.utility(history, future, outcome, player) for player in composite_game.players}
print(f"Outcome: {outcome}")
print(f"Utilities: {utilities}")
test_composite_open_games()
# Outside the function, you can measure the time and call the function:
start_time_train = time.time()
trained_agents, rewards_over_time, action_counts = train_rl_agents()
end_time_train = time.time()
print(f"Training Execution time: {end_time_train - start_time_train} seconds")
# Plotting the average reward over time
plt.figure() # Create a new figure for the first plot
plt.plot(rewards_over_time)
plt.xlabel('Episode')
plt.ylabel('Average Reward')
plt.title('Average Reward per Episode')
plt.tight_layout() # Adjust layout to ensure no overlap
plt.show()
# Plotting the action counts
plt.figure() # Create a new figure for the second plot
actions = list(action_counts.keys())
counts = list(action_counts.values())
plt.bar(actions, counts)
plt.xlabel('Action')
plt.ylabel('Count')
plt.title('Action Distribution')
plt.tight_layout() # Adjust layout to ensure no overlap
plt.show()
/Flask-Statics-Helper-1.0.0.tar.gz/flask_statics/static/angular/i18n/angular-locale_dyo.js
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"Dimas",
"Tene\u014b",
"Talata",
"Alarbay",
"Aramisay",
"Arjuma",
"Sibiti"
],
"MONTH": [
"Sanvie",
"F\u00e9birie",
"Mars",
"Aburil",
"Mee",
"Sue\u014b",
"S\u00fauyee",
"Ut",
"Settembar",
"Oktobar",
"Novembar",
"Disambar"
],
"SHORTDAY": [
"Dim",
"Ten",
"Tal",
"Ala",
"Ara",
"Arj",
"Sib"
],
"SHORTMONTH": [
"Sa",
"Fe",
"Ma",
"Ab",
"Me",
"Su",
"S\u00fa",
"Ut",
"Se",
"Ok",
"No",
"De"
],
"fullDate": "EEEE d MMMM y",
"longDate": "d MMMM y",
"medium": "d MMM y HH:mm:ss",
"mediumDate": "d MMM y",
"mediumTime": "HH:mm:ss",
"short": "d/M/y HH:mm",
"shortDate": "d/M/y",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "CFA",
"DECIMAL_SEP": ",",
"GROUP_SEP": "\u00a0",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "dyo",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
/FFTA-0.3.5.1-py3-none-any.whl/ffta/gkpfm/transfer_func.py
import time
from math import pi
import numpy as np
import pandas as pd
import pyUSID as usid
from igor2 import binarywave as bw
from matplotlib import pyplot as plt
from pyUSID import Dimension
from scipy import signal as sg
import ffta
from ..analysis.fft import get_noise_floor
def transfer_function(h5_file, tf_file='', params_file='',
psd_freq=1e6, offset=0.0016, sample_freq=10e6,
plot=False):
'''
    Reads in the transfer function .ibw, then creates several datasets within
    a parent group 'Transfer_Function'.
    This will destructively overwrite an existing Transfer_Function group:
    1) TF (transfer function) and TFnorm (Q-normalized version)
    2) Freq (frequency axis for computing Fourier Transforms)
    3) TFnorm_resampled and Freq_resampled (resampled to sample_freq)
    :param h5_file: the HDF5 file to write the Transfer_Function group into
    :type h5_file: h5py.File
    :param tf_file: path to the Transfer Function .ibw file
    :type tf_file: str
:param params_file: The filepath in string format for the parameters file containing
Q, AMPINVOLS, etc.
:type params_file: string
:param psd_freq: The maximum range of the Power Spectral Density.
For Asylum Thermal Tunes, this is often 1 MHz on MFPs and 2 MHz on Cyphers
:type psd_freq: float
:param offset: To avoid divide-by-zero effects since we will divide by the transfer function
when generating GKPFM data
:type offset: float
:param sample_freq: The desired output sampling. This should match your data.
:type sample_freq: float
    :param plot: if True, plots the normalized and resampled transfer functions
    :type plot: bool, optional
    :returns: the Transfer Function group
    :rtype: h5py.Group
'''
if not any(tf_file):
tf_file = usid.io_utils.file_dialog(caption='Select Transfer Function file ',
file_filter='IBW Files (*.ibw)')
data = bw.load(tf_file)
tf = data.get('wave').get('wData')
if 'Transfer_Function' in h5_file:
del h5_file['/Transfer_Function']
h5_file.create_group('Transfer_Function')
h5_file['Transfer_Function'].create_dataset('TF', data=tf)
freq = np.linspace(0, psd_freq, len(tf))
h5_file['Transfer_Function'].create_dataset('Freq', data=freq)
parms = params_list(params_file, psd_freq=psd_freq)
for k in parms:
h5_file['Transfer_Function'].attrs[k] = float(parms[k])
tfnorm = float(parms['Q']) * (tf - np.min(tf)) / (np.max(tf) - np.min(tf))
tfnorm += offset
h5_file['Transfer_Function'].create_dataset('TFnorm', data=tfnorm)
TFN_RS, FQ_RS = resample_tf(h5_file, psd_freq=psd_freq, sample_freq=sample_freq)
TFN_RS = float(parms['Q']) * (TFN_RS - np.min(TFN_RS)) / (np.max(TFN_RS) - np.min(TFN_RS))
TFN_RS += offset
h5_file['Transfer_Function'].create_dataset('TFnorm_resampled', data=TFN_RS)
h5_file['Transfer_Function'].create_dataset('Freq_resampled', data=FQ_RS)
if plot:
plt.figure()
plt.plot(freq, tfnorm, 'b')
plt.plot(FQ_RS, TFN_RS, 'r')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude (m)')
plt.yscale('log')
plt.title('Transfer Function')
return h5_file['Transfer_Function']
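# A minimal usage sketch for transfer_function (the h5py handle and the file
# paths below are hypothetical; plot=True shows the normalized curves):
#
#   import h5py
#   with h5py.File('data.h5', 'r+') as h5:
#       tf_grp = transfer_function(h5, tf_file='thermal_tune.ibw',
#                                  params_file='parms.txt',
#                                  psd_freq=1e6, sample_freq=10e6, plot=True)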
def resample_tf(h5_file, psd_freq=1e6, sample_freq=10e6):
'''
Resamples the Transfer Function based on the desired target frequency
This is important for dividing the transfer function elements together
:param h5_file:
:type h5_file:
:param psd_freq: The maximum range of the Power Spectral Density.
For Asylum Thermal Tunes, this is often 1 MHz on MFPs and 2 MHz on Cyphers
:type psd_freq: float
:param sample_freq: The desired output sampling. This should match your data.
:type sample_freq: float
    :returns: tuple (TFN_RS, FQ_RS)
        WHERE
        complex ndarray TFN_RS is the resampled, normalized transfer function
        ndarray FQ_RS is the corresponding frequency axis
'''
TFN = h5_file['Transfer_Function/TFnorm'][()]
# FQ = h5_file['Transfer_Function/Freq'][()]
# Generate the iFFT from the thermal tune data
tfn = np.fft.ifft(TFN)
# tq = np.linspace(0, 1/np.abs(FQ[1] - FQ[0]), len(tfn))
# Resample
scale = int(sample_freq / psd_freq)
print('Rescaling by', scale, 'X')
tfn_rs = sg.resample(tfn, len(tfn) * scale) # from 1 MHz to 10 MHz
TFN_RS = np.fft.fft(tfn_rs)
FQ_RS = np.linspace(0, sample_freq, len(tfn_rs))
return TFN_RS, FQ_RS
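# Worked numbers (hypothetical): with psd_freq=1e6 and sample_freq=10e6 the
# scale factor is 10, so a 4096-point thermal tune becomes 40960 points before
# being transformed back to the frequency domain.
#
#   TFN_RS, FQ_RS = resample_tf(h5, psd_freq=1e6, sample_freq=10e6)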
def params_list(path='', psd_freq=1e6, lift=50):
'''
Reads in a Parameters file as saved in Igor as a dictionary
For use in creating attributes of transfer Function
:param path:
:type path: str
:param psd_freq:
:type psd_freq:
:param lift:
:type lift:
:returns: parameters dictionary
:rtype: dict
'''
if not any(path):
path = usid.io.io_utils.file_dialog(caption='Select Parameters Files ',
file_filter='Text (*.txt)')
df = pd.read_csv(path, sep='\t', header=1)
df = df.set_index(df['Unnamed: 0'])
df = df.drop(columns='Unnamed: 0')
parm_dict = df.to_dict()['Initial']
parm_dict['PSDFreq'] = psd_freq
parm_dict['Lift'] = lift
return parm_dict
def test_Ycalc(h5_main, pixel_ind=[0, 0], transfer_func=None, resampled=True, ratios=None,
verbose=True, noise_floor=1e-3, phase=-pi, plot=False, scaling=1):
'''
Divides the response by the transfer function
:param h5_main:
:type h5_main: h5py dataset of USIDataset
:param pixel_ind:
:type pixel_ind: list
:param transfer_func: This can be the resampled or normal transfer function
For best results, use the "normalized" transfer function
"None" will default to /Transfer_Function folder
:type transfer_func: transfer function, optional
:param resampled: Whether to use the upsampled Transfer Function or the original
:type resampled: bool, optional
:param ratios:
:type ratios:
:param verbose: Gives user feedback during processing
:type verbose: bool, optional
:param noise_floor: For calculating what values to filter as the noise floor of the data
0 or None circumvents this
:type noise_floor: float, optional
:param phase: Practically any value between -pi and +pi works
:type phase: float, optional
:param plot:
:type plot: bool, optional
:param scaling: scales the transfer function by this number if, for example, the TF was
        acquired on a line and you're dividing by a point (or vice versa)
:type scaling: int, optional
    :returns: tuple (TFratios, Yout, yout)
        WHERE
        ndarray TFratios is the transfer function mapped onto the response frequency bins
        complex ndarray Yout is the response spectrum divided by the transfer function
        ndarray yout is the time-domain force signal recovered by inverse FFT of Yout
'''
t0 = time.time()
parm_dict = usid.hdf_utils.get_attributes(h5_main)
drive_freq = parm_dict['drive_freq']
response = ffta.hdf_utils.get_utils.get_pixel(h5_main, pixel_ind, array_form=True, transpose=False).flatten()
response -= np.mean(response)
RESP = np.fft.fft(response)
Yout = np.zeros(len(RESP), dtype=complex)
# Create frequency axis for the pixel
samp = parm_dict['sampling_rate']
fq_y = np.linspace(0, samp, len(Yout))
noise_limit = np.ceil(get_noise_floor(RESP, noise_floor)[0])
# Get the transfer function and transfer function frequency values
fq_tf = h5_main.file['Transfer_Function/Freq'][()]
if not transfer_func:
if resampled:
transfer_func = h5_main.file['Transfer_Function/TFnorm_resampled'][()]
fq_tf = h5_main.file['Transfer_Function/Freq_resampled'][()]
else:
transfer_func = h5_main.file['Transfer_Function/TFnorm'][()]
if verbose:
t1 = time.time()
print('Time for pixels:', t1 - t0)
Yout_divided = np.zeros(len(RESP), dtype=bool)
TFratios = np.ones(len(RESP))
# Calculate the TF scaled to the sample size of response function
for x, f in zip(transfer_func, fq_tf):
if np.abs(x) > noise_floor:
xx = np.searchsorted(fq_y, f)
if not Yout_divided[xx]:
TFratios[xx] = x
TFratios[-xx] = x
Yout_divided[xx] = True
signal_bins = np.arange(len(TFratios))
signal_kill = np.where(np.abs(RESP) < noise_limit)
pass_frequencies = np.delete(signal_bins, signal_kill)
drive_bin = (np.abs(fq_y - drive_freq)).argmin()
RESP_ph = (RESP) * np.exp(-1j * fq_y / (fq_y[drive_bin]) * phase)
# Step 3C) iFFT the response above a user defined noise floor to recover Force in time domain.
Yout[pass_frequencies] = RESP_ph[pass_frequencies]
Yout = Yout / (TFratios * scaling)
yout = np.real(np.fft.ifft(np.fft.ifftshift(Yout)))
if verbose:
t2 = time.time()
print('Time for pixels:', t2 - t1)
if plot:
fig, ax = plt.subplots(figsize=(12, 7))
ax.semilogy(fq_y, np.abs(Yout), 'b', label='F3R')
ax.semilogy(fq_y[signal_bins], np.abs(Yout[signal_bins]), 'og', label='F3R')
ax.semilogy(fq_y[signal_bins], np.abs(RESP[signal_bins]), '.r', label='Response')
ax.set_xlabel('Frequency (kHz)', fontsize=16)
ax.set_ylabel('Amplitude (a.u.)', fontsize=16)
ax.legend(fontsize=14)
ax.set_yscale('log')
ax.set_xlim(0, 3 * drive_freq)
ax.set_title('Noise Spectrum', fontsize=16)
return TFratios, Yout, yout
def Y_calc(h5_main, transfer_func=None, resampled=True, ratios=None, verbose=False,
noise_floor=1e-3, phase=-pi, plot=False, scaling=1):
'''
Divides the response by the transfer function
:param h5_main:
:type h5_main: h5py dataset of USIDataset
:param transfer_func: This can be supplied or use the calculated version
For best results, use the "normalized" transfer function
"None" will default to /Transfer_Function folder
:type transfer_func: transfer function, optional
:param resampled: Whether to use the upsampled Transfer Function or the original
:type resampled: bool, optional
:param ratios: Array of the size of h5_main (1-D) with the transfer function data
        If not given, it is found via the test_Ycalc function
:type ratios: array, optional
:param verbose: Gives user feedback during processing
:type verbose: bool, optional
:param noise_floor: For calculating what values to filter as the noise floor of the data
0 or None circumvents this
:type noise_floor: float, optional
:param phase: Practically any value between -pi and +pi works
:type phase: float, optional
:param plot:
:type plot: bool, optional
    :param scaling: scales the transfer function by this number if, for example, the TF was
        acquired on a line and you're dividing by a point (or vice versa)
    :type scaling: int, optional
    :returns: tuple (Yout, yout)
        WHERE
        complex ndarray Yout is the per-pixel force spectrum (response divided by the transfer function)
        ndarray yout is the per-pixel time-domain force signal
'''
parm_dict = usid.hdf_utils.get_attributes(h5_main)
drive_freq = parm_dict['drive_freq']
ds = h5_main[()]
Yout = np.zeros(ds.shape, dtype=complex)
yout = np.zeros(ds.shape)
# Create frequency axis for the pixel
samp = parm_dict['sampling_rate']
fq_y = np.linspace(0, samp, Yout.shape[1])
response = ds[0, :]
response -= np.mean(response)
RESP = np.fft.fft(response)
noise_limit = np.ceil(get_noise_floor(RESP, noise_floor)[0])
# Get the transfer function and transfer function frequency values
# Use test calc to scale the transfer function to the correct size
if not transfer_func:
if resampled:
transfer_func, _, _ = test_Ycalc(h5_main, resampled=True,
verbose=verbose, noise_floor=noise_floor)
else:
transfer_func, _, _ = test_Ycalc(h5_main, resampled=False,
verbose=verbose, noise_floor=noise_floor)
    t0 = time.time()  # time is already imported at module level
signal_bins = np.arange(len(transfer_func))
for c in np.arange(h5_main.shape[0]):
if verbose:
if c % 100 == 0:
print('Pixel:', c)
response = ds[c, :]
response -= np.mean(response)
RESP = np.fft.fft(response)
signal_kill = np.where(np.abs(RESP) < noise_limit)
pass_frequencies = np.delete(signal_bins, signal_kill)
drive_bin = (np.abs(fq_y - drive_freq)).argmin()
RESP_ph = (RESP) * np.exp(-1j * fq_y / (fq_y[drive_bin]) * phase)
Yout[c, pass_frequencies] = RESP_ph[pass_frequencies]
Yout[c, :] = Yout[c, :] / (transfer_func * scaling)
yout[c, :] = np.real(np.fft.ifft(Yout[c, :]))
t1 = time.time()
print('Time for pixels:', t1 - t0)
return Yout, yout
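# A sketch of the full deconvolution pipeline, assuming h5_main is a USIDataset
# whose file already contains the /Transfer_Function group created above:
#
#   Yout, yout = Y_calc(h5_main, resampled=True, noise_floor=1e-3, phase=-pi)
#   save_Yout(h5_main, Yout, yout)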
def check_phase(h5_main, transfer_func, phase_list=[-pi, -pi / 2, 0],
plot=True, noise_tolerance=1e-6, samp_rate=10e6):
'''
Uses the list of phases in phase_list to plot the various phase offsets
relative to the driving excitation
:param h5_main:
:type h5_main: h5py dataset of USIDataset
:param transfer_func: This can be supplied or use the calculated version
For best results, use the "normalized" transfer function
"None" will default to /Transfer_Function folder
:type transfer_func: transfer function, optional
:param phase_list:
:type phase_list: List of float
:param plot:
:type plot: bool, optional
:param noise_tolerance:
:type noise_tolerance:
:param samp_rate:
:type samp_rate:
'''
    ph = -3.492  # typical phase offset from cable delays; overridden by phase_list in the loop below
row_ind = 0
test_row = np.fft.fftshift(np.fft.fft(h5_main[row_ind]))
noise_floor = get_noise_floor(test_row, noise_tolerance)[0]
print('Noise floor = ', noise_floor)
Noiselimit = np.ceil(noise_floor)
parm_dict = usid.hdf_utils.get_attributes(h5_main)
drive_freq = parm_dict['drive_freq']
freq = np.arange(-samp_rate / 2, samp_rate / 2, samp_rate / len(test_row))
tx = np.arange(0, parm_dict['total_time'], parm_dict['total_time'] / len(freq))
    exc_params = {'ac': 1, 'dc': 0, 'phase': 0, 'frequency': drive_freq}
    excitation = (exc_params['ac'] * np.sin(tx * 2 * pi * exc_params['frequency']
                                            + exc_params['phase']) + exc_params['dc'])
for ph in phase_list:
# Try Force Conversion on Filtered data of single line (row_ind above)
G_line = np.zeros(freq.size, dtype=complex) # G = raw
G_wPhase_line = np.zeros(freq.size, dtype=complex) # G_wphase = phase-shifted
signal_ind_vec = np.arange(freq.size)
ind_drive = (np.abs(freq - drive_freq)).argmin()
        # test_row above is already in the frequency domain, so start again
        # from the raw time-domain row before transforming
        test_line = h5_main[row_ind] - np.mean(h5_main[row_ind])
        test_line = np.fft.fftshift(np.fft.fft(test_line))
signal_kill = np.where(np.abs(test_line) < Noiselimit)
signal_ind_vec = np.delete(signal_ind_vec, signal_kill)
# Original/raw data; TF_norm is from the Tune file transfer function
G_line[signal_ind_vec] = test_line[signal_ind_vec]
G_line = (G_line / transfer_func)
G_time_line = np.real(np.fft.ifft(np.fft.ifftshift(G_line))) # time-domain
# Phase-shifted data
test_shifted = (test_line) * np.exp(-1j * freq / (freq[ind_drive]) * ph)
G_wPhase_line[signal_ind_vec] = test_shifted[signal_ind_vec]
G_wPhase_line = (G_wPhase_line / transfer_func)
G_wPhase_time_line = np.real(np.fft.ifft(np.fft.ifftshift(G_wPhase_line)))
phaseshifted = np.reshape(G_wPhase_time_line, (parm_dict['num_cols'], parm_dict['num_rows']))
fig, axes = usid.plot_utils.plot_curves(excitation, phaseshifted, use_rainbow_plots=True,
x_label='Voltage (Vac)', title='Phase Shifted',
num_plots=4, y_label='Deflection (a.u.)')
axes[0][0].set_title('Phase ' + str(ph))
return
def save_Yout(h5_main, Yout, yout):
    '''
    Writes the results to the HDF5 file
    :param h5_main:
    :type h5_main: h5py dataset of USIDataset
    :param Yout: frequency-domain force data to save
    :type Yout: complex ndarray
    :param yout: time-domain force data to save
    :type yout: ndarray
    '''
parm_dict = usid.hdf_utils.get_attributes(h5_main)
# Get relevant parameters
num_rows = parm_dict['num_rows']
num_cols = parm_dict['num_cols']
pnts_per_avg = parm_dict['pnts_per_avg']
h5_gp = h5_main.parent
h5_meas_group = usid.hdf_utils.create_indexed_group(h5_gp, 'GKPFM_Frequency')
# Create dimensions
pos_desc = [Dimension('X', 'm', np.linspace(0, parm_dict['FastScanSize'], num_cols)),
Dimension('Y', 'm', np.linspace(0, parm_dict['SlowScanSize'], num_rows))]
# ds_pos_ind, ds_pos_val = build_ind_val_matrices(pos_desc, is_spectral=False)
spec_desc = [Dimension('Frequency', 'Hz', np.linspace(0, parm_dict['sampling_rate'], pnts_per_avg))]
# ds_spec_inds, ds_spec_vals = build_ind_val_matrices(spec_desc, is_spectral=True)
# Writes main dataset
h5_y = usid.hdf_utils.write_main_dataset(h5_meas_group,
Yout,
'Y', # Name of main dataset
'Deflection', # Physical quantity contained in Main dataset
'V', # Units for the physical quantity
pos_desc, # Position dimensions
spec_desc, # Spectroscopic dimensions
dtype=np.cdouble, # data type / precision
main_dset_attrs=parm_dict)
usid.hdf_utils.copy_attributes(h5_y, h5_gp)
h5_meas_group = usid.hdf_utils.create_indexed_group(h5_gp, 'GKPFM_Time')
spec_desc = [Dimension('Time', 's', np.linspace(0, parm_dict['total_time'], pnts_per_avg))]
h5_y = usid.hdf_utils.write_main_dataset(h5_meas_group,
yout,
'y_time', # Name of main dataset
'Deflection', # Physical quantity contained in Main dataset
'V', # Units for the physical quantity
pos_desc, # Position dimensions
spec_desc, # Spectroscopic dimensions
dtype=np.float32, # data type / precision
main_dset_attrs=parm_dict)
usid.hdf_utils.copy_attributes(h5_y, h5_gp)
h5_y.file.flush()
return
def check_response(h5_main, pixel=0, ph=0):
"""
:param h5_main:
:type h5_main: h5py dataset of USIDataset
    :param pixel: index of the pixel to plot
    :type pixel: int
    :param ph: phase offset applied to the reference drive waveform
    :type ph: float
"""
parm_dict = usid.hdf_utils.get_attributes(h5_main)
freq = parm_dict['drive_freq']
txl = np.linspace(0, parm_dict['total_time'], h5_main[pixel, :].shape[0])
resp_wfm = np.sin(txl * 2 * pi * freq + ph)
plt.figure()
plt.plot(resp_wfm, h5_main[()][pixel, :])
    return
/utils/huetoolkit.py
import os
import numpy as np
import pandas as pd
def check_reserved_field_name(cols):
cols = set(cols)
res = {'file_name', 'load_dt'}
ident = res.intersection(cols)
if len(ident) > 0:
print('Reserved Column Name Exists', ident)
else:
print('No Issue')
def check_dup_columns(cols):
cols_lower = np.array([c.lower() for c in cols])
unq, cnt = np.unique(cols_lower, return_counts=True)
dup = unq[cnt > 1]
dup = [c for c in cols if c.lower() in dup]
print(f'Duplicated Columns: {dup}')
def generate_hue_dict(fp: list, ins: list = None):
"""
Args:
fp (list): [description]
ins (dict, optional): [description]. Defaults to None.
dict_file_name (str, optional): [description].
Defaults to 'prod_audit_header_info.dic'.
"""
def generate_hue_dict_item(fp: str, ins: dict = None):
fc = os.path.basename(fp).split('.')
fn = fc[0]
fe = fc[1]
if fe.lower() != 'csv':
raise IOError(f'File {fp} is not a csv file')
try:
df = pd.read_csv(fp, nrows=1)
        except Exception:
raise IOError(f'Error when reading file {fp}')
if ins is None:
col_def = ','.join(
['"' + col + ' string' + '"' for col in df.columns])
else:
col_def = ','.join(['"' + col + f' {ins[col]}' + '"'
if col in ins.keys()
else '"' + col + ' string' + '"'
for col in df.columns])
dict_rec = '"' + fn + '":[' + col_def + ']'
return dict_rec
    dict_items = []
    if ins is None:
        ins = [None] * len(fp)  # no type overrides supplied
    for p, i in zip(fp, ins):
dict_items.append(generate_hue_dict_item(p, i))
dict_items = ',\n'.join(dict_items)
dict_export = '{\n' + dict_items + '\n}'
return dict_export
def generate_hue_csv(file_dir: list, file_name: list, table_name: list):
cols = ['flow_type', 'lob', 'subject_area', 'ingestion_type', 'load_type',
'is_control_file', 'destination_type', 'source_db', 'source_table',
'target_table', 'target_db', 'lake_db', 'select_columns',
'lake_partitions', 'partition_deriving_columns', 'lake_partition_logic',
'bucket_columns', 'no_of_buckets', 'storage_type', 'lake_storage_type',
'no_of_mappers', 'file_format', 'header_avail', 'part_file',
'delimiter', 'mask_columns', 'timestamp_format', 'split_column']
df = pd.DataFrame(columns=cols)
def _add_row(df, file_dir, file_name, table_name):
new_row = np.array(
['fs_hv', 'audit', 'audit', 'file-based', 'full', 'N', 'hive-table',
file_dir,
file_name,
table_name,
'p_audit_users_db',
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
'parquetfile', 'parquet', 8, 'CSV', True, False, ',', np.nan,
'%Y%m%d%H%M%S', np.nan], dtype=object)
df.loc[df.shape[0], :] = new_row
return df
for i in range(len(file_dir)):
df = _add_row(df, file_dir[i], file_name[i], table_name[i])
return df
def prepare_hue_files(path_file_in: list, file_dir: list,
table_name: list, ins: list):
dir_file_out = os.path.dirname(path_file_in[0])
file_name = [os.path.basename(fn).split('.')[0] for fn in path_file_in]
try:
dict_file_path = os.path.join(
dir_file_out, 'prod_audit_header_info.dic')
dict_export = generate_hue_dict(path_file_in, ins)
with open(dict_file_path, 'w') as f:
f.write(dict_export)
print(f'HUE DICT was generated: {dict_file_path}')
    except Exception:
print('Error - HUE DICT')
try:
csv_file_path = os.path.join(dir_file_out, 'prod_audit_full.csv')
df_csv = generate_hue_csv(file_dir, file_name, table_name)
df_csv.to_csv(csv_file_path, index=False)
print(f'HUE CSV was generated: {csv_file_path}')
    except Exception:
        print('Error - HUE CSV')
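# A minimal usage sketch with hypothetical paths; each entry in `ins` may be
# None or a {column: hive_type} override dict, one per input file:
#
#   prepare_hue_files(path_file_in=['/data/audit_users.csv'],
#                     file_dir=['/landing/audit'],
#                     table_name=['audit_users'],
#                     ins=[{'load_dt': 'timestamp'}])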
/DOCBRPY-0.1.3.tar.gz/DOCBRPY-0.1.3/src/PYBRDOC/cnpj.py
import re
import requests
import json
from itertools import chain
from random import randint
from .documentoidentificacao import DocumentoIdentificacao
class Cnpj(DocumentoIdentificacao):
"""docstring for Cnpj"""
def __init__(self, arg):
super().__init__(arg)
def __str__(self):
"""
Formatará uma string CNPJ somente de números formatada adequadamente, adicionando visual de formatação padrão
símbolos de ajuda para exibição.
Se a string do CNPJ for menor que 14 dígitos ou contiver caracteres que não sejam dígitos, retornará o valor bruto
Cadeia CNPJ não formatada.
"""
if self.rawValue == None: return str()
x = self.rawValue
if not x.isdigit() or len(x) != 14 or len(set(x)) == 1: return self.rawValue
return '{}.{}.{}/{}-{}'.format(x[:2], x[2:5], x[5:8], x[8:12], x[12:])
@property
def isValid(self):
"""
Retorna se os dígitos de checksum de verificação do `cnpj` fornecido correspondem ou não ao seu número base.
A entrada deve ser uma string de dígitos de comprimento adequado.
"""
return ValidadorCnpj.validar(self)
class ValidadorCnpj(object):
"""docstring for ValidadorCnpj"""
def __call__(self, value):
return ValidadorCnpj.validar(value)
def __validarCnpj(self, arg):
return self.__validarStr(arg.rawValue)
def __validarStr(self, arg):
if arg == None:
return False
p = re.compile('[^0-9]')
x = p.sub('', arg)
if len(x) != 14 or len(set(x)) == 1: return False
return all(self.__hashDigit(x, i + 13) == int(v) for i, v in enumerate(x[12:]))
def __hashDigit(self, cnpj, position): # type: (str, int) -> int
"""
Calculará o dígito de soma de verificação `position` fornecido para a entrada `cnpj`. A entrada deve conter
todos os elementos anteriores a `position` senão a computação produzirá o resultado errado.
"""
weighten = chain(range(position - 8, 1, -1), range(9, 1, -1))
val = sum(int(digit) * weight for digit, weight in zip(cnpj, weighten)) % 11
return 0 if val < 2 else 11 - val
@staticmethod
def validar(arg):
v = ValidadorCnpj()
if type(arg) == Cnpj: return v.__validarCnpj(arg)
if type(arg) == str: return v.__validarStr(arg)
return False
def consulta_cnpj(cnpj):
    # Queries the Sintegra web service for the given CNPJ; the token below is a
    # placeholder and must be replaced with a real API token.
    url = "https://www.sintegraws.com.br/api/v1/execute-api.php"
    querystring = {"token": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", "cnpj": cnpj, "plugin": "RF"}
    response = requests.request("GET", url, params=querystring)
    print(response.text)
validar_cnpj = ValidadorCnpj()
class GeradorCnpj(object):
def __hashdigit(self, cnpj, position):
"""
Calculará o dígito de soma de verificação `position` fornecido para a entrada `cnpj`. A entrada deve conter
todos os elementos anteriores a `position` senão a computação produzirá o resultado errado.
"""
weighten = chain(range(position - 8, 1, -1), range(9, 1, -1))
val = sum(int(digit) * weight for digit, weight in zip(cnpj, weighten)) % 11
return 0 if val < 2 else 11 - val
def __checksum(self, basenum):
"""
Calculará os dígitos da soma de verificação para um determinado número base do CNPJ. `basenum` precisa ser uma string de dígitos
de comprimento adequado.
"""
digitos = str(self.__hashdigit(basenum, 13))
digitos += str(self.__hashdigit(basenum + digitos, 14))
return digitos
@staticmethod
def gerar(branch = 1):
"""
Gera uma string de dígitos CNPJ válida aleatória. Um parâmetro opcional de número de ramal pode ser fornecido,
o padrão é 1.
"""
branch %= 10000
branch += int(branch == 0)
branch = str(branch).zfill(4)
base = str(randint(0, 99999999)).zfill(8) + branch
while len(set(base)) == 1: base = str(randint(0, 99999999)).zfill(8) + branch
gerador = GeradorCnpj()
        return Cnpj(base + gerador.__checksum(base))
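# A minimal usage sketch: generate a random valid CNPJ, validate it, and print
# the formatted representation (the printed value is illustrative).
#
#   cnpj = GeradorCnpj.gerar()
#   assert cnpj.isValid
#   print(cnpj)  # e.g. 12.345.678/0001-95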
/0x-sra-client-4.0.0.tar.gz/src/zero_ex/sra_client/api_client.py
from __future__ import absolute_import
import datetime
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from zero_ex.sra_client.configuration import Configuration
import zero_ex.sra_client.models
from zero_ex.sra_client import rest
class ApiClient(object):
"""Generic API client for OpenAPI client library builds.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
"int": int,
"long": int if six.PY3 else long, # noqa: F821
"float": float,
"str": str,
"bool": bool,
"date": datetime.date,
"datetime": datetime.datetime,
"object": object,
}
_pool = None
def __init__(
self,
configuration=None,
header_name=None,
header_value=None,
cookie=None,
pool_threads=None,
):
if configuration is None:
configuration = Configuration()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = "OpenAPI-Generator/1.0.0/python"
def __del__(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers["User-Agent"]
@user_agent.setter
def user_agent(self, value):
self.default_headers["User-Agent"] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self,
resource_path,
method,
path_params=None,
query_params=None,
header_params=None,
body=None,
post_params=None,
files=None,
response_type=None,
auth_settings=None,
_return_http_data_only=None,
collection_formats=None,
_preload_content=True,
_request_timeout=None,
):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params["Cookie"] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(
self.parameters_to_tuples(header_params, collection_formats)
)
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(
path_params, collection_formats
)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
"{%s}" % k,
quote(str(v), safe=config.safe_chars_for_path_param),
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(
query_params, collection_formats
)
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(
post_params, collection_formats
)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
url = self.configuration.host + resource_path
# perform request and return response
response_data = self.request(
method,
url,
query_params=query_params,
headers=header_params,
post_params=post_params,
body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
self.last_response = response_data
return_data = response_data
if _preload_content:
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only:
return return_data
else:
return (
return_data,
response_data.status,
response_data.getheaders(),
)
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
        If obj is dict, sanitize each value in the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [
self.sanitize_for_serialization(sub_obj) for sub_obj in obj
]
elif isinstance(obj, tuple):
return tuple(
self.sanitize_for_serialization(sub_obj) for sub_obj in obj
)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `openapi_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {
obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in six.iteritems(obj.openapi_types)
if getattr(obj, attr) is not None
}
return {
key: self.sanitize_for_serialization(val)
for key, val in six.iteritems(obj_dict)
}
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith("list["):
sub_kls = re.match(r"list\[(.*)\]", klass).group(1)
return [
self.__deserialize(sub_data, sub_kls) for sub_data in data
]
if klass.startswith("dict("):
sub_kls = re.match(r"dict\(([^,]*), (.*)\)", klass).group(2)
return {
k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)
}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(zero_ex.sra_client.models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(
self,
resource_path,
method,
path_params=None,
query_params=None,
header_params=None,
body=None,
post_params=None,
files=None,
response_type=None,
auth_settings=None,
async_req=None,
_return_http_data_only=None,
collection_formats=None,
_preload_content=True,
_request_timeout=None,
):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
        :param response_type: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(
resource_path,
method,
path_params,
query_params,
header_params,
body,
post_params,
files,
response_type,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
)
else:
thread = self.pool.apply_async(
self.__call_api,
(
resource_path,
method,
path_params,
query_params,
header_params,
body,
post_params,
files,
response_type,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
),
)
return thread
def request(
self,
method,
url,
query_params=None,
headers=None,
post_params=None,
body=None,
_preload_content=True,
_request_timeout=None,
):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(
url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers,
)
elif method == "HEAD":
return self.rest_client.HEAD(
url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers,
)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(
url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
)
elif method == "POST":
return self.rest_client.POST(
url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
)
elif method == "PUT":
return self.rest_client.PUT(
url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
)
elif method == "PATCH":
return self.rest_client.PATCH(
url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
)
elif method == "DELETE":
return self.rest_client.DELETE(
url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
)
else:
raise ValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in (
six.iteritems(params) if isinstance(params, dict) else params
): # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == "multi":
new_params.extend((k, value) for value in v)
else:
if collection_format == "ssv":
delimiter = " "
elif collection_format == "tsv":
delimiter = "\t"
elif collection_format == "pipes":
delimiter = "|"
else: # csv is the default
delimiter = ","
new_params.append(
(k, delimiter.join(str(value) for value in v))
)
else:
new_params.append((k, v))
return new_params
def prepare_post_parameters(self, post_params=None, files=None):
"""Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, "rb") as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (
mimetypes.guess_type(filename)[0]
or "application/octet-stream"
)
params.append(
tuple([k, tuple([filename, filedata, mimetype])])
)
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if "application/json" in accepts:
return "application/json"
else:
return ", ".join(accepts)
def select_header_content_type(self, content_types):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return "application/json"
content_types = [x.lower() for x in content_types]
if "application/json" in content_types or "*/*" in content_types:
return "application/json"
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting["value"]:
continue
elif auth_setting["in"] == "header":
headers[auth_setting["key"]] = auth_setting["value"]
elif auth_setting["in"] == "query":
querys.append((auth_setting["key"], auth_setting["value"]))
else:
raise ValueError(
"Authentication token must be in `query` or `header`"
)
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(
r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition
).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return an original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string),
)
def __deserialize_datatime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object".format(string)
),
)
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if not klass.openapi_types and not hasattr(
klass, "get_real_child_model"
):
return data
kwargs = {}
if klass.openapi_types is not None:
for attr, attr_type in six.iteritems(klass.openapi_types):
if (
data is not None
and klass.attribute_map[attr] in data
and isinstance(data, (list, dict))
):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
if hasattr(instance, "get_real_child_model"):
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
        return instance
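# A minimal usage sketch; the relayer endpoint URL and resource path are
# hypothetical, and call_api returns (data, status, headers) by default:
#
#   config = Configuration()
#   config.host = "https://api.example-relayer.com"
#   client = ApiClient(configuration=config)
#   data, status, headers = client.call_api(
#       "/v3/orders", "GET", response_type="object")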
/Office365-REST-Python-Client-2.4.3.tar.gz/office365/sharepoint/ui/applicationpages/peoplepicker/query_parameters.py
from office365.runtime.client_value import ClientValue
from office365.sharepoint.principal.source import PrincipalSource
from office365.sharepoint.principal.type import PrincipalType
class ClientPeoplePickerQueryParameters(ClientValue):
def __init__(self, query_string, allow_emai_addresses=True, allow_multiple_entities=True,
allow_only_email_addresses=False,
all_url_zones=False, enabled_claim_providers=None, force_claims=False, maximum_entity_suggestions=1,
principal_source=PrincipalSource.All, principal_type=PrincipalType.All, url_zone=0,
url_zone_specified=False, sharepoint_group_id=0):
"""
Specifies the properties of a principal query
        :param int url_zone: Specifies a location in the topology of the farm for the principal query.
:param int sharepoint_group_id: specifies a group containing allowed principals to be used in the principal query.
:param str query_string: Specifies the value to be used in the principal query.
:param int principal_type: Specifies the type to be used in the principal query.
:param int principal_source: Specifies the source to be used in the principal query.
:param int maximum_entity_suggestions: Specifies the maximum number of principals to be returned by the
principal query.
:param bool force_claims: Specifies whether the principal query SHOULD be handled by claims providers.
:param bool enabled_claim_providers: Specifies the claims providers to be used in the principal query.
:param bool all_url_zones: Specifies whether the principal query will search all locations in the topology
of the farm.
:param bool allow_only_email_addresses: Specifies whether to allow the picker to resolve only email addresses as
valid entities. This property is only used when AllowEmailAddresses (section 3.2.5.217.1.1.1) is set to True.
Otherwise it is ignored.
:param bool allow_multiple_entities: Specifies whether the principal query allows multiple values.
:param bool allow_emai_addresses: Specifies whether the principal query can return a resolved principal
matching an unverified e-mail address when unable to resolve to a known principal.
"""
super(ClientPeoplePickerQueryParameters, self).__init__()
self.QueryString = query_string
self.AllowEmailAddresses = allow_emai_addresses
self.AllowMultipleEntities = allow_multiple_entities
self.AllowOnlyEmailAddresses = allow_only_email_addresses
self.AllUrlZones = all_url_zones
self.EnabledClaimProviders = enabled_claim_providers
self.ForceClaims = force_claims
self.MaximumEntitySuggestions = maximum_entity_suggestions
self.PrincipalSource = principal_source
self.PrincipalType = principal_type
self.UrlZone = url_zone
self.UrlZoneSpecified = url_zone_specified
self.SharePointGroupID = sharepoint_group_id
@property
def entity_type_name(self):
return "SP.UI.ApplicationPages.ClientPeoplePickerQueryParameters" | PypiClean |
/BitEx-2.0.0b3.zip/bitex/pairs.py
import logging
# Import Third-Party
# Import Homebrew
# Init Logging Facilities
log = logging.getLogger(__name__)
class PairFormatter:
"""Container Class which features formatting function for all supported
exchanges. These Formatter functions apply any changes to make a given
pair, pased as quote and base currency, compatible with an exchange.
This does NOT include an availability check of the pair.
It is therefore possible to format a given pair, even though it is not
supported by the requested exchange.
"""
def __init__(self, base, quote):
self._base = base
self._quote = quote
self.formatters = {'Kraken': self.kraken_formatter,
'Bitstamp': self.bitstamp_formatter,
'Bitfinex': self.bitfinex_formatter,
'Bittrex': self.bittrex_formatter,
'CoinCheck': self.coincheck_formatter,
'GDAX': self.gdax_formatter,
'ITBit': self.itbit_formatter,
'OKCoin': self.okcoin_formatter,
'BTC-E': self.btce_formatter,
'C-CEX': self.ccex_formatter,
'Cryptopia': self.cryptopia_formatter,
'Gemini': self.gemini_formatter,
'The Rock Trading Ltd.': self.rocktrading_formatter,
'Poloniex': self.poloniex_formatter,
'Quoine': self.quoine_formatter,
'QuadrigaCX': self.quadriga_formatter,
'HitBTC': self.hitbtc_formatter,
'Vaultoro': self.vaultoro_formatter,
'Bter': self.bter_formatter,
'Yunbi': self.yunbi_formatter,
"Binance": self.binance_formatter
}
def __str__(self, *args, **kwargs):
return self._base + self._quote
def __call__(self):
return self.__str__()
def format_for(self, exchange_name):
return self.formatters[exchange_name](self._base, self._quote)
@staticmethod
def kraken_formatter(base, quote):
base = 'XBT' if base == 'BTC' else base
quote = 'XBT' if quote == 'BTC' else quote
def add_prefix(cur):
if 'BCH' in (base, quote):
return cur
elif cur in ('USD', 'EUR', 'GBP', 'JPY', 'CAD'):
return 'Z' + cur
else:
return 'X' + cur
return add_prefix(base) + add_prefix(quote)
@staticmethod
def bitstamp_formatter(base, quote):
return base.lower() + quote.lower()
@staticmethod
def bitfinex_formatter(base, quote):
base = 'DSH' if base == 'DASH' else base
quote = 'DSH' if quote == 'DASH' else quote
return base + quote
@staticmethod
def bittrex_formatter(base, quote):
return quote + '-' + base
@staticmethod
def binance_formatter(base, quote):
return base + quote
@staticmethod
def coincheck_formatter(base, quote):
return base.lower() + '_' + quote.lower()
@staticmethod
def gdax_formatter(base, quote):
return base + '-' + quote
@staticmethod
def itbit_formatter(base, quote):
base = 'XBT' if base == 'BTC' else base
        quote = 'XBT' if quote == 'BTC' else quote
return base + quote
@staticmethod
def okcoin_formatter(base, quote):
return base.lower() + '_' + quote.lower()
@staticmethod
def btce_formatter(base, quote):
return base.lower() + '_' + quote.lower()
@staticmethod
def ccex_formatter(base, quote):
return base.lower() + '-' + quote.lower()
@staticmethod
def cryptopia_formatter(base, quote):
return base + '_' + quote
@staticmethod
def gemini_formatter(base, quote):
return base.lower() + quote.lower()
@staticmethod
def yunbi_formatter(base, quote):
return base.lower() + quote.lower()
@staticmethod
def rocktrading_formatter(base, quote):
return base + quote
@staticmethod
def poloniex_formatter(base, quote):
if ((quote == 'BTC') or (quote == 'USDT') or
(quote == 'XMR' and not(base == 'BTC' or base == 'USDT'))):
return quote + '_' + base
else:
return base + '_' + quote
@staticmethod
def quoine_formatter(base, quote):
return base + quote
@staticmethod
def quadriga_formatter(base, quote):
return base.lower() + '_' + quote.lower()
@staticmethod
def hitbtc_formatter(base, quote):
return base + quote
@staticmethod
def vaultoro_formatter(base, quote):
return base + '-' + quote
@staticmethod
def bter_formatter(base, quote):
return base.lower() + '_' + quote.lower()
class BTCUSDFormatter(PairFormatter):
def __init__(self):
super(BTCUSDFormatter, self).__init__('BTC', 'USD')
class ETHUSDFormatter(PairFormatter):
def __init__(self):
super(ETHUSDFormatter, self).__init__('ETH', 'USD')
class XMRUSDFormatter(PairFormatter):
def __init__(self):
super(XMRUSDFormatter, self).__init__('XMR', 'USD')
class ETCUSDFormatter(PairFormatter):
def __init__(self):
super(ETCUSDFormatter, self).__init__('ETC', 'USD')
class ZECUSDFormatter(PairFormatter):
def __init__(self):
super(ZECUSDFormatter, self).__init__('ZEC', 'USD')
class DASHUSDFormatter(PairFormatter):
def __init__(self):
super(DASHUSDFormatter, self).__init__('DASH', 'USD')
class BCHUSDFormatter(PairFormatter):
def __init__(self):
super(BCHUSDFormatter, self).__init__('BCH', 'USD')
class ETHBTCFormatter(PairFormatter):
def __init__(self):
super(ETHBTCFormatter, self).__init__('ETH', 'BTC')
class LTCBTCFormatter(PairFormatter):
def __init__(self):
super(LTCBTCFormatter, self).__init__('LTC', 'BTC')
class XMRBTCFormatter(PairFormatter):
def __init__(self):
super(XMRBTCFormatter, self).__init__('XMR', 'BTC')
class ETCBTCFormatter(PairFormatter):
def __init__(self):
super(ETCBTCFormatter, self).__init__('ETC', 'BTC')
class ZECBTCFormatter(PairFormatter):
def __init__(self):
super(ZECBTCFormatter, self).__init__('ZEC', 'BTC')
class DASHBTCFormatter(PairFormatter):
def __init__(self):
super(DASHBTCFormatter, self).__init__('DASH', 'BTC')
class BCHBTCFormatter(PairFormatter):
def __init__(self):
super(BCHBTCFormatter, self).__init__('BCH', 'BTC')
BTCUSD = BTCUSDFormatter()
ETHUSD = ETHUSDFormatter()
XMRUSD = XMRUSDFormatter()
ETCUSD = ETCUSDFormatter()
ZECUSD = ZECUSDFormatter()
DASHUSD = DASHUSDFormatter()
BCHUSD = BCHUSDFormatter()
ETHBTC = ETHBTCFormatter()
LTCBTC = LTCBTCFormatter()
XMRBTC = XMRBTCFormatter()
ETCBTC = ETCBTCFormatter()
ZECBTC = ZECBTCFormatter()
DASHBTC = DASHBTCFormatter()
BCHBTC = BCHBTCFormatter()
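# A minimal usage sketch: the predefined formatters render the same pair in
# each exchange's native notation.
#
#   BTCUSD.format_for('Kraken')                      # -> 'XXBTZUSD'
#   BTCUSD.format_for('Bitstamp')                    # -> 'btcusd'
#   PairFormatter('LTC', 'EUR').format_for('Poloniex')  # -> 'LTC_EUR'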
/Django_patch-2.2.19-py3-none-any.whl/django/contrib/auth/management/__init__.py
import getpass
import unicodedata
from django.apps import apps as global_apps
from django.contrib.auth import get_permission_codename
from django.contrib.contenttypes.management import create_contenttypes
from django.core import exceptions
from django.db import DEFAULT_DB_ALIAS, router
def _get_all_permissions(opts):
"""
Return (codename, name) for all permissions in the given opts.
"""
return [*_get_builtin_permissions(opts), *opts.permissions]
def _get_builtin_permissions(opts):
"""
Return (codename, name) for all autogenerated permissions.
By default, this is ('add', 'change', 'delete', 'view')
"""
perms = []
for action in opts.default_permissions:
perms.append((
get_permission_codename(action, opts),
'Can %s %s' % (action, opts.verbose_name_raw)
))
return perms
def create_permissions(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, apps=global_apps, **kwargs):
if not app_config.models_module:
return
# Ensure that contenttypes are created for this app. Needed if
# 'django.contrib.auth' is in INSTALLED_APPS before
# 'django.contrib.contenttypes'.
create_contenttypes(app_config, verbosity=verbosity, interactive=interactive, using=using, apps=apps, **kwargs)
app_label = app_config.label
try:
app_config = apps.get_app_config(app_label)
ContentType = apps.get_model('contenttypes', 'ContentType')
Permission = apps.get_model('auth', 'Permission')
except LookupError:
return
if not router.allow_migrate_model(using, Permission):
return
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = []
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_config.get_models():
# Force looking up the content types in the current database
# before creating foreign keys to them.
ctype = ContentType.objects.db_manager(using).get_for_model(klass, for_concrete_model=False)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
all_perms = set(Permission.objects.using(using).filter(
content_type__in=ctypes,
).values_list(
"content_type", "codename"
))
perms = [
Permission(codename=codename, name=name, content_type=ct)
for ct, (codename, name) in searched_perms
if (ct.pk, codename) not in all_perms
]
Permission.objects.using(using).bulk_create(perms)
if verbosity >= 2:
for perm in perms:
print("Adding permission '%s'" % perm)
def get_system_username():
"""
Return the current system user's username, or an empty string if the
username could not be determined.
"""
try:
result = getpass.getuser()
except (ImportError, KeyError):
# KeyError will be raised by os.getpwuid() (called by getuser())
# if there is no corresponding entry in the /etc/passwd file
# (a very restricted chroot environment, for example).
return ''
return result
def get_default_username(check_db=True):
"""
Try to determine the current system user's username to use as a default.
:param check_db: If ``True``, requires that the username does not match an
existing ``auth.User`` (otherwise returns an empty string).
:returns: The username, or an empty string if no username can be
determined.
"""
# This file is used in apps.py, it should not trigger models import.
from django.contrib.auth import models as auth_app
# If the User model has been swapped out, we can't make any assumptions
# about the default user name.
if auth_app.User._meta.swapped:
return ''
default_username = get_system_username()
try:
default_username = (
unicodedata.normalize('NFKD', default_username)
.encode('ascii', 'ignore').decode('ascii')
.replace(' ', '').lower()
)
except UnicodeDecodeError:
return ''
# Run the username validator
try:
auth_app.User._meta.get_field('username').run_validators(default_username)
except exceptions.ValidationError:
return ''
# Don't return the default username if it is already taken.
if check_db and default_username:
try:
auth_app.User._default_manager.get(username=default_username)
except auth_app.User.DoesNotExist:
pass
else:
return ''
    return default_username
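# A minimal usage sketch (interactive shell): suggest a default username for a
# createsuperuser-style prompt, skipping the database uniqueness check; the
# returned value depends on the operating-system user.
#
#   from django.contrib.auth.management import get_default_username
#   get_default_username(check_db=False)  # e.g. 'alice'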
/Mathics-1.0.tar.gz/Mathics-1.0/mathics/builtin/system.py
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
from mathics.core.expression import Expression, String, strip_context
from mathics.builtin.base import Builtin, Predefined
from mathics import version_string
class Version(Predefined):
"""
<dl>
<dt>'$Version'
<dd>returns a string with the current Mathics version and the versions of relevant libraries.
</dl>
>> $Version
= Mathics ...
"""
name = '$Version'
def evaluate(self, evaluation):
return String(version_string.replace('\n', ' '))
class Names(Builtin):
"""
<dl>
<dt>'Names["$pattern$"]'
<dd>returns the list of names matching $pattern$.
</dl>
>> Names["List"]
= {List}
The wildcard '*' matches any character:
>> Names["List*"]
= {List, ListLinePlot, ListPlot, ListQ, Listable}
The wildcard '@' matches only lowercase characters:
>> Names["List@"]
= {Listable}
>> x = 5;
>> Names["Global`*"]
= {x}
The number of built-in symbols:
>> Length[Names["System`*"]]
= ...
#> Length[Names["System`*"]] > 350
= True
"""
def apply(self, pattern, evaluation):
'Names[pattern_]'
pattern = pattern.get_string_value()
if pattern is None:
return
names = set([])
for full_name in evaluation.definitions.get_matching_names(pattern):
short_name = strip_context(full_name)
names.add(short_name if short_name not in names else full_name)
# TODO: Mathematica ignores contexts when it sorts the list of
# names.
return Expression('List', *[String(name) for name in sorted(names)])
class Aborted(Predefined):
"""
<dl>
<dt>'$Aborted'
<dd>is returned by a calculation that has been aborted.
</dl>
"""
name = '$Aborted'
class Failed(Predefined):
"""
<dl>
<dt>'$Failed'
<dd>is returned by some functions in the event of an error.
</dl>
>> Get["nonexistent_file.m"]
: Cannot open nonexistent_file.m.
= $Failed
"""
name = '$Failed'
class CommandLine(Predefined):
'''
<dl>
<dt>'$CommandLine'
<dd>is a list of strings passed on the command line to launch the Mathics session.
</dl>
>> $CommandLine
= {...}
'''
name = '$CommandLine'
def evaluate(self, evaluation):
return Expression('List', *(String(arg) for arg in sys.argv))
class ScriptCommandLine(Predefined):
'''
<dl>
<dt>'$ScriptCommandLine'
<dd>is a list of string arguments when running the kernel in script mode.
</dl>
>> $ScriptCommandLine
= {...}
'''
name = '$ScriptCommandLine'
def evaluate(self, evaluation):
try:
dash_index = sys.argv.index('--')
except ValueError:
# not run in script mode
return Expression('List')
return Expression('List', *(String(arg) for arg in sys.argv[dash_index + 1:])) | PypiClean |
/ConnectorDB-0.3.5.tar.gz/ConnectorDB-0.3.5/connectordb/_stream.py | from __future__ import absolute_import
import json
import os
from ._connectorobject import ConnectorObject
from ._datapointarray import DatapointArray
from jsonschema import Draft4Validator
import json
import time
# https://github.com/oxplot/fysom/issues/1
try:
unicode = unicode
except NameError:
basestring = (str, bytes)
DATAPOINT_INSERT_LIMIT = 5000
def query_maker(t1=None, t2=None, limit=None, i1=None, i2=None, transform=None, downlink=False):
"""query_maker takes the optional arguments and constructs a json query for a stream's
datapoints using it::
#{"t1": 5, "transform": "if $ > 5"}
print query_maker(t1=5,transform="if $ > 5")
"""
params = {}
if t1 is not None:
params["t1"] = t1
if t2 is not None:
params["t2"] = t2
if limit is not None:
params["limit"] = limit
if i1 is not None or i2 is not None:
if len(params) > 0:
raise AssertionError(
"Stream cannot be accessed both by index and by timestamp at the same time.")
if i1 is not None:
params["i1"] = i1
if i2 is not None:
params["i2"] = i2
# If no range is given, query whole stream
if len(params) == 0:
params["i1"] = 0
params["i2"] = 0
if transform is not None:
params["transform"] = transform
if downlink:
params["downlink"] = True
return params
class Stream(ConnectorObject):
def create(self, schema="{}", **kwargs):
"""Creates a stream given an optional JSON schema encoded as a python dict. You can also add other properties
of the stream, such as the icon, datatype or description. Create accepts both a string schema and
a dict-encoded schema."""
if isinstance(schema, basestring):
strschema = schema
schema = json.loads(schema)
else:
strschema = json.dumps(schema)
Draft4Validator.check_schema(schema)
kwargs["schema"] = strschema
self.metadata = self.db.create(self.path, kwargs).json()
def insert_array(self, datapoint_array, restamp=False):
"""given an array of datapoints, inserts them to the stream. This is different from insert(),
because it requires an array of valid datapoints, whereas insert only requires the data portion
of the datapoint, and fills out the rest::
s = cdb["mystream"]
s.create({"type": "number"})
s.insert_array([{"d": 4, "t": time.time()},{"d": 5, "t": time.time()}], restamp=False)
The optional `restamp` parameter specifies whether or not the database should rewrite the timestamps
of datapoints which have a timestamp that is less than one that already exists in the database.
That is, if restamp is False, and a datapoint has a timestamp less than a datapoint that already
exists in the database, then the insert will fail. If restamp is True, then all datapoints
with timestamps below the datapoints already in the database will have their timestamps overwritten
to the same timestamp as the most recent datapoint that already exists in the database, and the insert will
succeed.
"""
# To be safe, we split into chunks
while (len(datapoint_array) > DATAPOINT_INSERT_LIMIT):
# We insert datapoints in chunks of a couple thousand so that they
# fit in the insert size limit of ConnectorDB
a = datapoint_array[:DATAPOINT_INSERT_LIMIT]
if restamp:
self.db.update(self.path + "/data", a)
else:
self.db.create(self.path + "/data", a)
# Clear the written datapoints
datapoint_array = datapoint_array[DATAPOINT_INSERT_LIMIT:]
if restamp:
self.db.update(self.path + "/data", datapoint_array)
else:
self.db.create(self.path + "/data", datapoint_array)
def insert(self, data):
"""insert inserts one datapoint with the given data, and appends it to
the end of the stream::
s = cdb["mystream"]
s.create({"type": "string"})
s.insert("Hello World!")
"""
self.insert_array([{"d": data, "t": time.time()}], restamp=True)
def append(self, data):
""" Same as insert, using the pythonic array name """
self.insert(data)
def subscribe(self, callback, transform="", downlink=False):
"""Subscribes to the stream, running the callback function each time datapoints are inserted into
the given stream. There is an optional transform to the datapoints, and a downlink parameter.::
s = cdb["mystream"]
def subscription_callback(stream,data):
print stream, data
s.subscribe(subscription_callback)
The downlink parameter is for downlink streams - it allows you to subscribe to the downlink substream,
before it is acknowledged. This is especially useful for something like lights - have lights be
a boolean downlink stream, and the light itself be subscribed to the downlink, so that other
devices can write to the light, turning it on and off::
def light_control(stream,data):
light_boolean = data[0]["d"]
print "Setting light to", light_boolean
set_light(light_boolean)
#Acknowledge the write
return True
# We don't care about intermediate values, we only want the most recent setting
# of the light, meaning we want the "if last" transform
s.subscribe(light_control, downlink=True, transform="if last")
"""
streampath = self.path
if downlink:
streampath += "/downlink"
return self.db.subscribe(streampath, callback, transform)
def unsubscribe(self, transform="", downlink=False):
"""Unsubscribes from a previously subscribed stream. Note that the same values of transform
and downlink must be passed in order to do the correct unsubscribe::
s.subscribe(callback,transform="if last")
s.unsubscribe(transform="if last")
"""
streampath = self.path
if downlink:
streampath += "/downlink"
return self.db.unsubscribe(streampath, transform)
def __call__(self, t1=None, t2=None, limit=None, i1=None, i2=None, downlink=False, transform=None):
"""By calling the stream as a function, you can query it by either time range or index,
and further you can perform a custom transform on the stream::
#Returns all datapoints with their data < 50 from the past minute
stream(t1=time.time()-60, transform="if $ < 50")
#Performs an aggregation on the stream, returning a single datapoint
#which contains the sum of the datapoints
stream(transform="sum | if last")
"""
params = query_maker(t1, t2, limit, i1, i2, transform, downlink)
# In order to avoid accidental requests for full streams, ConnectorDB does not permit requests
# without any url parameters, so we set i1=0 if we are requesting the
# full stream
if len(params) == 0:
params["i1"] = 0
return DatapointArray(self.db.read(self.path + "/data", params).json())
def __getitem__(self, getrange):
"""Allows accessing the stream just as if it were just one big python array.
An example::
#Returns the most recent 5 datapoints from the stream
stream[-5:]
#Returns all the data the stream holds.
stream[:]
In order to perform transforms on the stream and to aggregate data, look at __call__,
which allows getting index ranges along with a transform.
"""
if not isinstance(getrange, slice):
# Return the single datapoint
return self(i1=getrange, i2=getrange + 1)[0]
# The query is a slice - return the range
return self(i1=getrange.start, i2=getrange.stop)
def length(self, downlink=False):
return int(self.db.read(self.path + "/data", {"q": "length", "downlink": downlink}).text)
def __len__(self):
"""taking len(stream) returns the number of datapoints saved within the database for the stream"""
return self.length()
def __repr__(self):
"""Returns a string representation of the stream"""
return "[Stream:%s]" % (self.path, )
def export(self, directory):
"""Exports the stream to the given directory. The directory can't exist.
You can later import this device by running import_stream on a device.
"""
if os.path.exists(directory):
raise FileExistsError(
"The stream export directory already exists")
os.mkdir(directory)
# Write the stream's info
with open(os.path.join(directory, "stream.json"), "w") as f:
json.dump(self.data, f)
# Now write the stream's data
# We sort it first, since older versions of ConnectorDB had a bug
# where sometimes datapoints would be returned out of order.
self[:].sort().writeJSON(os.path.join(directory, "data.json"))
# And if the stream is a downlink, write the downlink data
if self.downlink:
self(i1=0, i2=0, downlink=True).sort().writeJSON(os.path.join(directory, "downlink.json"))
# -----------------------------------------------------------------------
# Following are getters and setters of the stream's properties
@property
def datatype(self):
"""returns the stream's registered datatype. The datatype suggests how the stream can be processed."""
if "datatype" in self.data:
return self.data["datatype"]
return ""
@datatype.setter
def datatype(self, set_datatype):
self.set({"datatype": set_datatype})
@property
def downlink(self):
"""returns whether the stream is a downlink, meaning that it accepts input (like turning lights on/off)"""
if "downlink" in self.data:
return self.data["downlink"]
return False
@downlink.setter
def downlink(self, is_downlink):
self.set({"downlink": is_downlink})
@property
def ephemeral(self):
"""returns whether the stream is ephemeral, meaning that data is not saved, but just passes through the messaging system."""
if "ephemeral" in self.data:
return self.data["ephemeral"]
return False
@ephemeral.setter
def ephemeral(self, is_ephemeral):
"""sets whether the stream is ephemeral, meaning that it sets whether the datapoints are saved in the database.
an ephemeral stream is useful for things which are set very frequently, and which could want a subscription, but
which are not important enough to be saved in the database"""
self.set({"ephemeral": is_ephemeral})
@property
def schema(self):
"""Returns the JSON schema of the stream as a python dict."""
if "schema" in self.data:
return json.loads(self.data["schema"])
return None
@property
def sschema(self):
"""Returns the JSON schema of the stream as a string"""
if "schema" in self.data:
return self.data["schema"]
return None
@schema.setter
def schema(self, schema):
"""sets the stream's schema. An empty schema is "{}". The schemas allow you to set a specific data type.
Both python dicts and strings are accepted."""
if isinstance(schema, basestring):
strschema = schema
schema = json.loads(schema)
else:
strschema = json.dumps(schema)
Draft4Validator.check_schema(schema)
self.set({"schema": strschema})
@property
def user(self):
"""user returns the user which owns the given stream"""
return User(self.db, self.path.split("/")[0])
@property
def device(self):
"""returns the device which owns the given stream"""
splitted_path = self.path.split("/")
return Device(self.db,
splitted_path[0] + "/" + splitted_path[1])
# The import has to go on the bottom because py3 imports are annoying
from ._user import User
from ._device import Device | PypiClean |
/Flask-Flacro-0.0.8.tar.gz/Flask-Flacro-0.0.8/flask_flacro/flacro.py | import re
from flask import current_app, Blueprint, _app_ctx_stack
from werkzeug import LocalProxy, MultiDict, CombinedMultiDict
from .compat import with_metaclass
from collections import defaultdict
import weakref
_flacro_jinja = LocalProxy(lambda: current_app.jinja_env)
_glo = LocalProxy(lambda: current_app.jinja_env.globals)
ATTR_BLACKLIST = re.compile("mwhere|mname|mattr|macros|^_")
class FlacroForMeta(type):
def __new__(cls, name, bases, dct):
new_class = super(FlacroForMeta, cls).__new__(cls, name, bases, dct)
if not hasattr(cls, '_instances'):
new_class._instances = defaultdict(weakref.WeakSet)
if not hasattr(cls, '_manager'):
cls._manager = {}
cls._manager[new_class.__name__] = new_class
return new_class
def __init__(cls, name, bases, dct):
if not hasattr(cls, '_registry'):
cls._registry = {}
else:
cls._registry[name] = cls._instances
super(FlacroForMeta, cls).__init__(name, bases, dct)
class FlacroFor(with_metaclass(FlacroForMeta)):
"""
A container class for managing, holding and returning Jinja2 macros within
a Flask application. Instantiate it as-is or use it as a mixin.
m = FlacroFor(mwhere="macros/my_macro.html", mname="my_macro")
class MyMacro(FlacroFor):
def __init__(self, a, b):
self.a = a
self.b = b
super(MyMacro, self).__init__(mwhere="macros/my_macro.html",
mname="my_macro")
where "macros/my_macro.html" is a file in your templates directory and
"my_macro" is a defined macro within that file.
:param mwhere: the jinja template file location of your macro
:param mname: the name of the macro within the macro file
:param mattr: a dict of items you might want to access
e.g. {'a': 'AAAAAA', 'b': 'BBBBB'}
:param macros: a dict of macros within the same file specified
above as mwhere in the form {mname: mattr}
e.g. {'my_macro_1': {1: 'x', 2: 'y'},
'my_macro_2': None}
"""
def __init__(self, **kwargs):
self.tag = kwargs.get('tag', None)
self.mwhere = kwargs.get('mwhere', None)
self.mname = kwargs.get('mname', None)
self._mattr = kwargs.get('mattr', None)
self._macros = kwargs.get('macros', None)
if self._mattr:
for k, v in self._mattr.items():
setattr(self, k, v)
if self._macros:
for k, v in self._macros.items():
setattr(self, k, self.get_macro(k, mattr=v))
self.register_instance(self)
@classmethod
def register_instance(cls, instance):
if getattr(instance, 'tag', None):
cls._instances[instance.tag] = weakref.ref(instance, instance) # store only a weak reference keyed by tag so tagged instances can still be garbage collected
else:
cls._instances[None].add(instance)
@property
def ctx_prc(self):
def ctx_prc(macro):
return LocalProxy(lambda: getattr(macro, 'render', None))
return {self.tag: ctx_prc(self)}
def _public(self):
return [k for k in self.__dict__.keys() if not ATTR_BLACKLIST.search(k)]
@property
def public(self):
return {k: getattr(self, k, None) for k in self._public()}
def update(self, **kwargs):
[setattr(self, k, v) for k,v in kwargs.items()]
def get_macro(self, mname, mattr=None, replicate=False):
"""returns another MacroFor instance with a differently named macro from
the template location of this instance"""
if replicate:
mattr=self.public
return FlacroFor(mwhere=self.mwhere,
mname=mname,
mattr=mattr)
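# Illustrative usage: given a FlacroFor bound to "macros/my_macro.html",
# get_macro("header") returns a new FlacroFor rendering the "header" macro
# defined in that same template file (the macro names here are hypothetical).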
def jinja_template(self, mwhere):
return _flacro_jinja.get_template(mwhere, globals=_glo).module
def get_template_attribute(self, mwhere, mname):
return getattr(self.jinja_template(mwhere), mname)
@property
def renderable(self):
"""the macro held but not called"""
try:
return self.get_template_attribute(self.mwhere, self.mname)
except RuntimeError:
return LocalProxy(lambda: self.get_template_attribute(self.mwhere, self.mname))
@property
def render(self):
"""calls the macro, passing itself as accessible within"""
return self.renderable(self)
@property
def render_static(self):
"""calls the macro passing in no variable"""
return self.renderable()
def render_with(self, content):
"""calls the macro with the content specified as parameter(s)"""
return self.renderable(content)
def __repr__(self):
return "<MacroFor {} ({}: {})>".format(getattr(self, 'tag', None), self.mwhere, self.mname)
class Flacro(object):
"""flask/jinja2 tools for managing template macros"""
def __init__(self,
app=None,
register_blueprint=True):
self.app = app
self.register_blueprint = register_blueprint
self._registry = FlacroFor._registry
self._managed = FlacroFor._manager
if self.app is not None:
self.init_app(self.app)
@property
def provides(self):
return CombinedMultiDict([(MultiDict([(k,v),(k, self._registry.get(k, None))]))
for k,v in self._managed.items()])
def init_app(self, app):
app.extensions['flacro'] = self
app.before_request(self.make_ctx_prc)
if self.register_blueprint:
app.register_blueprint(self._blueprint)
def make_ctx_prc(self):
[[self.app.jinja_env.globals.update(macro().ctx_prc)
for m, macro in mf.items() if m]
for mf in self._registry.values()]
@property
def _blueprint(self):
return Blueprint('flacro', __name__, template_folder='templates') | PypiClean |
/Flask-SQLAlchemy-Meiqia-2016.8.1.zip/Flask-SQLAlchemy-Meiqia-2016.8.1/docs/index.rst | :orphan:
Flask-SQLAlchemy
================
.. module:: flask_sqlalchemy
Flask-SQLAlchemy is an extension for `Flask`_ that adds support for
`SQLAlchemy`_ to your application. It requires SQLAlchemy 0.8 or
higher. It aims to simplify using SQLAlchemy with Flask by providing
useful defaults and extra helpers that make it easier to accomplish common
tasks.
.. _SQLAlchemy: http://www.sqlalchemy.org/
.. _Flask: http://flask.pocoo.org/
.. _example sourcecode:
http://github.com/mitsuhiko/flask-sqlalchemy/tree/master/examples/
See `the SQLAlchemy documentation`_ to learn how to work with the ORM in depth. The following documentation is a brief overview of the most common tasks, as well as the features specific to Flask-SQLAlchemy.
.. _the SQLAlchemy documentation: http://docs.sqlalchemy.org/
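A minimal usage sketch, assuming the common 2.x setup (the model below is
illustrative)::
    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///example.db'
    db = SQLAlchemy(app)
    class User(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        username = db.Column(db.String(80), unique=True)
    db.create_all()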
.. include:: contents.rst.inc
| PypiClean |
/MASSA_Algorithm-0.9.1-py3-none-any.whl/MASSA_Algorithm/MASSA.py | from MASSA_Algorithm import MASSAlogos
from MASSA_Algorithm import MASSAargs
from MASSA_Algorithm import MASSAod
from MASSA_Algorithm import MASSAopen_files
from MASSA_Algorithm import MASSAextraction
from MASSA_Algorithm import MASSAdescriptors
from MASSA_Algorithm import MASSApreparation
from MASSA_Algorithm import MASSAcluster
from MASSA_Algorithm import MASSAsplit
from MASSA_Algorithm import MASSAmoloutput
def returns_zero(total, test):
# It evaluates whether the distribution is inadequate, i.e. some cluster has a percentage greater than 0.5% in the complete data set but at most 0.5% in the test set.
definer = False
for i in total.keys():
if (total[i] > 0.5) and (test[i] <= 0.5):
definer = True # Definer = True (Distribution was not done properly).
return definer
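# Illustrative example: with total = {0: 10.0, 1: 2.0} and test = {0: 0.4, 1: 5.0},
# cluster 0 exceeds 0.5% overall but not in the test set, so returns_zero gives True.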
def main(): # Main subroutine; allows the program to run directly from the command line after being installed via pip.
## Initializing from the command line:
MASSAlogos.initial_print() # Print the program logo.
FileInput, FileOutput, directoryFileOutput, extension_type, dendrogram_Xfont_size, barplot_Xfont_size, training_percent, test_percent, numberBioAct, BioActAsArgs, nPCS, svd_parameter, linkage_method, flag_dendrogram = MASSAargs.capture_args() # It captures command line arguments.
print('Initializing, wait...\n')
## Create log.txt file in directory:
ArqLog = directoryFileOutput+'/log.txt'
WriteLog = open(ArqLog, 'w')
## Initial file management:
MASSAod.output_directory(directoryFileOutput) # It creates the output directories.
mols = MASSAopen_files.read_molecules(FileInput, WriteLog) # Read molecules.
sdf_property_names = MASSAopen_files.get_sdf_property_names(mols) # Extracting the property names from the ".sdf" input file.
molsH = MASSAopen_files.hydrogen_add(mols) # Structure 3D management - It adds hydrogens keeping 3D coordenates.
## Extraction properties from ".sdf":
names, dataframe = MASSAextraction.name_extraction(molsH) # It extracts the names of the molecules and creates a name:molecule dictionary and a dataframe.
biological_activity = MASSAextraction.the_biological_handler(sdf_property_names, numberBioAct, BioActAsArgs) # It defines a list of what biological activities are being extracted.
dataframe = MASSAextraction.list_activities(dataframe, biological_activity) # It adds the biological activities to the dataframe.
## Get fingeprint and other descriptors:
dataframe = MASSAdescriptors.physicochemical_descriptors(dataframe) # Get physicochemical descriptors.
dataframe = MASSAdescriptors.atompairs_fingerprint(dataframe) # Get AtomPairs fingerprint.
## Normalizes physicochemical and biological properties and creates matrices for the three domains:
bio_matrix, PhCh_matrix, FP_matrix = MASSApreparation.normalizer_or_matrix(dataframe, biological_activity)
## PCA:
bio_PCA = MASSApreparation.pca_maker(bio_matrix, nPCS, svd_parameter) # PCA for the biological domain.
PhCh_PCA = MASSApreparation.pca_maker(PhCh_matrix, nPCS, svd_parameter) # PCA for the physicochemical domain.
FP_PCA = MASSApreparation.pca_maker(FP_matrix, nPCS, svd_parameter) # PCA for the structural domain.
## First clustering (HCA):
leaves_cluster_bio, bioHCA, linkage_bio, CutOff_bio = MASSAcluster.hca_clusters(bio_PCA, names, 'bio', directoryFileOutput, extension_type, linkage_method) # It performs HCA clustering without generating the dendrogram for the biological domain.
leaves_cluster_phch, phchHCA, linkage_phch, CutOff_phch = MASSAcluster.hca_clusters(PhCh_PCA, names, 'PhCh', directoryFileOutput, extension_type, linkage_method) # It performs HCA clustering without generating the dendrogram for the physicochemical domain.
leaves_cluster_fp, fpHCA, linkage_fp, CutOff_fp = MASSAcluster.hca_clusters(FP_PCA, names, 'FP', directoryFileOutput, extension_type, linkage_method) # It performs HCA clustering without generating the dendrogram for the structural domain.
dataframe = MASSApreparation.organize_df_clusterization(dataframe, bioHCA, 'bio') # It adds the biological cluster identification to the spreadsheet.
dataframe = MASSApreparation.organize_df_clusterization(dataframe, phchHCA, 'PhCh') # It adds the physicochemical cluster identification to the spreadsheet.
dataframe = MASSApreparation.organize_df_clusterization(dataframe, fpHCA, 'FP') # It adds the structural cluster identification to the spreadsheet.
## Second clustering (Kmodes):
matrix_for_kmodes = MASSApreparation.organize_for_kmodes(dataframe) # It creates a matrix with cluster identifications for each of the three domains, in order to prepare for Kmodes.
allHCA = MASSAcluster.kmodes_clusters(matrix_for_kmodes, names) # It performs Kmodes clustering for the general domain.
dataframe = MASSApreparation.organize_df_clusterization(dataframe, allHCA, 'all') # It adds the general cluster identification to the spreadsheet.
## Split into training, test:
dataframe, test_molecules = MASSAsplit.split_train_test_sets(dataframe, training_percent, test_percent)
## Bar plot of frequencies (Calculates the percentages of molecules in each cluster for each dataset and generates a bar graph for each domain):
bio_total, bio_training, bio_test = MASSAsplit.freq_clusters(dataframe, directoryFileOutput, extension_type, 'Cluster_Biological', barplot_Xfont_size) # Biological Bar Plot
PhCh_total, PhCh_training, PhCh_test = MASSAsplit.freq_clusters(dataframe, directoryFileOutput, extension_type, 'Cluster_Physicochemical', barplot_Xfont_size) # Physicochemical Bar Plot
FP_total, FP_training, FP_test = MASSAsplit.freq_clusters(dataframe, directoryFileOutput, extension_type, 'Cluster_Structural', barplot_Xfont_size) # Structural Bar Plot
all_total, all_training, all_test = MASSAsplit.freq_clusters(dataframe, directoryFileOutput, extension_type, 'Cluster_General', barplot_Xfont_size) # General Bar Plot
## Verifying percentages:
bio_ok = returns_zero(bio_total, bio_test) # For biological domain.
PhCh_ok = returns_zero(PhCh_total, PhCh_test) # For physicochemical domain.
FP_ok = returns_zero(FP_total, FP_test) # For structural domain.
ok = [bio_ok, PhCh_ok, FP_ok]
max_iters = 0
# Redo the distribution in case of errors (up to 10 times):
while (True in ok) and (max_iters < 10):
## Split into training, test:
dataframe, test_molecules = MASSAsplit.split_train_test_sets(dataframe, training_percent, test_percent)
## Bar plot of frequencies (Calculates the percentages of molecules in each cluster for each dataset and generates a bar graph for each domain):
bio_total, bio_training, bio_test = MASSAsplit.freq_clusters(dataframe, directoryFileOutput, extension_type, 'Cluster_Biological', barplot_Xfont_size) # Biological Bar Plot
PhCh_total, PhCh_training, PhCh_test = MASSAsplit.freq_clusters(dataframe, directoryFileOutput, extension_type, 'Cluster_Physicochemical', barplot_Xfont_size) # Physicochemical Bar Plot
FP_total, FP_training, FP_test = MASSAsplit.freq_clusters(dataframe, directoryFileOutput, extension_type, 'Cluster_Structural', barplot_Xfont_size) # Structural Bar Plot
all_total, all_training, all_test = MASSAsplit.freq_clusters(dataframe, directoryFileOutput, extension_type, 'Cluster_General', barplot_Xfont_size) # General Bar Plot
## Verifying percentages:
bio_ok = returns_zero(bio_total, bio_test) # For biological domain.
PhCh_ok = returns_zero(PhCh_total, PhCh_test) # For physicochemical domain.
FP_ok = returns_zero(FP_total, FP_test) # For structural domain.
ok = [bio_ok, PhCh_ok, FP_ok]
max_iters += 1
## Write distribution information to log file:
bio_distr = 'Biological Distribution'
MASSAsplit.log_of_distributions(bio_distr, bio_total, bio_training, bio_test, WriteLog)
PhCh_distr = 'Physicochemical Distribution'
MASSAsplit.log_of_distributions(PhCh_distr, PhCh_total, PhCh_training, PhCh_test, WriteLog)
FP_distr = 'Structural (FP) Distribution'
MASSAsplit.log_of_distributions(FP_distr, FP_total, FP_training, FP_test, WriteLog)
all_distr = 'General Distribution'
MASSAsplit.log_of_distributions(all_distr, all_total, all_training, all_test, WriteLog)
WriteLog.close()
## Plot HCAs:
if flag_dendrogram == True:
print('\nGenerating dendrogram images. Please wait...')
MASSAcluster.hca_plot(linkage_bio, names, leaves_cluster_bio, CutOff_bio, 'bio', directoryFileOutput, extension_type, dendrogram_Xfont_size, test_molecules) #Bio_Plot: Plot the HCA dendrogram
MASSAcluster.hca_plot(linkage_phch, names, leaves_cluster_phch, CutOff_phch, 'PhCh', directoryFileOutput, extension_type, dendrogram_Xfont_size, test_molecules) #PhCh_Plot: Plot the HCA dendrogram
MASSAcluster.hca_plot(linkage_fp, names, leaves_cluster_fp, CutOff_fp, 'FP', directoryFileOutput, extension_type, dendrogram_Xfont_size, test_molecules) #FP_Plot: Plot the HCA dendrogram
## Output management:
MASSAmoloutput.output_mols(dataframe, FileOutput) # It adds, for each molecule, the values of the calculated properties, the identifications of each cluster and which set the molecule belongs to.
print('Completed') | PypiClean |
/Draugr-1.0.9.tar.gz/Draugr-1.0.9/draugr/opencv_utilities/namespaces/flags.py |
__author__ = "heider"
__doc__ = r"""
Created on 01/02/2022
"""
__all__ = [
"ThresholdTypeFlag",
"WindowPropertyFlag",
"DrawMatchesFlagEnum",
"MouseEventFlag",
"TermCriteriaFlag",
]
from enum import Flag
import cv2
class TermCriteriaFlag(Flag):
""" """
count = (
cv2.TERM_CRITERIA_COUNT
) # the maximum number of iterations or elements to compute
eps = (
cv2.TERM_CRITERIA_EPS
) # the desired accuracy or change in parameters at which the iterative algorithm stops
max_iter = cv2.TERM_CRITERIA_MAX_ITER # the maximum number of iterations to compute
class DrawMatchesFlagEnum(Flag):
""" """
default = cv2.DRAW_MATCHES_FLAGS_DEFAULT
# Output image matrix will be created (Mat::create), i.e. existing memory of output image may be reused. Two source images, matches and single keypoints will be drawn. For each keypoint only the center point will be drawn (without the circle around keypoint with keypoint size and orientation).
over_outimg = cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG
# Output image matrix will not be created (Mat::create). Matches will be drawn on existing content of output image.
not_draw_single_points = cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
# Single keypoints will not be drawn.
rich_keypoints = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
# For each keypoint the circle around keypoint with keypoint size and orientation will be drawn.
class ThresholdTypeFlag(Flag):
""" """
binary = cv2.THRESH_BINARY # dst(x,y) = maxval if src(x,y) > thresh, else 0
inverse_binary = (
cv2.THRESH_BINARY_INV
) # dst(x,y) = 0 if src(x,y) > thresh, else maxval
truncate = (
cv2.THRESH_TRUNC
) # dst(x,y) = thresh if src(x,y) > thresh, else src(x,y)
to_zero = cv2.THRESH_TOZERO # dst(x,y) = src(x,y) if src(x,y) > thresh, else 0
inverse_to_zero = (
cv2.THRESH_TOZERO_INV
) # dst(x,y) = 0 if src(x,y) > thresh, else src(x,y)
mask = cv2.THRESH_MASK
otsu = (
cv2.THRESH_OTSU
) # flag, use Otsu algorithm to choose the optimal threshold value
triangle = (
cv2.THRESH_TRIANGLE
) # flag, use Triangle algorithm to choose the optimal threshold value
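# Hedged usage sketch: cv2.threshold expects a plain integer flag, so pass the
# enum member's value explicitly:
#   _, mask = cv2.threshold(gray, 127, 255, ThresholdTypeFlag.binary.value)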
class WindowPropertyFlag(Flag):
""" """
fullscreen = (
cv2.WND_PROP_FULLSCREEN
) # fullscreen property (can be WINDOW_NORMAL or WINDOW_FULLSCREEN).
autosize = (
cv2.WND_PROP_AUTOSIZE
) # autosize property (can be WINDOW_NORMAL or WINDOW_AUTOSIZE).
keep_ratio = (
cv2.WND_PROP_ASPECT_RATIO
) # window's aspect ratio (can be set to WINDOW_FREERATIO or WINDOW_KEEPRATIO).
opengl = cv2.WND_PROP_OPENGL # opengl support.
visible = cv2.WND_PROP_VISIBLE # checks whether the window exists and is visible
topmost = (
cv2.WND_PROP_TOPMOST
) # property to toggle normal window being topmost or not
class MouseEventFlag(Flag):
""" """
ctrl_down = cv2.EVENT_FLAG_CTRLKEY # indicates that CTRL Key is pressed.
shift_down = cv2.EVENT_FLAG_SHIFTKEY # indicates that SHIFT Key is pressed.
alt_down = cv2.EVENT_FLAG_ALTKEY # indicates that ALT Key is pressed.
left_down = cv2.EVENT_FLAG_LBUTTON # indicates that the left mouse button is down.
right_down = (
cv2.EVENT_FLAG_RBUTTON
) # indicates that the right mouse button is down.
middle_down = (
cv2.EVENT_FLAG_MBUTTON
) # indicates that the middle mouse button is down. | PypiClean |
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/services/mapserver/wms/feature_info_renderer.py |
from eoxserver.core import implements
from eoxserver.core.config import get_eoxserver_config
from eoxserver.core.decoders import xml
from eoxserver.contrib import mapserver as ms
from eoxserver.resources.coverages import models
from eoxserver.services.ows.common.config import CapabilitiesConfigReader
from eoxserver.services.mapserver.wms.util import MapServerWMSBaseComponent
from eoxserver.services.ows.wms.interfaces import (
WMSFeatureInfoRendererInterface
)
from eoxserver.services.ows.wcs.v20.encoders import WCS20EOXMLEncoder
from eoxserver.services.result import (
result_set_from_raw_data, get_content_type, ResultBuffer
)
from eoxserver.services.urls import get_http_service_url
class MapServerWMSFeatureInfoRenderer(MapServerWMSBaseComponent):
""" A WMS feature info renderer using MapServer.
"""
implements(WMSFeatureInfoRendererInterface)
def render(self, layer_groups, request_values, request, **options):
config = CapabilitiesConfigReader(get_eoxserver_config())
http_service_url = get_http_service_url(request)
map_ = ms.Map()
map_.setMetaData({
"enable_request": "*",
"onlineresource": http_service_url,
}, namespace="ows")
map_.setMetaData("wms_getfeatureinfo_formatlist", "text/html")
map_.setProjection("EPSG:4326")
session = self.setup_map(layer_groups, map_, options)
# check if the required format is EO O&M
frmt = pop_param(request_values, "info_format")
use_eoom = False
if frmt in ("application/xml", "text/xml"):
request_values.append(("info_format", "application/vnd.ogc.gml"))
use_eoom = True
else:
request_values.append(("info_format", frmt))
with session:
request = ms.create_request(request_values)
raw_result = map_.dispatch(request)
result = result_set_from_raw_data(raw_result)
if not use_eoom:
# just return the response
return result, get_content_type(result)
else:
# do a postprocessing step and get all identifiers in order
# to encode them with EO O&M
decoder = GMLFeatureDecoder(result[0].data_file.read())
identifiers = decoder.identifiers
coverages = models.Coverage.objects.filter(
identifier__in=identifiers
)
# re-order the coverages to match the order of the returned identifiers
lookup_table = dict((c.identifier, c) for c in coverages)
coverages = [
lookup_table[identifier] for identifier in identifiers
]
# encode the coverages with the EO O&M
encoder = WCS20EOXMLEncoder()
return [
ResultBuffer(
encoder.serialize(
encoder.encode_coverage_descriptions(coverages)
), encoder.content_type
)
], encoder.content_type
def pop_param(request_values, name, default=None):
""" Helper to pop one param from a key-value list
"""
for param_name, value in request_values:
if param_name.lower() == name:
request_values.remove((param_name, value))
return value
return default
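# Illustrative example: pop_param([("INFO_FORMAT", "text/html")], "info_format")
# returns "text/html" and removes that pair from the list in place.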
class GMLFeatureDecoder(xml.Decoder):
identifiers = xml.Parameter("//identifier/text()", num="*") | PypiClean |
/GaitAnalysisToolKit-0.2.0.tar.gz/GaitAnalysisToolKit-0.2.0/README.rst | Introduction
============
This is a collection of tools that are helpful for gait analysis. Some are
specific to the needs of the Human Motion and Control Lab at Cleveland State
University but other portions may have potential for general use. It is
relatively modular so you can use what you want. It is primarily structured as
a Python distribution but the Octave files are also accessible independently.
.. image:: https://img.shields.io/pypi/v/gaitanalysistoolkit.svg
:target: https://pypi.python.org/pypi/gaitanalysistoolkit/
:alt: Latest Version
.. image:: https://zenodo.org/badge/6017/csu-hmc/GaitAnalysisToolKit.svg
:target: http://dx.doi.org/10.5281/zenodo.13006
.. image:: https://travis-ci.org/csu-hmc/GaitAnalysisToolKit.png?branch=master
:target: http://travis-ci.org/csu-hmc/GaitAnalysisToolKit
Python Packages
===============
The main Python package is ``gaitanalysis`` and it contains five modules listed
below. ``oct2py`` is used to call Octave routines in the Python code where
needed.
``gait.py``
General tools for working with gait data such as gait landmark
identification and 2D inverse dynamics. The main class is ``GaitData``.
``controlid.py``
Tools for identifying control mechanisms in human locomotion.
``markers.py``
Routines for processing marker data.
``motek.py``
Tools for processing and cleaning data from `Motek Medical`_'s products,
e.g. the D-Flow software outputs.
``utils.py``
Helper functions for the other modules.
.. _Motek Medical: http://www.motekmedical.com
Each module has a corresponding test module in ``gaitanalysis/tests``
sub-package which contains unit tests for the classes and functions in the
respective module.
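Once installed, the modules listed above should be importable; as a quick
smoke test (a minimal sketch, assuming a standard install)::
    $ python -c "from gaitanalysis import gait, controlid, markers, motek, utils"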
Octave Libraries
================
Several Octave routines are included in the ``gaitanalysis/octave`` directory.
``2d_inverse_dynamics``
Implements joint angle and moment computations of a 2D lower body human.
``inertial_compensation``
Compensates force plate forces and moments for inertial effects and
re-expresses the forces and moments in the camera reference frame.
``mmat``
Fast matrix multiplication.
``soder``
Computes the rigid body orientation and location of a group of markers.
``time_delay``
Deals with the analog signal time delays.
Installation
============
You will need Python 2.7 or 3.7+ and setuptools to install the packages. It's
best to install the dependencies first (NumPy, SciPy, matplotlib, Pandas,
PyTables).
Supported versions:
- python >= 2.7 or >= 3.7
- numpy >= 1.8.2
- scipy >= 0.13.3
- matplotlib >= 1.3.1
- tables >= 3.1.1
- pandas >= 0.13.1, <= 0.24.0
- pyyaml >= 3.10
- DynamicistToolKit >= 0.4.0
- oct2py >= 2.4.2
- octave >= 3.8.1
We recommend installing Anaconda_ for users in our lab to get all of the
dependencies.
.. _Anaconda: http://docs.continuum.io/anaconda/
We also utilize Octave code, so an install of Octave is also required. See
http://octave.sourceforge.net/index.html for installation instructions.
You can install using pip (or easy_install). Pip will theoretically [#]_ get
the dependencies for you (or at least check if you have them)::
$ pip install https://github.com/csu-hmc/GaitAnalysisToolKit/zipball/master
Or download the source with your preferred method and install manually.
Using Git::
$ git clone [email protected]:csu-hmc/GaitAnalysisToolKit.git
$ cd GaitAnalysisToolKit
Or wget::
$ wget https://github.com/csu-hmc/GaitAnalysisToolKit/archive/master.zip
$ unzip master.zip
$ cd GaitAnalysisToolKit-master
Then for basic installation::
$ python setup.py install
Or install for development purposes::
$ python setup.py develop
.. [#] You will need all build dependencies and also note that matplotlib
doesn't play nice with pip.
Dependencies
------------
It is recommended to install the software dependencies as follows:
Octave can be installed from your package manager or from a downloadable
binary, for example on Debian based Linux::
$ sudo apt-get install octave
For oct2py to work, calling Octave from the command line should work after
Octave is installed. For example,
::
$ octave
GNU Octave, version 3.8.1
Copyright (C) 2014 John W. Eaton and others.
This is free software; see the source code for copying conditions.
There is ABSOLUTELY NO WARRANTY; not even for MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. For details, type 'warranty'.
Octave was configured for "x86_64-pc-linux-gnu".
Additional information about Octave is available at http://www.octave.org.
Please contribute if you find this software useful.
For more information, visit http://www.octave.org/get-involved.html
Read http://www.octave.org/bugs.html to learn how to submit bug reports.
For information about changes from previous versions, type 'news'.
octave:1>
The core dependencies can be installed with conda in a conda environment::
$ conda create -n gait python=2.7 pip numpy scipy matplotlib pytables pandas pyyaml nose sphinx numpydoc oct2py mock
$ source activate gait
And the dependencies which do not have conda packages can be installed into the
environment with pip::
(gait)$ pip install DynamicistToolKit
Tests
=====
When in the repository directory, run the tests with nose::
$ nosetests
Vagrant
=======
A vagrant file and provisioning script are included to test the code on both a
Ubuntu 12.04 and Ubuntu 13.10 box. To load the box and run the tests simply
type::
$ cd vagrant
$ vagrant up
See ``VagrantFile`` and the ``*bootstrap.sh`` files to see what's going on.
Documentation
=============
The documentation is hosted at ReadTheDocs:
http://gait-analysis-toolkit.readthedocs.org
You can build the documentation (currently sparse) if you have Sphinx and
numpydoc::
$ cd docs
$ make html
$ firefox _build/html/index.html
Release Notes
=============
0.2.0
-----
- Support Python 3. [PR `#149`_]
- Minimum dependencies bumped to Ubuntu 14.04 LTS versions and tests run on
latest conda forge packages as of 2018/08/30. [PR `#140`_]
- The minimum version of the required dependency, DynamicistToolKit, was bumped
to 0.4.0. [PR `#134`_]
- Reworked the DFlowData class so that interpolation and resampling is based on
the FrameNumber column in the mocap data instead of the unreliable TimeStamp
column. [PR `#135`_]
- Added note and setup.py check about higher oct2py versions required for
Windows.
.. _#149: https://github.com/csu-hmc/GaitAnalysisToolKit/pull/149
.. _#134: https://github.com/csu-hmc/GaitAnalysisToolKit/pull/134
.. _#135: https://github.com/csu-hmc/GaitAnalysisToolKit/pull/135
.. _#140: https://github.com/csu-hmc/GaitAnalysisToolKit/pull/140
0.1.2
-----
- Fixed bug preventing GaitData.plot_grf_landmarks from working.
- Removed inverse_data.mat from the source distribution.
0.1.1
-----
- Fixed installation issue where the octave and data files were not included in
the installation directory.
0.1.0
-----
- Initial release
- Copied the walk module from DynamicistToolKit @ eecaebd31940179fe25e99a68c91b75d8b8f191f
| PypiClean |
/EARL-pytorch-0.5.1.tar.gz/EARL-pytorch-0.5.1/earl_pytorch/dataset/create_dataset_v3.py | import json
import os
import subprocess
import sys
import ballchasing as bc
import numpy as np
import pandas as pd
from earl_pytorch import EARL
command = r'carball.exe -i "{}" -o "{}" parquet'
ENV = os.environ.copy()
ENV["NO_COLOR"] = "1"
class CarballAnalysis:
METADATA_FNAME = "metadata.json"
ANALYZER_FNAME = "analyzer.json"
BALL_FNAME = "__ball.parquet"
GAME_FNAME = "__game.parquet"
PLAYER_FNAME = "player_{}.parquet"
def __init__(self, processed_folder: str):
# print(processed_folder, self.METADATA_FNAME)
self.metadata = json.load(open(os.path.join(processed_folder, self.METADATA_FNAME)))
self.analyzer = json.load(open(os.path.join(processed_folder, self.ANALYZER_FNAME)))
self.ball = pd.read_parquet(os.path.join(processed_folder, self.BALL_FNAME))
self.game = pd.read_parquet(os.path.join(processed_folder, self.GAME_FNAME))
self.players = {}
for player in self.metadata["players"]:
uid = player["unique_id"]
player_path = os.path.join(processed_folder, self.PLAYER_FNAME.format(uid))
if os.path.exists(player_path):
self.players[uid] = pd.read_parquet(player_path)
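# Usage sketch (the folder path is illustrative): point CarballAnalysis at a
# folder produced by process_replay(), e.g.
#   analysis = CarballAnalysis("parsed/ranked-duels/<replay-id>")
#   print(analysis.ball.head())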
def download_replays(n=1_000):
for gamemode in bc.Playlist.RANKED:
gm_folder = os.path.join(working_dir, "replays", gamemode)
os.makedirs(gm_folder, exist_ok=True)
replay_iter = api.get_replays(
min_rank=bc.Rank.SUPERSONIC_LEGEND,
max_rank=bc.Rank.SUPERSONIC_LEGEND,
season=bc.Season.SEASON_5_FTP,
count=n
)
for replay in replay_iter:
if not os.path.exists(os.path.join(gm_folder, replay["id"])):
api.download_replay(replay["id"], gm_folder)
print(replay["id"], "downloaded")
def process_replay(replay_path, output_folder):
folder, fn = os.path.split(replay_path)
replay_name = fn.replace(".replay", "")
processed_folder = os.path.join(output_folder, replay_name)
os.makedirs(processed_folder, exist_ok=True)
with open(os.path.join(processed_folder, "carball.o.log"), "w", encoding="utf8") as stdout_f:
with open(os.path.join(processed_folder, "carball.e.log"), "w", encoding="utf8") as stderr_f:
return subprocess.run(
command.format(replay_path, processed_folder),
stdout=stdout_f,
stderr=stderr_f,
env=ENV
)
def parse_replays():
for gamemode in bc.Playlist.RANKED:
replay_folder = os.path.join(working_dir, "replays", gamemode)
parsed_folder = os.path.join(working_dir, "parsed", gamemode)
for replay in os.listdir(replay_folder):
process_replay(os.path.join(replay_folder, replay), parsed_folder)
print(replay, "processed")
def train_model():
# NOTE: unfinished training stub -- it is never called from main() below.
# CarballAnalysis() requires a processed_folder argument, and the training
# loop over the preallocated shard has not been implemented yet.
model = EARL()
shard_size = 1_000_000
for epoch in range(100):
data = np.zeros((shard_size, 41, 24))
analysis = CarballAnalysis()
def main():
download_replays()
parse_replays()
if __name__ == '__main__':
working_dir = sys.argv[1]
api = bc.Api(sys.argv[2])
main() | PypiClean |
/matchengine-V2-2.0.1.tar.gz/matchengine-V2-2.0.1/matchengine/internals/typing/matchengine_types.py | from __future__ import annotations
import copy
import datetime
from itertools import chain
from typing import (
NewType,
Tuple,
Union,
List,
Dict,
Any,
Set
)
from bson import ObjectId
from networkx import DiGraph
from matchengine.internals.utilities.object_comparison import nested_object_hash
Trial = NewType("Trial", dict)
ParentPath = NewType("ParentPath", Tuple[Union[str, int]])
MatchClause = NewType("MatchClause", List[Dict[str, Any]])
MatchTree = NewType("MatchTree", DiGraph)
NodeID = NewType("NodeID", int)
MatchClauseLevel = NewType("MatchClauseLevel", str)
MongoQueryResult = NewType("MongoQueryResult", Dict[str, Any])
MongoQuery = NewType("MongoQuery", Dict[str, Any])
GenomicID = NewType("GenomicID", ObjectId)
ClinicalID = NewType("ClinicalID", ObjectId)
Collection = NewType("Collection", str)
class PoisonPill(object):
__slots__ = ()
class CheckIndicesTask(object):
__slots__ = ()
class IndexUpdateTask(object):
__slots__ = (
"collection", "index"
)
def __init__(
self,
collection: str,
index: str
):
self.index = index
self.collection = collection
class QueryTask(object):
__slots__ = (
"trial", "match_clause_data", "match_path",
"query", "clinical_ids"
)
def __init__(
self,
trial: Trial,
match_clause_data: MatchClauseData,
match_path: MatchCriterion,
query: MultiCollectionQuery,
clinical_ids: Set[ClinicalID]
):
self.clinical_ids = clinical_ids
self.query = query
self.match_path = match_path
self.match_clause_data = match_clause_data
self.trial = trial
class UpdateTask(object):
__slots__ = (
"ops", "protocol_no"
)
def __init__(
self,
ops: List,
protocol_no: str
):
self.ops = ops
self.protocol_no = protocol_no
class RunLogUpdateTask(object):
__slots__ = (
"protocol_no"
)
def __init__(
self,
protocol_no: str
):
self.protocol_no = protocol_no
Task = NewType("Task", Union[PoisonPill, CheckIndicesTask, IndexUpdateTask, QueryTask, UpdateTask, RunLogUpdateTask])
class MatchCriteria(object):
__slots__ = (
"criteria", "depth", "node_id"
)
def __init__(
self,
criteria: Dict,
depth: int,
node_id: int
):
self.criteria = criteria
self.depth = depth
self.node_id = node_id
class MatchCriterion(object):
__slots__ = (
"criteria_list", "_hash"
)
def __init__(
self,
criteria_list: List[MatchCriteria]
):
self.criteria_list = criteria_list
self._hash = None
def add_criteria(self, criteria: MatchCriteria):
self._hash = None
self.criteria_list.append(criteria)
def hash(self) -> str:
if self._hash is None:
self._hash = nested_object_hash({"query": [criteria.criteria for criteria in self.criteria_list]})
return self._hash
class QueryPart(object):
__slots__ = (
"mcq_invalidating", "render", "negate",
"_query", "_hash"
)
def __init__(
self,
query: Dict,
negate: bool,
render: bool,
mcq_invalidating: bool,
_hash: str = None
):
self.mcq_invalidating = mcq_invalidating
self.render = render
self.negate = negate
self._query = query
self._hash = _hash
def hash(self) -> str:
if self._hash is None:
self._hash = nested_object_hash(self.query)
return self._hash
def set_query_attr(
self,
key,
value
):
self._query[key] = value
def __copy__(self):
return QueryPart(
self.query,
self.negate,
self.render,
self.mcq_invalidating,
self._hash
)
@property
def query(self):
return self._query
class QueryNode(object):
__slots__ = (
"query_level", "query_depth", "query_parts",
"exclusion", "is_finalized", "_hash",
"_raw_query", "_raw_query_hash", "sibling_nodes",
"node_id", "criterion_ancestor"
)
def __init__(
self,
query_level: str,
node_id: int,
criterion_ancestor: MatchCriteria,
query_depth: int,
query_parts: List[QueryPart],
exclusion: Union[None, bool] = None,
is_finalized: bool = False,
_hash: str = None,
_raw_query: Dict = None,
_raw_query_hash: str = None
):
self.node_id = node_id
self.criterion_ancestor = criterion_ancestor
self.is_finalized = is_finalized
self.query_level = query_level
self.query_depth = query_depth
self.query_parts = query_parts
self.exclusion = exclusion
self._hash = _hash
self._raw_query = _raw_query
self._raw_query_hash = _raw_query_hash
self.sibling_nodes = None
def hash(self) -> str:
if self._hash is None:
self._hash = nested_object_hash({
"_tmp1": [query_part.hash()
for query_part in self.query_parts],
'_tmp2': self.exclusion
})
return self._hash
def add_query_part(self, query_part: QueryPart):
self._hash = None
self._raw_query = None
self._raw_query_hash = None
self.query_parts.append(query_part)
def _extract_raw_query(self):
return {
key: value
for query_part in self.query_parts
for key, value in query_part.query.items()
if query_part.render
}
def extract_raw_query(self):
if self.is_finalized:
if self._raw_query is None:
self._raw_query = self._extract_raw_query()
return self._raw_query
else:
return self._extract_raw_query()
def raw_query_hash(self):
if self._raw_query_hash is None:
if not self.is_finalized:
raise Exception("Query node is not finalized")
else:
self._raw_query_hash = nested_object_hash(self.extract_raw_query())
return self._raw_query_hash
def finalize(self):
self.is_finalized = True
def get_query_part_by_key(self, key: str) -> QueryPart:
return next(chain((query_part
for query_part in self.query_parts
if key in query_part.query),
iter([None])))
def get_query_part_value_by_key(self, key: str, default: Any = None) -> Any:
query_part = self.get_query_part_by_key(key)
if query_part is not None:
return query_part.query.get(key, default)
@property
def mcq_invalidating(self):
return any(query_part.mcq_invalidating for query_part in self.query_parts)
def __copy__(self):
return QueryNode(
self.query_level,
self.node_id,
self.criterion_ancestor,
self.query_depth,
[query_part.__copy__()
for query_part
in self.query_parts],
self.exclusion,
self.is_finalized,
self._hash,
self._raw_query,
self._raw_query_hash
)
class QueryNodeContainer(object):
__slots__ = (
"query_nodes"
)
def __init__(
self,
query_nodes: List[QueryNode]
):
self.query_nodes = query_nodes
def __copy__(self):
return QueryNodeContainer(
[query_node.__copy__()
for query_node
in self.query_nodes]
)
class MultiCollectionQuery(object):
__slots__ = (
"genomic", "clinical"
)
def __init__(
self,
genomic: List[QueryNodeContainer],
clinical: List[QueryNodeContainer]
):
self.genomic = genomic
self.clinical = clinical
def __copy__(self):
return MultiCollectionQuery(
[query_node_container.__copy__()
for query_node_container
in self.genomic],
[query_node_container.__copy__()
for query_node_container
in self.clinical],
)
class MatchClauseData(object):
__slots__ = (
"match_clause", "internal_id", "code",
"coordinating_center", "is_suspended", "status",
"parent_path", "match_clause_level", "match_clause_additional_attributes",
"protocol_no"
)
def __init__(self,
match_clause: MatchClause,
internal_id: str,
code: str,
coordinating_center: str,
is_suspended: bool,
status: str,
parent_path: ParentPath,
match_clause_level: MatchClauseLevel,
match_clause_additional_attributes: dict,
protocol_no: str):
self.code = code
self.coordinating_center = coordinating_center
self.is_suspended = is_suspended
self.status = status
self.parent_path = parent_path
self.match_clause_level = match_clause_level
self.internal_id = internal_id
self.match_clause_additional_attributes = match_clause_additional_attributes
self.protocol_no = protocol_no
self.match_clause = match_clause
class GenomicMatchReason(object):
__slots__ = (
"query_node", "width", "clinical_id",
"genomic_id", "clinical_width", "depth",
"show_in_ui"
)
reason_name = "genomic"
def __init__(
self,
query_node: QueryNode,
width: int,
clinical_width: int,
clinical_id: ClinicalID,
genomic_id: Union[GenomicID, None],
show_in_ui: bool
):
self.show_in_ui = show_in_ui
self.clinical_width = clinical_width
self.genomic_id = genomic_id
self.clinical_id = clinical_id
self.width = width
self.query_node = query_node
self.depth = query_node.query_depth
def extract_raw_query(self):
return self.query_node.extract_raw_query()
class ClinicalMatchReason(object):
__slots__ = (
"query_part", "clinical_id", "depth",
"show_in_ui"
)
reason_name = "clinical"
width = 1
def __init__(
self,
query_part: QueryPart,
clinical_id: ClinicalID,
depth: int,
show_in_ui: bool
):
self.show_in_ui = show_in_ui
self.clinical_id = clinical_id
self.query_part = query_part
self.depth = depth
def extract_raw_query(self):
return self.query_part.query
MatchReason = NewType("MatchReason", Union[GenomicMatchReason, ClinicalMatchReason])
class TrialMatch(object):
__slots__ = (
"trial", "match_clause_data", "match_criterion",
"match_clause_data", "multi_collection_query", "match_reason",
"run_log"
)
def __init__(
self,
trial: Trial,
match_clause_data: MatchClauseData,
match_criterion: MatchCriterion,
multi_collection_query: MultiCollectionQuery,
match_reason: MatchReason,
run_log: datetime.datetime,
):
self.run_log = run_log
self.match_reason = match_reason
self.multi_collection_query = multi_collection_query
self.match_criterion = match_criterion
self.match_clause_data = match_clause_data
self.trial = trial
class Cache(object):
__slots__ = (
"docs", "ids", "in_process"
)
docs: Dict
ids: Dict
in_process: Dict
def __init__(self):
self.docs = dict()
self.ids = dict()
self.in_process = dict()
class Secrets(object):
__slots__ = (
"HOST", "PORT", "DB",
"AUTH_DB", "RO_USERNAME", "RO_PASSWORD",
"RW_USERNAME", "RW_PASSWORD", "REPLICA_SET",
"MAX_POOL_SIZE", "MIN_POOL_SIZE"
)
def __init__(
self,
host: str,
port: int,
db: str,
auth_db: str,
ro_username: str,
ro_password: str,
rw_username: str,
rw_password: str,
replica_set: str,
max_pool_size: str,
min_pool_size: str
):
self.MIN_POOL_SIZE = min_pool_size
self.MAX_POOL_SIZE = max_pool_size
self.REPLICA_SET = replica_set
self.RW_PASSWORD = rw_password
self.RW_USERNAME = rw_username
self.RO_PASSWORD = ro_password
self.RO_USERNAME = ro_username
self.AUTH_DB = auth_db
self.DB = db
self.PORT = port
self.HOST = host
class QueryTransformerResult(object):
__slots__ = (
"results"
)
results: List[QueryPart]
def __init__(
self,
query_clause: Dict = None,
negate: bool = None,
render: bool = True,
mcq_invalidating: bool = False
):
self.results = list()
if query_clause is not None:
if negate is not None:
self.results.append(QueryPart(query_clause, negate, render, mcq_invalidating))
else:
raise Exception("If adding query result directly to results container, "
"both Negate and Query must be specified")
def add_result(
self,
query_clause: Dict,
negate: bool,
render: bool = True,
mcq_invalidating: bool = False
):
self.results.append(QueryPart(query_clause, negate, render, mcq_invalidating)) | PypiClean |
/Flask-RESTbolt-0.1.0.tar.gz/Flask-RESTbolt-0.1.0/docs/installation.rst | .. _installation:
Installation
============
.. currentmodule:: flask_restful
Install Flask-RESTful with ``pip`` ::
pip install flask-restful
The development version can be downloaded from `its page at GitHub
<https://github.com/flask-restful/flask-restful>`_. ::
git clone https://github.com/flask-restful/flask-restful.git
cd flask-restful
python setup.py develop
Flask-RESTful has the following dependencies (which will be automatically
installed if you use ``pip``):
* `Flask <http://flask.pocoo.org>`_ version 0.8 or greater
Flask-RESTful requires Python version 2.6, 2.7, 3.3, or 3.4.
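A minimal API sketch following the upstream Flask-RESTful quickstart (the
resource below is illustrative)::
    from flask import Flask
    from flask_restful import Resource, Api
    app = Flask(__name__)
    api = Api(app)
    class HelloWorld(Resource):
        def get(self):
            return {'hello': 'world'}
    api.add_resource(HelloWorld, '/')
    if __name__ == '__main__':
        app.run(debug=True)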
| PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/iron-iconset/.github/ISSUE_TEMPLATE.md | <!-- Instructions: https://github.com/PolymerElements/iron-iconset/CONTRIBUTING.md#filing-issues -->
### Description
<!-- Example: The `paper-foo` element causes the page to turn pink when clicked. -->
### Expected outcome
<!-- Example: The page stays the same color. -->
### Actual outcome
<!-- Example: The page turns pink. -->
### Live Demo
<!-- Example: https://jsbin.com/cagaye/edit?html,output -->
### Steps to reproduce
<!-- Example
1. Put a `paper-foo` element in the page.
2. Open the page in a web browser.
3. Click the `paper-foo` element.
-->
### Browsers Affected
<!-- Check all that apply -->
- [ ] Chrome
- [ ] Firefox
- [ ] Safari 9
- [ ] Safari 8
- [ ] Safari 7
- [ ] Edge
- [ ] IE 11
- [ ] IE 10
| PypiClean |
/DMS_APP-0.2.1-py3-none-any.whl/dms_app/resources/login_register/super_login.py | import logging
from flask import request, Flask
from ...db.db_connection import database_access
from flask_restx import Resource, fields
from ...namespace import api
from ...response_helper import get_response
import hashlib
import jwt
from datetime import datetime, timedelta
from ...config import Config
sec_key = Config.SEC_KEY
flask_app = Flask(__name__)
flask_app.config['SECRET_KEY'] = 'cc6e455f0b76439d99cc8e1669232518'
super_login = api.model("SuperLogin", {
"email": fields.String,
"password": fields.String})
role = "super_admin"
dashboard = "both_dashboard"
first_name = "Admin"
privileges = [{
"Roles_Privileges":
[
{
"read": "true",
"write": "true"
}
]
},
{
"Users": [
{
"read": "true",
"write": "true"
}
]
},
{
"Create_Violation": [
{
"read": "true",
"write": "true"
}
]
}, {
"Customize_Form": [
{
"read": "true",
"write": "true"
}
]
},
{
"Checklist_Master": [
{
"read": "true",
"write": "true"
}
]
},{
"Checklist_Configure": [
{
"read": "true",
"write": "true"
}
]
},{
"Checklist_Approval": [
{
"read": "true",
"write": "true"
}
]
}, {
"Checklist_History": [
{
"read": "true",
"write": "true"
}
]
}, {
"Fingerprint_Authentication": [
{
"read": "true",
"write": "true"
}
]
}, {
"Fingerprint_Enrollment": [
{
"read": "true",
"write": "true"
}
]
}, {
"Add_New_Profile": [
{
"read": "true",
"write": "true"
}
]
}, {
"View_Profiles": [
{
"read": "true",
"write": "true"
}
]
}, {
"Edit_Profile": [
{
"read": "true",
"write": "true"
}
]
}, {
"Person_Profile": [
{
"read": "true",
"write": "true"
}
]
},{
"Security_Checklist": [
{
"read": "true",
"write": "true"
}
]
}]
class SuperLogin(Resource):
@api.expect(super_login, validate=True)
def post(self):
args = request.get_json()
try:
database_connection = database_access()
dms_super_admin = database_connection["dms_super_admin"]
password = hashlib.md5(args["password"].encode('utf-8')).digest() # hash the submitted password before comparing it with the stored hash
data = dms_super_admin.find_one({"email": args["email"]})
if data:
if data["password"] == password:
_response = get_response(200)
session_id = jwt.encode({
'email': data['email'],
'exp': datetime.utcnow() + timedelta(days=1)
}, sec_key)
_response["session_id"] = session_id
_response["email"] = data["email"]
_response["role"] = role
_response["first_name"] = first_name
_response["dashboard"] = dashboard
_response["privileges"] = privileges
return _response
else:
logging.error(get_response(401))
return get_response(401)
else:
logging.error(get_response(404))
return get_response(404)
        except Exception as e:
            logging.error(e)
            return get_response(500)  # assumes the get_response helper also maps a generic 500 code | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/SVG/fonts/Neo-Euler/Size2/Regular/Main.js | MathJax.OutputJax.SVG.FONTDATA.FONTS.NeoEulerMathJax_Size2={directory:"Size2/Regular",family:"NeoEulerMathJax_Size2",id:"NEOEULERSIZE2",32:[0,0,333,0,0,""],40:[1599,199,596,180,574,"180 700c0 336 89 675 346 899h48l-17 -17c-230 -225 -301 -556 -301 -882s71 -657 301 -882l17 -17h-48c-257 224 -346 563 -346 899"],41:[1599,199,595,22,415,"415 700c0 -336 -89 -675 -346 -899h-47l16 17c231 225 301 556 301 882s-70 657 -301 882l-16 17h47c257 -224 346 -563 346 -899"],47:[1599,200,811,53,759,"759 1583l-662 -1783l-41 8l-3 14l660 1775l12 2l29 -5"],91:[1674,125,472,226,453,"226 -125v1799h227v-47h-180v-1705h180v-47h-227"],92:[1599,200,811,53,759,"53 1583l662 -1783l41 8l3 14l-660 1775l-12 2l-29 -5"],93:[1674,125,472,18,245,"198 -78v1705h-180v47h227v-1799h-227v47h180"],123:[1599,200,667,119,547,"296 61v416c0 24 -7 127 -167 201c-10 5 -10 7 -10 22c0 12 0 16 8 20c38 17 157 72 168 196c1 8 1 65 1 98v312c0 83 0 120 69 189c35 35 128 84 162 84c19 0 20 -1 20 -20c0 -13 0 -17 -7 -20c-41 -17 -170 -71 -170 -188v-450c-1 -75 -56 -161 -194 -222 c119 -49 192 -133 194 -220v-425c0 -56 0 -145 168 -214c9 -3 9 -6 9 -20c0 -19 -1 -20 -20 -20c-37 0 -127 51 -160 82c-71 69 -71 114 -71 179"],124:[1897,208,213,86,126,"126 -200l-40 -8v2097l40 8v-2097"],125:[1599,200,667,119,547,"296 28v450c1 75 56 161 194 222c-119 49 -192 133 -194 220v425c0 59 0 143 -167 213c-10 4 -10 9 -10 21c0 19 3 20 21 20c34 0 124 -49 159 -82c71 -69 71 -114 71 -179v-416c0 -7 0 -9 3 -26c19 -109 135 -162 167 -177c7 -3 7 -8 7 -20c0 -10 0 -16 -6 -19 c-40 -18 -159 -73 -170 -197c-1 -8 -1 -65 -1 -98v-312c0 -83 0 -120 -69 -189c-36 -36 -129 -84 -161 -84c-18 0 -21 1 -21 20c0 10 0 16 8 20c42 18 169 72 169 188"],160:[0,0,333,0,0,""],8214:[1897,208,403,86,316,"316 -200l-40 -8v2097l40 8v-2097zM126 -200l-40 -8v2097l40 8v-2097"],8260:[1599,200,811,53,759,"759 1583l-662 -1783l-41 8l-3 14l660 1775l12 2l29 -5"],8725:[1599,200,811,53,759,"759 1583l-662 -1783l-41 8l-3 14l660 1775l12 2l29 -5"],8730:[1800,1,1000,110,1024,"458 -1h-34l-230 802l-68 -80l-16 15l139 163l215 -750l522 1651l38 -12"],8739:[1897,208,213,86,126,"126 -200l-40 -8v2097l40 8v-2097"],8741:[1297,208,403,86,316,"316 -200l-40 -8v1497l40 8v-1497zM126 -200l-40 -8v1497l40 8v-1497"],8743:[1128,267,1549,56,1492,"1492 -238l-681 1360l-24 6l-731 -1373l21 -22l76 2l641 1197l606 -1199"],8744:[1069,326,1549,56,1492,"1492 1040l-681 -1360l-24 -6l-731 1373l21 22l76 -2l641 -1197l606 1199"],8745:[1359,-1,1110,56,1053,"56 878c0 271 228 481 499 481c270 0 498 -210 498 -481v-877h-83v877c0 222 -187 398 -415 398s-416 -176 -416 -398v-877h-83v877"],8746:[1317,41,1110,56,1053,"139 440c0 -222 188 -398 416 -398s415 176 415 398v877h83v-877c0 -271 -228 -481 -498 -481c-271 0 -499 210 -499 481v877h83v-877"],8846:[1317,41,1110,56,1053,"139 440c0 -222 188 -398 416 -398s415 176 415 398v877h83v-877c0 -271 -228 -481 -498 -481c-271 0 -499 210 -499 481v877h83v-877zM513 989h83v-249h249v-83h-249v-249h-83v249h-249v83h249v249"],8896:[1128,267,1549,56,1492,"1492 -238l-681 1360l-24 6l-731 -1373l21 -22l76 2l641 1197l606 -1199"],8897:[1069,326,1549,56,1492,"1492 1040l-681 -1360l-24 -6l-731 1373l21 22l76 -2l641 -1197l606 1199"],8898:[1359,-1,1110,56,1053,"56 878c0 271 228 481 499 481c270 0 498 -210 498 -481v-877h-83v877c0 222 -187 398 -415 398s-416 -176 -416 -398v-877h-83v877"],8899:[1317,41,1110,56,1053,"139 440c0 -222 188 -398 416 -398s415 176 415 398v877h83v-877c0 -271 -228 -481 -498 -481c-271 0 -499 210 -499 
481v877h83v-877"],8968:[1599,200,527,226,509,"226 -200v1799h283v-47h-236v-1752h-47"],8969:[1599,200,527,18,301,"254 -200v1752h-236v47h283v-1799h-47"],8970:[1599,200,527,226,509,"226 -200v1799h47v-1752h236v-47h-283"],8971:[1599,200,527,18,301,"254 -153v1752h47v-1799h-283v47h236"],9001:[1536,234,629,109,520,"160 651l360 -867l-43 -18l-368 885l368 885l43 -18"],9002:[1536,234,693,89,500,"133 1536l367 -885l-367 -885l-44 18l360 867l-360 867"],9180:[794,-414,1911,56,1855,"67 458l36 40c7 8 23 22 35 32c1 0 42 36 81 64c232 156 500 200 737 200c166 0 327 -24 459 -66c211 -70 350 -176 432 -272c8 -8 8 -10 8 -24c0 -12 0 -18 -10 -18c-2 0 -5 0 -8 2c-177 176 -438 302 -882 302c-240 0 -487 -40 -679 -148c-110 -62 -165 -116 -201 -152 c-3 -2 -5 -4 -9 -4c-9 0 -10 8 -10 10v22c0 2 10 10 11 12"],9181:[144,236,1911,56,1855,"956 -236c-161 0 -323 22 -460 66c-211 70 -350 176 -432 272c-8 8 -8 10 -8 24c0 10 0 18 10 18c3 0 6 -2 8 -4c183 -180 448 -300 882 -300c240 0 487 40 679 148c110 62 165 116 200 150c2 2 6 6 10 6c10 0 10 -8 10 -18c0 -14 0 -16 -6 -22c-21 -24 -60 -62 -76 -76 c-225 -196 -530 -264 -817 -264"],9182:[912,-484,1911,56,1855,"317 736h416c24 0 127 6 201 166c5 10 7 10 22 10c12 0 16 0 20 -8c17 -38 72 -156 196 -168h98h312c83 0 120 0 189 -70c35 -34 84 -128 84 -162c0 -18 -1 -20 -20 -20c-13 0 -17 0 -20 8c-17 40 -71 170 -188 170h-450c-75 0 -161 56 -222 194 c-49 -120 -133 -192 -220 -194h-425c-56 0 -145 0 -214 -168c-3 -10 -6 -10 -20 -10c-19 0 -20 2 -20 20c0 38 51 128 82 160c69 72 114 72 179 72"],9183:[70,358,1911,56,1855,"284 -106h450c75 -2 161 -56 222 -194c49 118 133 192 220 194h425c59 0 143 0 213 166c4 10 9 10 21 10c19 0 20 -2 20 -20c0 -34 -49 -124 -82 -160c-69 -70 -114 -70 -179 -70h-416c-7 0 -9 0 -26 -4c-109 -18 -162 -134 -177 -166c-3 -8 -8 -8 -20 -8 c-10 0 -16 0 -19 6c-18 40 -73 160 -197 170c-8 2 -65 2 -98 2h-312c-83 0 -120 0 -189 68c-36 36 -84 130 -84 162c0 18 1 20 20 20c10 0 16 0 20 -8c18 -42 72 -168 188 -168"],10216:[939,237,501,95,392,"139 351l253 -572l-37 -16l-260 588l260 588l37 -16"],10217:[939,237,568,79,375,"115 939l260 -588l-260 -588l-36 16l252 572l-252 572"]};MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/Size2/Regular/Main.js"); | PypiClean |
/Keras28Models-0.1.0.tar.gz/Keras28Models-0.1.0/README.md | # Keras28 Models
This is a Python package for easily building deep learning applications through transfer learning, using 28 pre-trained models with just a few lines of code, so you avoid wasting time on boilerplate scripting. You can read about the more involved way of scripting Keras models in the [Keras Models API documentation](https://keras.io/api/).
The package offers simple, fast code for training on your custom dataset instead of complex scripting; more than 25% of the time spent on deep learning projects goes into collecting and cleaning data
and building convolutional neural networks (CNNs). This package makes image classification and recognition applications easy: it saves the final model weights and compares the results of all pre-trained models so you can choose the most accurate one.
It converts the results to a DataFrame for easy viewing (a CSV spreadsheet) containing the model name, number of model parameters, and validation accuracy, and the final code plots the number of model parameters against validation accuracy for choosing a benchmark.
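Under the hood, this kind of comparison is a standard Keras transfer-learning loop. The sketch below illustrates that pattern with the plain TensorFlow/Keras API; the `data/train` and `data/val` directories, the `MobileNetV2` backbone, and the hyperparameters are illustrative placeholders, not this package's own API.
```python
import tensorflow as tf
# Placeholder directories: one subfolder per class, as Keras expects.
train_ds = tf.keras.utils.image_dataset_from_directory(
    "data/train", image_size=(224, 224), label_mode="categorical")
val_ds = tf.keras.utils.image_dataset_from_directory(
    "data/val", image_size=(224, 224), label_mode="categorical")
num_classes = len(train_ds.class_names)
# Frozen pre-trained backbone plus a small trainable classification head.
base = tf.keras.applications.MobileNetV2(
    input_shape=(224, 224, 3), include_top=False, weights="imagenet")
base.trainable = False
model = tf.keras.Sequential([
    tf.keras.layers.Rescaling(1.0 / 127.5, offset=-1),  # MobileNetV2 scaling
    base,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(num_classes, activation="softmax"),
])
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])
history = model.fit(train_ds, validation_data=val_ds, epochs=5)
# Two of the columns the package tabulates: parameter count and val accuracy.
print(model.count_params(), max(history.history["val_accuracy"]))
```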
## Installation
```
pip install keras28model==0.1.0
# or in Google Colab
!pip install keras28model==0.1.0
```
## Tutorial
[Colab Google Drive](https://colab.research.google.com/drive/1IVzMGgpm-KQQqhQU3brVl331FyP7Ueke?usp=sharing)
```
You can see the tutorial in Colab on Google Drive
```
| PypiClean |
/Flask-State-test-1.0.2b1.tar.gz/Flask-State-test-1.0.2b1/README.md | 
[](https://github.com/yoobool/flask-state/tree/master/.github/ISSUE_TEMPLATE)
[](https://gitter.im/flaskstate/community)
[](https://www.npmjs.com/package/flask-state)
[](https://github.com/yoobool/flask-state/blob/master/LICENSE)
[](https://pypi.org/project/Flask-State/)
# Flask-State
Flask-State is a visual plug-in based on Flask. It records the local machine's state every minute, reads the Redis status if you have configured Redis, and generates data charts for users through [Echarts](https://github.com/apache/incubator-echarts).

## Installation
Install and update using [pip](https://pip.pypa.io/en/stable/quickstart/):
```
$ pip install Flask-State
```
Display components can use ```<script>``` tag from a CDN, or as a flask-state package on npm.
```html
<script src="https://cdn.jsdelivr.net/gh/yoobool/[email protected]/packages/umd/flask-state.min.js"></script>
```
```
npm install flask-state --save
```
## Usage
After Flask-State is installed, you also need to import its JavaScript and CSS files and bind a conventional ID value to your element. You can also choose to modify some of the configuration options.
### Firstly: we'll set up a Flask app.
```python
from flask import Flask
app = Flask(__name__)
```
### Secondly: bind the database address.
```python
from flask_state import DEFAULT_BIND_SQLITE
app.config['SQLALCHEMY_BINDS'] = {DEFAULT_BIND_SQLITE: 'sqlite:///path'}
```
### Thirdly: call the init_app method of Flask-State to initialize the configuration.
```python
import flask_state
flask_state.init_app(app)
```
### Lastly: select the appropriate method to import the view files.
```html
<!--CDN-->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/yoobool/[email protected]/packages/flask-state.css">
<script src="https://cdn.jsdelivr.net/gh/yoobool/[email protected]/packages/umd/flask-state.min.js"></script>
<script type="text/javascript">
// Create a DOM node with ID 'test'. After init() binds the node, click it to open the monitoring window
flaskState.init({dom:document.getElementById('test')});
</script>
```
```javascript
// npm
import 'flask-state/flask-state.css';
import {init} from 'flask-state';
// Create a DOM node with ID 'test'. After init() binds the node, click it to open the monitoring window
init({dom:document.getElementById('test')});
```
### Extra: you can also customize some configuration (non-essential).
#### Monitor the redis status.
```python
app.config['REDIS_CONF'] = {'REDIS_STATUS': True, 'REDIS_HOST': '192.168.1.1', 'REDIS_PORT':16380, 'REDIS_PASSWORD': 'psw'}
```
#### Modify the time interval for saving monitoring records.
```python
# The minimum interval is 60 seconds. The default interval is 60 seconds
import flask_state
SECS = 60
flask_state.init_app(app, SECS)
```
#### Custom logger object.
```python
import flask_state
import logging
custom_logger = logging.getLogger(__name__)
flask_state.init_app(app, interval=60, log_instance=custom_logger)
```
#### Custom binding triggers the object of the window.
```javascript
/* If no object is passed in at initialization, the plug-in automatically creates a floating ball on the right side of the page */
/* Note: all pages share one plug-in instance. Calling the init() method multiple times only triggers plug-in events for the newly bound object */
flaskState.init();
```
#### Select the language in which the plug-in is displayed; currently en and zh are supported.
```html
<!--Note: the language file imported through the tag must be loaded after the plug-in itself-->
<script src="https://cdn.jsdelivr.net/gh/yoobool/[email protected]/packages/umd/flask-state.min.js"></script>
<script src="https://cdn.jsdelivr.net/gh/yoobool/[email protected]/packages/umd/zh.js"></script>
<script type="text/javascript">
flaskState.init({lang:flaskState.zh});
</script>
```
```javascript
import {init} from 'flask-state';
import {zh} from 'flask-state/i18n.js';
init({lang:zh});
```
## Contributing
Welcome to [open an issue](https://github.com/yoobool/flask-state/issues/new)!
Flask-State follows the [Contributor Covenant](https://www.contributor-covenant.org/version/1/3/0/code-of-conduct/) Code of Conduct.
## Community Channel
We're on [Gitter](https://gitter.im/flaskstate/community) ! Please join us.
## License
Flask-State is available under the BSD-3-Clause License. | PypiClean |
/LIBTwinSVM-0.3.0-cp35-cp35m-manylinux1_x86_64.whl/libtsvm/estimators.py |
# LIBTwinSVM: A Library for Twin Support Vector Machines
# Developers: Mir, A. and Mahdi Rahbar
# License: GNU General Public License v3.0
"""
In this module, Standard TwinSVM and Least Squares TwinSVM estimators are
defined.
"""
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_X_y
from libtsvm.optimizer import clipdcd
import numpy as np
class BaseTSVM(BaseEstimator):
"""
Base class for TSVM-based estimators
Parameters
----------
kernel : str
Type of the kernel function which is either 'linear' or 'RBF'.
rect_kernel : float
Percentage of training samples for Rectangular kernel.
C1 : float
Penalty parameter of first optimization problem.
C2 : float
Penalty parameter of second optimization problem.
gamma : float
Parameter of the RBF kernel function.
Attributes
----------
mat_C_t : array-like, shape = [n_samples, n_samples]
A matrix that contains kernel values.
    clf_name : str
Name of the classifier.
w1 : array-like, shape=[n_features]
Weight vector of class +1's hyperplane.
b1 : float
Bias of class +1's hyperplane.
w2 : array-like, shape=[n_features]
Weight vector of class -1's hyperplane.
b2 : float
Bias of class -1's hyperplane.
"""
def __init__(self, kernel, rect_kernel, C1, C2, gamma):
self.C1 = C1
self.C2 = C2
self.gamma = gamma
self.kernel = kernel
self.rect_kernel = rect_kernel
self.mat_C_t = None
self.clf_name = None
# Two hyperplanes attributes
self.w1, self.b1, self.w2, self.b2 = None, None, None, None
self.check_clf_params()
def check_clf_params(self):
"""
Checks whether the estimator's input parameters are valid.
"""
if not(self.kernel in ['linear', 'RBF']):
raise ValueError("\"%s\" is an invalid kernel. \"linear\" and"
" \"RBF\" values are valid." % self.kernel)
def get_params_names(self):
"""
For retrieving the names of hyper-parameters of the TSVM-based
estimator.
Returns
-------
parameters : list of str, {['C1', 'C2'], ['C1', 'C2', 'gamma']}
            Returns the names of the hyperparameters, which are the same as
            the class's attributes.
"""
return ['C1', 'C2'] if self.kernel == 'linear' else ['C1', 'C2',
'gamma']
def fit(self, X, y):
"""
It fits a TSVM-based estimator.
THIS METHOD SHOULD BE IMPLEMENTED IN CHILD CLASS.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training feature vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape(n_samples,)
Target values or class labels.
"""
        pass  # Implement fit method in child class
def predict(self, X):
"""
Performs classification on samples in X using the TSVM-based model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature vectors of test data.
Returns
-------
array, shape (n_samples,)
            Predicted class labels of test data.
"""
# Assign data points to class +1 or -1 based on distance from
# hyperplanes
return 2 * np.argmin(self.decision_function(X), axis=1) - 1
def decision_function(self, X):
"""
Computes distance of test samples from both non-parallel hyperplanes
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
array-like, shape(n_samples, 2)
distance from both hyperplanes.
"""
# dist = np.zeros((X.shape[0], 2), dtype=np.float64)
#
# kernel_f = {'linear': lambda i: X[i, :],
# 'RBF': lambda i: rbf_kernel(X[i, :], self.mat_C_t,
# self.gamma)}
#
# for i in range(X.shape[0]):
#
# # Prependicular distance of data pint i from hyperplanes
# dist[i, 1] = np.abs(np.dot(kernel_f[self.kernel](i), self.w1) \
# + self.b1)
#
# dist[i, 0] = np.abs(np.dot(kernel_f[self.kernel](i), self.w2) \
# + self.b2)
#
# return dist
kernel_f = {'linear': lambda: X, 'RBF': lambda: rbf_kernel(X,
self.mat_C_t, self.gamma)}
return np.column_stack((np.abs(np.dot(kernel_f[self.kernel](), self.w2)
+ self.b2),
np.abs(np.dot(kernel_f[self.kernel](),
self.w1) + self.b1)))
class TSVM(BaseTSVM):
"""
Standard Twin Support Vector Machine for binary classification.
It inherits attributes of :class:`BaseTSVM`.
Parameters
----------
kernel : str, optional (default='linear')
Type of the kernel function which is either 'linear' or 'RBF'.
rect_kernel : float, optional (default=1.0)
Percentage of training samples for Rectangular kernel.
C1 : float, optional (default=1.0)
Penalty parameter of first optimization problem.
C2 : float, optional (default=1.0)
Penalty parameter of second optimization problem.
gamma : float, optional (default=1.0)
Parameter of the RBF kernel function.
"""
def __init__(self, kernel='linear', rect_kernel=1, C1=2**0, C2=2**0,
gamma=2**0):
super(TSVM, self).__init__(kernel, rect_kernel, C1, C2, gamma)
self.clf_name = 'TSVM'
# @profile
def fit(self, X_train, y_train):
"""
It fits the binary TwinSVM model according to the given training data.
Parameters
----------
X_train : array-like, shape (n_samples, n_features)
Training feature vectors, where n_samples is the number of samples
and n_features is the number of features.
y_train : array-like, shape(n_samples,)
Target values or class labels.
"""
X_train = np.array(X_train, dtype=np.float64) if isinstance(X_train,
list) else X_train
y_train = np.array(y_train) if isinstance(y_train, list) else y_train
# Matrix A or class 1 samples
mat_A = X_train[y_train == 1]
# Matrix B or class -1 data
mat_B = X_train[y_train == -1]
# Vectors of ones
mat_e1 = np.ones((mat_A.shape[0], 1))
mat_e2 = np.ones((mat_B.shape[0], 1))
if self.kernel == 'linear': # Linear kernel
mat_H = np.column_stack((mat_A, mat_e1))
mat_G = np.column_stack((mat_B, mat_e2))
elif self.kernel == 'RBF': # Non-linear
# class 1 & class -1
mat_C = np.row_stack((mat_A, mat_B))
self.mat_C_t = np.transpose(mat_C)[:, :int(mat_C.shape[0] *
self.rect_kernel)]
mat_H = np.column_stack((rbf_kernel(mat_A, self.mat_C_t,
self.gamma), mat_e1))
mat_G = np.column_stack((rbf_kernel(mat_B, self.mat_C_t,
self.gamma), mat_e2))
mat_H_t = np.transpose(mat_H)
mat_G_t = np.transpose(mat_G)
# Compute inverses:
        # Regularization term used to handle ill-conditioning
reg_term = 2 ** float(-7)
mat_H_H = np.linalg.inv(np.dot(mat_H_t, mat_H) + (reg_term *
np.identity(mat_H.shape[1])))
# Wolfe dual problem of class 1
mat_dual1 = np.dot(np.dot(mat_G, mat_H_H), mat_G_t)
# Obtaining Lagrange multipliers using ClipDCD optimizer
alpha_d1 = clipdcd.optimize(mat_dual1, self.C1).reshape(mat_dual1.shape[0], 1)
# Obtain hyperplanes
hyper_p_1 = -1 * np.dot(np.dot(mat_H_H, mat_G_t), alpha_d1)
# Free memory
del mat_dual1, mat_H_H
mat_G_G = np.linalg.inv(np.dot(mat_G_t, mat_G) + (reg_term *
np.identity(mat_G.shape[1])))
# Wolfe dual problem of class -1
mat_dual2 = np.dot(np.dot(mat_H, mat_G_G), mat_H_t)
alpha_d2 = clipdcd.optimize(mat_dual2, self.C2).reshape(mat_dual2.shape[0], 1)
hyper_p_2 = np.dot(np.dot(mat_G_G, mat_H_t), alpha_d2)
# Class 1
self.w1 = hyper_p_1[:hyper_p_1.shape[0] - 1, :]
self.b1 = hyper_p_1[-1, :]
# Class -1
self.w2 = hyper_p_2[:hyper_p_2.shape[0] - 1, :]
self.b2 = hyper_p_2[-1, :]
class LSTSVM(BaseTSVM):
"""
Least Squares Twin Support Vector Machine (LSTSVM) for binary
classification. It inherits attributes of :class:`BaseTSVM`.
Parameters
----------
kernel : str, optional (default='linear')
Type of the kernel function which is either 'linear' or 'RBF'.
rect_kernel : float, optional (default=1.0)
Percentage of training samples for Rectangular kernel.
C1 : float, optional (default=1.0)
Penalty parameter of first optimization problem.
C2 : float, optional (default=1.0)
Penalty parameter of second optimization problem.
gamma : float, optional (default=1.0)
Parameter of the RBF kernel function.
mem_optimize : boolean, optional (default=False)
        If True, it significantly reduces memory consumption.
However, the memory optimization increases the CPU time.
"""
def __init__(self, kernel='linear', rect_kernel=1, C1=2**0, C2=2**0,
gamma=2**0, mem_optimize=False):
super(LSTSVM, self).__init__(kernel, rect_kernel, C1, C2, gamma)
self.mem_optimize = mem_optimize
self.clf_name = 'LSTSVM'
# @profile
def fit(self, X, y):
"""
It fits the binary Least Squares TwinSVM model according to the given
training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training feature vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape(n_samples,)
Target values or class labels.
"""
X = np.array(X, dtype=np.float64) if isinstance(X, list) else X
y = np.array(y) if isinstance(y, list) else y
# Matrix A or class 1 data
mat_A = X[y == 1]
# Matrix B or class -1 data
mat_B = X[y == -1]
# Vectors of ones
mat_e1 = np.ones((mat_A.shape[0], 1))
mat_e2 = np.ones((mat_B.shape[0], 1))
        # Regularization term used to handle ill-conditioning
reg_term = 2 ** float(-7)
if self.kernel == 'linear':
mat_H = np.column_stack((mat_A, mat_e1))
mat_G = np.column_stack((mat_B, mat_e2))
elif self.kernel == 'RBF':
# class 1 & class -1
mat_C = np.row_stack((mat_A, mat_B))
self.mat_C_t = np.transpose(mat_C)[:, :int(mat_C.shape[0] *
self.rect_kernel)]
mat_H = np.column_stack((rbf_kernel(mat_A, self.mat_C_t,
self.gamma), mat_e1))
mat_G = np.column_stack((rbf_kernel(mat_B, self.mat_C_t,
self.gamma), mat_e2))
mat_H_t = np.transpose(mat_H)
mat_G_t = np.transpose(mat_G)
if self.mem_optimize:
inv_p_1 = np.linalg.inv((np.dot(mat_G_t, mat_G) + (1 / self.C1) \
* np.dot(mat_H_t,mat_H)) + (reg_term * np.identity(mat_H.shape[1])))
# Determine parameters of two non-parallel hyperplanes
hyper_p_1 = -1 * np.dot(inv_p_1, np.dot(mat_G_t, mat_e2))
# Free memory
del inv_p_1
inv_p_2 = np.linalg.inv((np.dot(mat_H_t,mat_H) + (1 / self.C2) \
* np.dot(mat_G_t, mat_G)) + (reg_term * np.identity(mat_H.shape[1])))
hyper_p_2 = np.dot(inv_p_2, np.dot(mat_H_t, mat_e1))
else:
stabilizer = reg_term * np.identity(mat_H.shape[1])
mat_G_G_t = np.dot(mat_G_t, mat_G)
mat_H_H_t = np.dot(mat_H_t,mat_H)
inv_p_1 = np.linalg.inv((mat_G_G_t + (1 / self.C1) * mat_H_H_t) \
+ stabilizer)
# Determine parameters of two non-parallel hyperplanes
hyper_p_1 = -1 * np.dot(inv_p_1, np.dot(mat_G_t, mat_e2))
# Free memory
del inv_p_1
inv_p_2 = np.linalg.inv((mat_H_H_t + (1 / self.C2) * mat_G_G_t) \
+ stabilizer)
hyper_p_2 = np.dot(inv_p_2, np.dot(mat_H_t, mat_e1))
self.w1 = hyper_p_1[:hyper_p_1.shape[0] - 1, :]
self.b1 = hyper_p_1[-1, :]
self.w2 = hyper_p_2[:hyper_p_2.shape[0] - 1, :]
self.b2 = hyper_p_2[-1, :]
# @profile
# def fit_2(self, X, y):
# """
# It fits the binary Least Squares TwinSVM model according to the given
# training data.
#
# Parameters
# ----------
# X : array-like, shape (n_samples, n_features)
# Training feature vectors, where n_samples is the number of samples
# and n_features is the number of features.
#
# y : array-like, shape(n_samples,)
# Target values or class labels.
# """
#
# X, y = check_X_y(X, y, dtype=np.float64)
#
# # Matrix A or class 1 data
# mat_A = X[y == 1]
#
# # Matrix B or class -1 data
# mat_B = X[y == -1]
#
# # Vectors of ones
# mat_e1 = np.ones((mat_A.shape[0], 1))
# mat_e2 = np.ones((mat_B.shape[0], 1))
#
# # Regularization term used for ill-possible condition
# reg_term = 2 ** float(-7)
#
# if self.kernel == 'linear':
#
# mat_H = np.column_stack((mat_A, mat_e1))
# mat_G = np.column_stack((mat_B, mat_e2))
#
# mat_H_t = np.transpose(mat_H)
# mat_G_t = np.transpose(mat_G)
#
# stabilizer = reg_term * np.identity(mat_H.shape[1])
#
# # Determine parameters of two non-parallel hyperplanes
# hyper_p_1 = -1 * np.dot(np.linalg.inv((np.dot(mat_G_t, mat_G) +
# (1 / self.C1) * np.dot(mat_H_t,mat_H)) + stabilizer),
# np.dot(mat_G_t, mat_e2))
#
# self.w1 = hyper_p_1[:hyper_p_1.shape[0] - 1, :]
# self.b1 = hyper_p_1[-1, :]
#
# hyper_p_2 = np.dot(np.linalg.inv((np.dot(mat_H_t, mat_H) + (1 / self.C2)
# * np.dot(mat_G_t, mat_G)) + stabilizer), np.dot(mat_H_t, mat_e1))
#
# self.w2 = hyper_p_2[:hyper_p_2.shape[0] - 1, :]
# self.b2 = hyper_p_2[-1, :]
#
# elif self.kernel == 'RBF':
#
# # class 1 & class -1
# mat_C = np.row_stack((mat_A, mat_B))
#
# self.mat_C_t = np.transpose(mat_C)[:, :int(mat_C.shape[0] *
# self.rect_kernel)]
#
# mat_H = np.column_stack((rbf_kernel(mat_A, self.mat_C_t,
# self.gamma), mat_e1))
#
# mat_G = np.column_stack((rbf_kernel(mat_B, self.mat_C_t,
# self.gamma), mat_e2))
#
# mat_H_t = np.transpose(mat_H)
# mat_G_t = np.transpose(mat_G)
#
# mat_I_H = np.identity(mat_H.shape[0]) # (m_1 x m_1)
# mat_I_G = np.identity(mat_G.shape[0]) # (m_2 x m_2)
#
# mat_I = np.identity(mat_G.shape[1]) # (n x n)
#
# # Determine parameters of hypersurfaces # Using SMW formula
# if mat_A.shape[0] < mat_B.shape[0]:
#
# y = (1 / reg_term) * (mat_I - np.dot(np.dot(mat_G_t, \
# np.linalg.inv((reg_term * mat_I_G) + np.dot(mat_G, mat_G_t))),
# mat_G))
#
# mat_H_y = np.dot(mat_H, y)
# mat_y_Ht = np.dot(y, mat_H_t)
# mat_H_y_Ht = np.dot(mat_H_y, mat_H_t)
#
# h_surf1_inv = np.linalg.inv(self.C1 * mat_I_H + mat_H_y_Ht)
# h_surf2_inv = np.linalg.inv((mat_I_H / self.C2) + mat_H_y_Ht)
#
# hyper_surf1 = np.dot(-1 * (y - np.dot(np.dot(mat_y_Ht, h_surf1_inv),
# mat_H_y)), np.dot(mat_G_t, mat_e2))
#
# hyper_surf2 = np.dot(self.C2 * (y - np.dot(np.dot(mat_y_Ht,
# h_surf2_inv), mat_H_y)), np.dot(mat_H_t, mat_e1))
#
# # Parameters of hypersurfaces
# self.w1 = hyper_surf1[:hyper_surf1.shape[0] - 1, :]
# self.b1 = hyper_surf1[-1, :]
#
# self.w2 = hyper_surf2[:hyper_surf2.shape[0] - 1, :]
# self.b2 = hyper_surf2[-1, :]
#
# else:
#
# z = (1 / reg_term) * (mat_I - np.dot(np.dot(mat_H_t, \
# np.linalg.inv(reg_term * mat_I_H + np.dot(mat_H, mat_H_t))),
# mat_H))
#
# mat_G_z = np.dot(mat_G, z)
# mat_z_Gt = np.dot(z, mat_G_t)
# mat_G_y_Gt = np.dot(mat_G_z, mat_G_t)
#
# g_surf1_inv = np.linalg.inv((mat_I_G / self.C1) + mat_G_y_Gt)
# g_surf2_inv = np.linalg.inv(self.C2 * mat_I_G + mat_G_y_Gt)
#
# hyper_surf1 = np.dot(-self.C1 * (z - np.dot(np.dot(mat_z_Gt,
# g_surf1_inv), mat_G_z)), np.dot(mat_G_t, mat_e2))
#
# hyper_surf2 = np.dot((z - np.dot(np.dot(mat_z_Gt, g_surf2_inv),
# mat_G_z)), np.dot(mat_H_t, mat_e1))
#
# self.w1 = hyper_surf1[:hyper_surf1.shape[0] - 1, :]
# self.b1 = hyper_surf1[-1, :]
#
# self.w2 = hyper_surf2[:hyper_surf2.shape[0] - 1, :]
# self.b2 = hyper_surf2[-1, :]
def rbf_kernel(x, y, u):
"""
    It transforms samples into a higher-dimensional space using the Gaussian (RBF) kernel.
Parameters
----------
x, y : array-like, shape (n_features,)
A feature vector or sample.
u : float
Parameter of the RBF kernel function.
Returns
-------
float
        Value of the kernel matrix for feature vectors x and y.
"""
return np.exp(-2 * u) * np.exp(2 * u * np.dot(x, y))
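# Note: rbf_kernel above equals the standard RBF kernel exp(-u * ||x - y||^2)
# only for unit-norm inputs, since exp(-2u) * exp(2u * x.y) = exp(-u * (2 - 2 * x.y));
# for general inputs it acts as an exponential dot-product kernel.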
# GPU implementation of estimators ############################################
#
# try:
#
# from cupy import prof
# import cupy as cp
#
# def GPU_rbf_kernel(x, y, u):
# """
# It transforms samples into higher dimension using Gaussian (RBF)
# kernel.
#
# Parameters
# ----------
# x, y : array-like, shape (n_features,)
# A feature vector or sample.
#
# u : float
# Parameter of the RBF kernel function.
#
# Returns
# -------
# float
# Value of kernel matrix for feature vector x and y.
# """
#
# return cp.exp(-2 * u) * cp.exp(2 * u * cp.dot(x, y))
#
# class GPU_LSTSVM():
# """
# GPU implementation of least squares twin support vector machine
#
# Parameters
# ----------
# kernel : str
# Type of the kernel function which is either 'linear' or 'RBF'.
#
# rect_kernel : float
# Percentage of training samples for Rectangular kernel.
#
# C1 : float
# Penalty parameter of first optimization problem.
#
# C2 : float
# Penalty parameter of second optimization problem.
#
# gamma : float
# Parameter of the RBF kernel function.
#
# Attributes
# ----------
# mat_C_t : array-like, shape = [n_samples, n_samples]
# A matrix that contains kernel values.
#
# cls_name : str
# Name of the classifier.
#
# w1 : array-like, shape=[n_features]
# Weight vector of class +1's hyperplane.
#
# b1 : float
# Bias of class +1's hyperplane.
#
# w2 : array-like, shape=[n_features]
# Weight vector of class -1's hyperplane.
#
# b2 : float
# Bias of class -1's hyperplane.
# """
#
# def __init__(self, kernel='linear', rect_kernel=1, C1=2**0, C2=2**0,
# gamma=2**0):
#
# self.C1 = C1
# self.C2 = C2
# self.gamma = gamma
# self.kernel = kernel
# self.rect_kernel = rect_kernel
# self.mat_C_t = None
# self.clf_name = None
#
# # Two hyperplanes attributes
# self.w1, self.b1, self.w2, self.b2 = None, None, None, None
#
# #@prof.TimeRangeDecorator()
# def fit(self, X, y):
# """
# It fits the binary Least Squares TwinSVM model according to
# the given
# training data.
#
# Parameters
# ----------
# X : array-like, shape (n_samples, n_features)
# Training feature vectors, where n_samples is the number of
# samples
# and n_features is the number of features.
#
# y : array-like, shape(n_samples,)
# Target values or class labels.
# """
#
# X = cp.asarray(X)
# y = cp.asarray(y)
#
# # Matrix A or class 1 data
# mat_A = X[y == 1]
#
# # Matrix B or class -1 data
# mat_B = X[y == -1]
#
# # Vectors of ones
# mat_e1 = cp.ones((mat_A.shape[0], 1))
# mat_e2 = cp.ones((mat_B.shape[0], 1))
#
# mat_H = cp.column_stack((mat_A, mat_e1))
# mat_G = cp.column_stack((mat_B, mat_e2))
#
# mat_H_t = cp.transpose(mat_H)
# mat_G_t = cp.transpose(mat_G)
#
# # Determine parameters of two non-parallel hyperplanes
# hyper_p_1 = -1 * cp.dot(cp.linalg.inv(cp.dot(mat_G_t, mat_G) + \
# (1 / self.C1) * cp.dot(mat_H_t, mat_H)), \
# cp.dot(mat_G_t, mat_e2))
#
# self.w1 = hyper_p_1[:hyper_p_1.shape[0] - 1, :]
# self.b1 = hyper_p_1[-1, :]
#
# hyper_p_2 = cp.dot(cp.linalg.inv(cp.dot(mat_H_t, mat_H) +
# (1 / self.C2) * cp.dot(mat_G_t, mat_G)), cp.dot(mat_H_t, mat_e1))
#
# self.w2 = hyper_p_2[:hyper_p_2.shape[0] - 1, :]
# self.b2 = hyper_p_2[-1, :]
#
# #@prof.TimeRangeDecorator()
# def predict(self, X):
# """
# Performs classification on samples in X using the Least Squares
# TwinSVM model.
#
# Parameters
# ----------
# X_test : array-like, shape (n_samples, n_features)
# Feature vectors of test data.
#
# Returns
# -------
# output : array, shape (n_samples,)
# Predicted class lables of test data.
#
# """
#
# X = cp.asarray(X)
#
# # Calculate prependicular distances for new data points
# # prepen_distance = cp.zeros((X.shape[0], 2))
# #
# # kernel_f = {'linear': lambda i: X[i, :] ,
# # 'RBF': lambda i: GPU_rbf_kernel(X[i, :],
# self.mat_C_t, self.gamma)}
# #
# # for i in range(X.shape[0]):
# #
# # # Prependicular distance of data pint i from hyperplanes
# # prepen_distance[i, 1] = cp.abs(cp.dot(kernel_f[self.kernel](i),
# self.w1) + self.b1)[0]
# #
# # prepen_distance[i, 0] = cp.abs(cp.dot(kernel_f[self.kernel](i), self.w2) + self.b2)[0]
#
# dist = cp.column_stack((cp.abs(cp.dot(X, self.w2) + self.b2),
# cp.abs(cp.dot(X, self.w1) + self.b1)))
#
#
#
# # Assign data points to class +1 or -1 based on distance from
# hyperplanes
# output = 2 * cp.argmin(dist, axis=1) - 1
#
# return cp.asnumpy(output)
#
# except ImportError:
#
# print("Cannot run GPU implementation. Install CuPy package.")
##############################################################################
if __name__ == '__main__':
pass
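# A self-contained sanity check (a sketch; it swaps the CSV reader used in
# the commented block below for sklearn's synthetic data generator):
#
# from sklearn.datasets import make_classification
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import accuracy_score
#
# X, y = make_classification(n_samples=200, n_features=10, random_state=0)
# y = 2 * y - 1  # the estimators above expect labels in {-1, +1}
# x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# clf = LSTSVM(kernel='linear')
# clf.fit(x_train, y_train)
# print("Accuracy: %.2f" % (accuracy_score(y_test, clf.predict(x_test)) * 100))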
# from preprocess import read_data
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import accuracy_score
#
# X, y, filename = read_data('../dataset/australian.csv')
#
# x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
#
# tsvm_model = TSVM('linear', 0.25, 0.5)
#
# tsvm_model.fit(x_train, y_train)
# pred = tsvm_model.predict(x_test)
#
# print("Accuracy: %.2f" % (accuracy_score(y_test, pred) * 100)) | PypiClean |
/Eskapade-1.0.0-py3-none-any.whl/eskapade/tutorials/esk303_hgr_filler_plotter.py | from eskapade import analysis, core_ops, process_manager, resources, visualization, ConfigObject, Chain
from eskapade.logger import Logger, LogLevel
logger = Logger()
logger.debug('Now parsing configuration file esk303_hgr_filler_plotter.py.')
#########################################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk303_hgr_filler_plotter'
settings['version'] = 0
#########################################################################################
msg = r"""
The plots and latex files produced by link hist_summary can be found in dir:
{path}
"""
logger.info(msg, path=settings['resultsDir'] + '/' + settings['analysisName'] + '/data/v0/report/')
# --- Analysis configuration flags.
# E.g. use these flags turn on or off certain chains with links.
# by default all set to false, unless already configured in
# configobject or vars()
settings['do_loop'] = True
settings['do_plotting'] = True
chunk_size = 400
#########################################################################################
# --- create dummy example dataset, which is read in below
input_files = [resources.fixture('mock_accounts.csv.gz')]
def to_date(x):
"""Convert to timestamp."""
import pandas as pd
try:
ts = pd.Timestamp(x.split()[0])
return ts
except BaseException:
pass
return x
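# For example: to_date('2017-01-01 12:05:00') -> Timestamp('2017-01-01 00:00:00');
# input that cannot be parsed is returned unchanged.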
#########################################################################################
# --- now set up the chains and links, based on configuration flags
# --- example 2: readdata loops over the input files, with file chunking.
if settings['do_loop']:
ch = Chain('Data')
# --- a loop is set up in the chain MyChain.
# we iterate over (chunks of) the next file in the list until the iterator is done.
# then move on to the next chain (Overview)
# --- readdata keeps on opening the next 400 lines of the open or next file in the file list.
# all kwargs are passed on to pandas file reader.
read_data = analysis.ReadToDf(name='dflooper', key='rc', reader='csv')
read_data.chunksize = chunk_size
read_data.path = input_files
ch.add(read_data)
# add conversion functions to "Data" chain
# here, convert column 'registered', an integer, to an actual timestamp.
conv_funcs = [{'func': to_date, 'colin': 'registered', 'colout': 'date'}]
transform = analysis.ApplyFuncToDf(name='Transform', read_key=read_data.key,
apply_funcs=conv_funcs)
ch.add(transform)
# --- As an example, will fill histogram iteratively over the file loop
hf = analysis.HistogrammarFiller()
hf.read_key = 'rc'
hf.store_key = 'hist'
hf.logger.log_level = LogLevel.DEBUG
    # columns that are picked up to do value counting on in the input dataset
# note: can also be 2-dim: ['isActive','age']
# in this example, the rest are one-dimensional histograms
hf.columns = ['date', 'isActive', 'age', 'eyeColor', 'gender', 'company',
'latitude', 'longitude', ['isActive', 'age'], ['latitude', 'longitude']]
    # binning is applied to all input columns that are numeric or timestamps.
# default binning is: bin_width = 1, bin_offset = 0
# for timestamps, default binning is: { 'bin_width': np.timedelta64(30,'D'),
# 'bin_offset': np.datetime64('2010-01-04') } }
hf.bin_specs = {'longitude': {'bin_width': 5, 'bin_offset': 0},
'latitude': {'bin_width': 5, 'bin_offset': 0}}
ch.add(hf)
# --- this serves as the continue statement of the loop. go back to start of the chain.
repeater = core_ops.RepeatChain()
# repeat until readdata says halt.
repeater.listen_to = 'chainRepeatRequestBy_' + read_data.name
ch.add(repeater)
link = core_ops.DsObjectDeleter()
link.keep_only = ['hist', 'n_sum_rc', 'rc']
ch.add(link)
# --- print contents of the datastore
if settings['do_plotting']:
ch = Chain('Overview')
pds = core_ops.PrintDs(name='End')
pds.keys = ['n_sum_rc']
ch.add(pds)
# --- make a nice summary report of the created histograms
hist_summary = visualization.DfSummary(name='HistogramSummary',
read_key=hf.store_key)
ch.add(hist_summary)
#########################################################################################
logger.debug('Done parsing configuration file esk303_hgr_filler_plotter.py.') | PypiClean |
/BeanCommonUtils-1.1.7.tar.gz/BeanCommonUtils-1.1.7/common_utils/mysql_pool.py |
import pymysql
from DBUtils.PooledDB import PooledDB
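# Example usage (a minimal sketch; the connection details are placeholders):
#
#   pool = MysqlPool()
#   pool.init_mysql_pool({"host": "127.0.0.1", "user": "root",
#                         "passwd": "secret", "db": "test", "port": 3306})
#   cur, conn = pool.get_mysql_conn()
#   rows, ok = pool.sql_operate("SELECT 1", cur, conn, "select")
#   pool.dispose(conn)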
class MysqlPool(object):
pool = None
    # Initialize the database connection pool
def init_mysql_pool(self, mysql_info):
if self.pool is None:
self.pool = PooledDB(creator=pymysql, mincached=10,
host=mysql_info['host'], user=mysql_info['user'], passwd=mysql_info['passwd'],
db=mysql_info['db'], port=mysql_info['port'],
maxcached=20, # 链接池中最多闲置的链接,0和None不限制
blocking=True,
ping=0,
charset=mysql_info.get('charset', 'utf8'),
maxconnections=6)
def get_mysql_conn(self):
mysql_conn = self.pool.connection()
cur = mysql_conn.cursor(cursor=pymysql.cursors.DictCursor)
return cur, mysql_conn
    # Execute insert/update/delete SQL
@staticmethod
def op_insert(sql, cur, mysql_conn, sql_type):
mysql_conn.ping()
try:
insert_num = cur.execute(sql)
mysql_conn.commit()
except Exception as e:
raise Exception("%s sql execute error, err_msg: %s" % (sql_type, e))
return insert_num, True
    # Execute a select query
@staticmethod
def op_select(sql, cur, mysql_conn):
mysql_conn.ping()
cur.execute(sql)
try:
select_res = cur.fetchall()
except Exception as e:
return e, False
return select_res, True
def sql_operate(self, sql, cur, mysql_conn, sql_type):
sql_operate_list = ["insert", "update", "delete", "select"]
        if not isinstance(sql_type, str) or sql_type not in sql_operate_list:
raise ValueError("input sql_type error, sql_type may be: %s" % sql_operate_list)
if sql_type == "select":
return self.op_select(sql, cur, mysql_conn)
else:
return self.op_insert(sql, cur, mysql_conn, sql_type)
    # Release resources
@staticmethod
def dispose(mysql_conn):
mysql_conn.close() | PypiClean |
/Notus-0.0.3-py36-none-any.whl/notus/win10/win10_toaster.py |
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 25-10-2020
"""
__all__ = ["Win10Toaster"]
import logging
from os import path, remove
from pathlib import Path
from random import randint
from threading import Thread
from time import sleep
from typing import Optional
from pkg_resources import Requirement, resource_filename
from notus import PROJECT_NAME
try:
from PIL import Image
except ImportError:
Image = None
"""
CW_USEDEFAULT = -0x80000000
IDI_APPLICATION = 0x7f00
IMAGE_ICON = 0x1
LR_LOADFROMFILE = 0x16
LR_DEFAULTSIZE = 0x40
NIM_ADD = 0x0
NIM_MODIFY = 0x1
NIM_DELETE = 0x2
NIF_MESSAGE = 0x1
NIF_ICON = 0x2
NIF_TIP = 0x4
NIF_INFO = 0x10
WM_USER = 0x400
WS_OVERLAPPED = 0x0
WS_SYSMENU = 0x80000
"""
from winsound import SND_FILENAME, PlaySound
from win32api import (
GetModuleHandle,
PostQuitMessage,
)
from win32con import (
BS_DEFPUSHBUTTON,
CW_USEDEFAULT,
IDI_APPLICATION,
IMAGE_ICON,
LR_DEFAULTSIZE,
LR_LOADFROMFILE,
WM_USER,
WS_CHILD,
WS_OVERLAPPED,
WS_SYSMENU,
WS_TABSTOP,
WS_VISIBLE,
)
from win32gui import (
CreateWindow,
DestroyWindow,
LoadIcon,
LoadImage,
NIF_ICON,
NIF_INFO,
NIF_MESSAGE,
NIF_TIP,
NIM_ADD,
NIM_DELETE,
NIIF_NOSOUND,
NIM_MODIFY,
RegisterClass,
UnregisterClass,
Shell_NotifyIcon,
UpdateWindow,
WNDCLASS,
PumpMessages,
)
from pywintypes import error as WinTypesException
SPIF_SENDCHANGE = 0x2
SPI_SETMESSAGEDURATION = 0x2017
SPI_GETMESSAGEDURATION = 0x2016
PARAM_DESTROY = 0x404
PARAM_CLICKED = 0x405
MOUSE_UP = 0x202
# PARAM_DESTROY = 1028
# PARAM_CLICKED = 1029
# Class
class Win10Toaster(object):
"""Create a Windows 10 toast notification.
#TODO: Add progress bar notification type
"""
def __init__(self):
self._thread = None
@staticmethod
def _decorator(func: callable, callback: callable = None):
"""
:param func: callable to decorate
:param callback: callable to run on mouse click within notification window
:return: callable
"""
def inner(*args, **kwargs):
"""
:param args:
:param kwargs:
"""
kwargs.update({"callback": callback})
func(*args, **kwargs)
return inner
def _show_toast(
self,
title: str,
msg: str = "No msg",
icon_path: Path = None,
duration: float = None,
sound_path=None,
callback_on_click: callable = None,
tooltip: Optional[str] = None,
) -> None:
"""Notification settings.
:param title: notification title
:param msg: notification message
        :param icon_path: path to the .ico file to customize the notification
        :param duration: delay in seconds before notification self-destruction, None for no self-destruction
        :param sound_path: path to the .wav file to customize the notification
"""
self.duration = duration
def callback():
""" """
self.duration = 0
if callback_on_click is not None:
callback_on_click()
if tooltip is None:
tooltip = PROJECT_NAME
# Register the window class.
self.window_class = WNDCLASS()
self.instance_handle = self.window_class.hInstance = GetModuleHandle(None)
self.window_class.lpszClassName = f"{PROJECT_NAME}-{title}" # must be a string
self.window_class.lpfnWndProc = self._decorator(
self.wnd_proc, callback
) # could instead specify simple mapping
try:
self.classAtom = RegisterClass(self.window_class)
except Exception as e:
logging.error("Some trouble with classAtom (%s)", e)
style = WS_OVERLAPPED | WS_SYSMENU
button_style = WS_TABSTOP | WS_VISIBLE | WS_CHILD | BS_DEFPUSHBUTTON # TODO: Unused for now
self.window_handle = CreateWindow(
self.classAtom,
"Taskbar",
style,
0,
0,
CW_USEDEFAULT,
CW_USEDEFAULT,
0,
0,
self.instance_handle,
None,
)
UpdateWindow(self.window_handle)
# icon
new_name = ""
if icon_path is not None:
icon_path = path.realpath(icon_path)
converted = False
            if Image is not None and not icon_path.lower().endswith(".ico"):
                img = Image.open(icon_path)
                new_name = f'{icon_path.rsplit(".", 1)[0]}.ico'
img.save(new_name)
icon_path = new_name
converted = True
else:
icon_path = resource_filename(
Requirement.parse(PROJECT_NAME),
str(Path(PROJECT_NAME) / "data" / "python.ico"),
)
converted = False
try:
hicon = LoadImage(
self.instance_handle,
icon_path,
IMAGE_ICON,
0,
0,
LR_LOADFROMFILE | LR_DEFAULTSIZE,
)
            if Image and converted and path.exists(new_name):
remove(new_name)
except Exception as e:
logging.error("Some trouble with the icon (%s): %s", icon_path, e)
hicon = LoadIcon(0, IDI_APPLICATION)
# Set the duration
"""
buff = create_unicode_buffer(10)
windll.user32.SystemParametersInfoW(SPI_GETMESSAGEDURATION, 0, buff, 0)
try:
oldlength = int(buff.value.encode("unicode_escape").decode().replace("\\", "0"), 16)
except ValueError:
oldlength = 5 # Default notification length
duration_output = windll.user32.SystemParametersInfoW(SPI_SETMESSAGEDURATION, 0, self.duration, SPIF_SENDCHANGE)
windll.user32.SystemParametersInfoW(SPI_GETMESSAGEDURATION, 0, buff, 0)
duration_error = False
try:
int(buff.value.encode("unicode_escape").decode().replace("\\", "0"), 16)
except ValueError:
duration_error = True
if duration_output == 0 or self.duration > 255 or duration_error:
windll.user32.SystemParametersInfoW(SPI_SETMESSAGEDURATION, 0, oldlength, SPIF_SENDCHANGE)
self.active = False
raise RuntimeError(f"Some trouble with the duration ({self.duration})" ": Invalid duration length")
"""
title += " " * randint(0, 63 - len(title))
msg += " " * randint(0, 128 - len(msg))
Shell_NotifyIcon(
NIM_ADD,
(
self.window_handle,
0,
NIF_ICON | NIF_MESSAGE | NIF_TIP,
WM_USER + 20,
hicon,
tooltip,
),
)
Shell_NotifyIcon(
NIM_MODIFY,
(
self.window_handle,
0,
NIF_INFO,
WM_USER + 20,
hicon,
tooltip,
msg,
200,
title,
0 if sound_path is None else NIIF_NOSOUND,
),
)
if sound_path is not None: # play the custom sound
sound_path = path.realpath(sound_path)
if not path.exists(sound_path):
logging.error(f"Some trouble with the sound file ({sound_path}): [Errno 2] No such file")
try:
PlaySound(sound_path, SND_FILENAME)
except Exception as e:
logging.error(f"Some trouble with the sound file ({sound_path}): {e}")
PumpMessages()
"""
# Put the notification duration back to normal
SystemParametersInfoW(SPI_SETMESSAGEDURATION, 0, oldlength, SPIF_SENDCHANGE)
"""
if duration is not None: # take a rest then destroy
# sleep(duration)
while self.duration > 0:
sleep(0.1)
self.duration -= 0.1
DestroyWindow(self.window_handle)
UnregisterClass(self.window_class.lpszClassName, self.instance_handle)
try: # Sometimes the try icon sticks around until you click it - this should stop that
Shell_NotifyIcon(NIM_DELETE, (self.window_handle, 0))
except WinTypesException:
pass
self.active = False
def show(
self,
title: str,
message: str = "No msg",
*,
icon_path: Optional[Path] = None,
duration: Optional[float] = None,
threaded: bool = False,
callback_on_click: Optional[callable] = None,
wait_for_active_notification: bool = True,
tooltip: Optional[str] = None,
) -> bool:
"""Notification settings.
:param tooltip:
:param wait_for_active_notification:
:param duration:
:param threaded:
:param callback_on_click:
:param title: notification title
:param message: notification message
:param icon_path: path to the .ico file to custom notification
:para mduration: delay in seconds before notification self-destruction, None for no-self-destruction
"""
args = title, message, icon_path, duration, None, callback_on_click, tooltip
if not threaded:
self._show_toast(*args)
else:
if (
self.notification_active and wait_for_active_notification
        ):  # We have an active notification, let it finish so we don't spam them
# TODO: FIGURE OUT if sleeping here is a better solution
return False
self._thread = Thread(target=self._show_toast, args=args)
self._thread.start()
return True
@property
def notification_active(self) -> bool:
"""See if we have an active notification showing"""
if (
self._thread is not None and self._thread.is_alive()
        ):  # We have an active notification, let it finish so we don't spam them
return True
return False
def wnd_proc(self, hwnd, msg, wparam, lparam, **kwargs) -> None:
"""Messages handler method"""
if lparam == PARAM_CLICKED:
if kwargs.get("callback"):
kwargs.pop("callback")()
self.on_destroy(hwnd, msg, wparam, lparam)
elif lparam == PARAM_DESTROY:
self.on_destroy(hwnd, msg, wparam, lparam)
def on_destroy(self, hwnd, msg, wparam, lparam) -> None:
"""Clean after notification ended."""
Shell_NotifyIcon(NIM_DELETE, (self.window_handle, 0))
PostQuitMessage(0)
if __name__ == "__main__":
def main():
""" """
import time
def p_callback():
""" """
print("clicked toast")
toaster = Win10Toaster()
toaster.show("Hello World", "Python Here!", callback_on_click=p_callback, duration=3)
toaster.show("Buh", "DOUBLE TROUBLE", duration=2)
toaster.show(
"Example two",
"This notification is in it's own thread!",
icon_path=None,
duration=5,
threaded=True,
)
toaster.show("Do it", "Good!", icon_path=None, duration=5, threaded=True) # TODO: MAKE THIS APPEAR!
while toaster.notification_active: # Wait for threaded notification to finish
time.sleep(0.1)
main() | PypiClean |
/CouchDB3-1.2.0-py3-none-any.whl/couchdb3/utils.py |
import base64
from collections.abc import Generator
import re
import requests
from typing import Any, Dict, Optional, Set
from urllib import parse
from urllib3.util import Url, parse_url
from . import exceptions
__all__ = [
"basic_auth",
"build_query",
"build_url",
"user_name_to_id",
"validate_auth_method",
"validate_db_name",
"validate_proxy",
"validate_user_id",
"check_response",
"content_type_getter",
"extract_url_data",
"partitioned_db_resource_parser",
"COUCHDB_USERS_DB_NAME",
"COUCHDB_REPLICATOR_DB_NAME",
"COUCHDB_GLOBAL_CHANGES_DB_NAME",
"COUCH_DB_RESERVED_DB_NAMES",
"DEFAULT_AUTH_METHOD",
"DEFAULT_TIMEOUT",
"MIME_TYPES_MAPPING",
"PATTERN_DB_NAME",
"PATTERN_USER_ID",
"VALID_AUTH_METHODS",
"VALID_SCHEMES",
]
COUCHDB_USERS_DB_NAME: str = "_users"
"""Reserved CouchDB users database name."""
COUCHDB_REPLICATOR_DB_NAME: str = "_replicator"
"""Reserved CouchDB replicator database name."""
COUCHDB_GLOBAL_CHANGES_DB_NAME: str = "_global_changes"
"""Reserved CouchDB global changes database name."""
COUCH_DB_RESERVED_DB_NAMES: Set[str] = {
COUCHDB_USERS_DB_NAME,
COUCHDB_REPLICATOR_DB_NAME,
COUCHDB_GLOBAL_CHANGES_DB_NAME
}
"""Reserved CouchDB database names."""
DEFAULT_AUTH_METHOD: str = "cookie"
"""The default authentication method - values to `\"cookie\"`."""
DEFAULT_TEXTUAL_MIME_TYPE: str = "text/plain"
DEFAULT_FALLBACK_MIME_TYPE: str = "application/octet-stream"
DEFAULT_TIMEOUT: int = 300
"""The default timeout set in requests - values to `300`."""
MIME_TYPES_MAPPING: Dict = {
"aac": "audio/aac",
"abw": "application/x-abiword",
"arc": "application/x-freearc",
"avi": "video/x-msvideo",
"azw": "application/vnd.amazon.ebook",
"bin": "application/octet-stream",
"bmp": "image/bmp",
"bz": "application/x-bzip",
"bz2": "application/x-bzip2",
"cda": "application/x-cdf",
"csh": "application/x-csh",
"css": "text/css",
"csv": "text/csv",
"doc": "application/msword",
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"eot": "application/vnd.ms-fontobject",
"epub": "application/epub+zip",
"gz": "application/gzip",
"gif": "image/gif",
"htm": "text/html",
"html": "text/html",
"ico": "image/vnd.microsoft.icon",
"ics": "text/calendar",
"jar": "application/java-archive",
"jpeg": "image/jpeg",
"jpg": "image/jpeg",
"js": "text/javascript",
"json": "application/json",
"jsonld": "application/ld+json",
"mid": "audio/midi audio/x-midi",
"midi": "audio/midi audio/x-midi",
"mjs": "text/javascript",
"mp3": "audio/mpeg",
"mp4": "video/mp4",
"mpeg": "video/mpeg",
"mpkg": "application/vnd.apple.installer+xml",
"odp": "application/vnd.oasis.opendocument.presentation",
"ods": "application/vnd.oasis.opendocument.spreadsheet",
"odt": "application/vnd.oasis.opendocument.text",
"oga": "audio/ogg",
"ogv": "video/ogg",
"ogx": "application/ogg",
"opus": "audio/opus",
"otf": "font/otf",
"png": "image/png",
"pdf": "application/pdf",
"php": "application/x-httpd-php",
"ppt": "application/vnd.ms-powerpoint",
"pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
"rar": "application/vnd.rar",
"rtf": "application/rtf",
"sh": "application/x-sh",
"svg": "image/svg+xml",
"swf": "application/x-shockwave-flash",
"tar": "application/x-tar",
"tif .tiff": "image/tiff",
"ts": "video/mp2t",
"ttf": "font/ttf",
"txt": "text/plain",
"vsd": "application/vnd.visio",
"wav": "audio/wav",
"weba": "audio/webm",
"webm": "video/webm",
"webp": "image/webp",
"woff": "font/woff",
"woff2": "font/woff2",
"xhtml": "application/xhtml+xml",
"xls": "application/vnd.ms-excel",
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"xml": "application/xml",
"xul": "application/vnd.mozilla.xul+xml",
"zip": "application/zip",
"3gp": "video/3gpp",
"3g2": "video/3gpp2",
"7z": "application/x-7z-compressed"
}
"""A dictionary mapping file extensions to their appropriate content-type."""
PATTERN_DB_NAME: re.Pattern = re.compile(r"^[a-z][a-z0-9_$()+/-]*$")
"""The pattern for valid database names."""
PATTERN_USER_ID: re.Pattern = re.compile(r"^org\.couchdb\.user:.*")
"""The pattern for valid user IDs."""
VALID_AUTH_METHODS: Set[str] = {"basic", "cookie"}
"""The valid auth method arguments. Possible values are `\"basic\"` or `\"cookie\"`."""
VALID_SCHEMES: Set[str] = {"http", "https", "socks5"}
"""The valid TCP schemes. Possible values are `\"http\"` or `\"https\"` or `\"socks5\"`."""
def _handler(x: Any) -> str:
if isinstance(x, (Generator, map, list, set, tuple)):
return "[%s]" % ",".join(f"\"{_handler(_)}\"" for _ in x)
elif isinstance(x, dict):
return str({key: _handler(val) for key, val in x.items()})
elif isinstance(x, bool):
return str(x).lower()
return str(x)
def basic_auth(
user: str,
password: str
) -> str:
"""
    Create a basic authentication header value.
Parameters
----------
user : str
A CouchDB user name.
password : str
A corresponding CouchDB user password.
Returns
-------
str : The credentials concatenated with a colon and base64 encoded.
"""
return base64.b64encode(f"{user}:{password}".encode()).decode()
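# For example (a quick illustration, not part of the library's test suite):
#   basic_auth("admin", "secret")  ->  "YWRtaW46c2VjcmV0"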
def build_query(
**kwargs,
) -> Optional[str]:
"""
Parameters
----------
kwargs
Arbitrary keyword-args to be passed as query-params in a URL.
Returns
-------
str : A string containing the keyword-args encoded as URL query-params.
"""
return parse.urlencode({key: _handler(val) for key, val in kwargs.items() if val is not None})
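# For example: build_query(limit=10, include_docs=True) -> "limit=10&include_docs=true";
# lists and generators are JSON-style encoded and booleans are lowercased by _handler.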
def build_url(
*,
scheme: str,
host: str,
path: str = None,
port: int = None,
**kwargs,
) -> Url:
"""
Build a URL using the provided scheme, host, path & kwargs.
Parameters
----------
scheme : str
The URL scheme (e.g `http`).
host : str
The URL host (e.g. `example.com`).
path : str
The URL path (e.g. `/api/data`). Default `None`.
port : int
The port to connect to (e.g. `5984`). Default `None`.
kwargs
Arbitrary keyword-args to be passed as query-params in a URL.
Returns
-------
Url : An instance of `Url`.
"""
return Url(
scheme=scheme,
host=host,
port=port,
path=path,
query=build_query(**kwargs),
)
def validate_db_name(name: str) -> bool:
"""
Checks a name for CouchDB name-compliance.
Parameters
----------
name : str
A prospective database name.
Returns
-------
bool : `True` if the provided name is CouchDB compliant.
"""
return name in COUCH_DB_RESERVED_DB_NAMES or bool(PATTERN_DB_NAME.fullmatch(name))
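# For example: validate_db_name("my_db") -> True, while names with uppercase
# letters or a leading digit (e.g. "MyDB", "1db") -> False.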
def validate_auth_method(auth_method: str) -> bool:
"""
Checks if the provided authentication method is valid.
Parameters
----------
auth_method : str
Returns
-------
bool: `True` if `auth_method` is in `VALID_AUTH_METHODS`.
"""
return auth_method in VALID_AUTH_METHODS
def validate_proxy(proxy: str) -> bool:
"""
Check a proxy scheme for CouchDB proxy-scheme-compliance
Parameters
----------
proxy : str
A prospective proxy.
Returns
-------
bool : `True` if the provided proxy is CouchDB compliant.
"""
return parse_url(proxy).scheme in VALID_SCHEMES
def validate_user_id(user_id: str) -> bool:
"""
Checks a user ID for CouchDB user-id-compliance.
Parameters
----------
user_id : str
A prospective user ID.
Returns
-------
bool : `True` if the provided user ID is CouchDB compliant.
"""
return bool(PATTERN_USER_ID.fullmatch(user_id))
def user_name_to_id(name: str) -> str:
"""
Convert a name into a valid CouchDB user ID.
Parameters
----------
name : str
A user name.
Returns
-------
str : A valid CouchDB ID, i.e. of the form `org.couchdb.user:{name}`.
"""
return f"org.couchdb.user:{name}"
def check_response(response: requests.Response) -> None:
"""
Check if a request yields a successful response.
Parameters
----------
response : requests.Response
A `requests.Response` object.
Returns
-------
None
Raises
------
One of the following exceptions:
- couchdb3.error.CouchDBError
- ConnectionError
- TimeoutError
- requests.exceptions.ConnectionError
- requests.exceptions.HTTPError
"""
try:
response.raise_for_status()
except (
ConnectionError,
TimeoutError,
requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
) as err:
if response.status_code in exceptions.STATUS_CODE_ERROR_MAPPING:
_ = exceptions.STATUS_CODE_ERROR_MAPPING[response.status_code]
if _:
raise _(response.text)
else:
return None
raise err
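# Typical use (a sketch):
#   response = requests.get(url)
#   check_response(response)  # raises a mapped CouchDB error on failure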
def content_type_getter(
file_name: str = None,
data: Any = None
) -> Optional[str]:
"""
Get the appropriate content-type.
If the argument `file_name` is provided, the content-type will be determined by matching the file extension against
keys of `MIME_TYPES_MAPPING`. If no match was found, then `"application/octet-stream"` will be returned.
Alternatively, if the argument `data` is provided the conent-type returned will be
- `"application/json"` if `data` is a dictionary or a list
- `"text/plain"` else
Parameters
----------
file_name : str
A file name.
data : Any
A Python object.
Returns
-------
str : A valid content-type.
"""
if file_name:
for ext, mime_type in MIME_TYPES_MAPPING.items():
if file_name.endswith(f".{ext}"):
return mime_type
return DEFAULT_FALLBACK_MIME_TYPE
elif data:
if isinstance(data, (dict, list)):
return "application/json"
else:
return DEFAULT_TEXTUAL_MIME_TYPE
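# For example: content_type_getter(file_name="photo.jpg") -> "image/jpeg";
# content_type_getter(data={"a": 1}) -> "application/json".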
def extract_url_data(url: str) -> Dict:
"""
Extract scheme, credentials, host, port & path from a URL.
Parameters
----------
url : str
A URL string.
Returns
-------
    Dict : A dictionary containing the following items.
- scheme
- user
- password
- host
- port
- path
"""
if not any(url.startswith(_) for _ in VALID_SCHEMES):
url = f"http://{url}"
parsed = parse_url(url)
return {
"scheme": parsed.scheme,
"user": parsed.auth.split(":")[0] if hasattr(parsed.auth, "split") else None,
"password": parsed.auth.split(":")[1] if hasattr(parsed.auth, "split") else None,
"host": parsed.host,
"port": parsed.port,
"path": parsed.path
}
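# For example ("secret" below is a placeholder credential):
#   extract_url_data("http://admin:secret@localhost:5984/mydb")
#   -> {'scheme': 'http', 'user': 'admin', 'password': 'secret',
#       'host': 'localhost', 'port': 5984, 'path': '/mydb'}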
def partitioned_db_resource_parser(
resource: str = None,
partition: str = None,
) -> Optional[str]:
"""
Build resource path with optional partition ID.
Parameters
----------
resource : str
The resource to fetch (relative to the host). Default `None`.
partition: str
An optional partition ID. Only valid for partitioned databases. (Default `None`.)
Returns
----------
The (relative) path of the resource.
"""
return f"_partition/{partition}/{resource}" if partition else resource | PypiClean |
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/xinha/modules/Dialogs/XinhaDialog.js | Xinha.Dialog=function(g,k,a,m,f){var r=this;this.id={};this.r_id={};this.editor=g;this.document=document;this.size=m;this.modal=(f&&f.modal===false)?false:true;this.closable=(f&&f.closable===false)?false:true;this.resizable=(f&&f.resizable===false)?false:true;this.layer=(f&&f.layer)?f.layer:0;this.centered=(f&&f.centered===true)?true:false;this.closeOnEscape=(f&&f.closeOnEscape===true)?true:false;this.rootElem=null;this.captionBar=null;this.main=null;this.background=null;this.centered=null;this.greyout=null;this.buttons=null;this.closer=null;this.icon=null;this.resizer=null;this.initialZ=null;var b=g.config.dialogOptions;if(b){if(typeof b.centered!="undefined"){this.centered=b.centered}if(typeof b.resizable!="undefined"){this.resizable=b.resizable}if(typeof b.closable!="undefined"){this.closable=b.closable}if(typeof b.greyout!="undefined"){this.greyout=b.greyout}if(typeof b.closeOnEscape!="undefined"){this.closeOnEscape=b.closeOnEscape}}var n;if(Xinha.is_ie){n=document.createElement("iframe");n.src="about:blank";n.onreadystatechange=function(){var w=window.event.srcElement.contentWindow.document;if(this.readyState=="complete"&&w&&w.body){var y=w.createElement("div");var s,x=document.styleSheets;for(var p=0;p<x.length;p++){if(x[p].id.indexOf("Xinha")!=-1&&x[p].cssText){s+=x[p].cssText}}y.innerHTML='<br><style type="text/css">\n'+s+"\n</style>";w.getElementsByTagName("body")[0].appendChild(y);w.body.className="xinha_dialog_background";if(r.modal){w.body.className+="_modal"}if(r.greyout){w.body.className+="_greyout"}}}}else{n=document.createElement("div")}n.className="xinha_dialog_background";if(this.modal){n.className+="_modal"}if(this.greyout){n.className+="_greyout"}var j=1000;if(!Xinha.Dialog.initialZ){var o=g._htmlArea;while(o){if(o.style&&parseInt(o.style.zIndex,10)>j){j=parseInt(o.style.zIndex,10)}o=o.parentNode}Xinha.Dialog.initialZ=j}j=Xinha.Dialog.initialZ;var l=n.style;l.position="absolute";l.top=0;l.left=0;l.border="none";l.overflow="hidden";l.display="none";l.zIndex=(this.modal?j+25:j+1)+this.layer;document.body.appendChild(n);this.background=n;n=null;Xinha.freeLater(this,"background");var c=document.createElement("div");c.style.position=(Xinha.is_ie||!this.modal)?"absolute":"fixed";c.style.zIndex=(this.modal?j+27:j+3)+this.layer;c.style.display="none";if(!this.modal){Xinha._addEvent(c,"mousedown",function(){Xinha.Dialog.activateModeless(r)})}c.className="dialog"+(this.modal?"":" modeless");if(Xinha.is_chrome){c.className+=" chrome"}document.body.appendChild(c);c.style.paddingBottom="10px";c.style.width=(m&&m.width)?m.width+"px":"";if(m&&m.height){if(Xinha.ie_version<7){c.style.height=m.height+"px"}else{c.style.minHeight=m.height+"px"}}k=this.translateHtml(k,a);var h=document.createElement("div");c.appendChild(h);h.innerHTML=k;this.fixupDOM(h,a);var u=h.removeChild(h.getElementsByTagName("h1")[0]);c.insertBefore(u,h);Xinha._addEvent(u,"mousedown",function(i){r.dragStart(i)});u.style.MozUserSelect="none";u.style.WebkitUserSelect="none";u.unselectable="on";u.onselectstart=function(){return false};this.buttons=document.createElement("div");l=this.buttons.style;l.position="absolute";l.top="0";l.right="2px";c.appendChild(this.buttons);if(this.closable&&this.closeOnEscape){Xinha._addEvent(document,"keypress",function(i){if(i.keyCode==27){if(Xinha.Dialog.activeModeless==r||r.modal){r.hide();return 
true}}})}this.closer=null;if(this.closable){this.closer=document.createElement("div");this.closer.className="closeButton";this.closer.onmousedown=function(i){this.className="closeButton buttonClick";Xinha._stopEvent(Xinha.getEvent(i));return false};this.closer.onmouseout=function(i){this.className="closeButton";Xinha._stopEvent(Xinha.getEvent(i));return false};this.closer.onmouseup=function(){this.className="closeButton";r.hide();return false};this.buttons.appendChild(this.closer);var v=document.createElement("span");v.className="innerX";v.style.position="relative";v.style.top="-3px";v.appendChild(document.createTextNode("\u00D7"));this.closer.appendChild(v);v=null}this.icon=document.createElement("img");var t=this.icon;t.className="icon";t.src=g.config.iconList.dialogCaption;t.style.position="absolute";t.style.top="3px";t.style.left="2px";t.ondrag=function(){return false};u.style.paddingLeft="22px";c.appendChild(this.icon);var e=c.getElementsByTagName("*");for(var q=0;q<e.length;q++){var d=e[q];if(d.tagName.toLowerCase()=="textarea"||d.tagName.toLowerCase()=="input"){}else{d.unselectable="on"}}this.resizer=null;if(this.resizable){this.resizer=document.createElement("div");this.resizer.className="resizeHandle";l=this.resizer.style;l.position="absolute";l.bottom="0px";l.right="0px";l.MozUserSelect="none";Xinha._addEvent(this.resizer,"mousedown",function(i){r.resizeStart(i)});c.appendChild(this.resizer)}this.rootElem=c;this.captionBar=u;this.main=h;u=null;c=null;h=null;Xinha.freeLater(this,"rootElem");Xinha.freeLater(this,"captionBar");Xinha.freeLater(this,"main");Xinha.freeLater(this,"buttons");Xinha.freeLater(this,"closer");Xinha.freeLater(this,"icon");Xinha.freeLater(this,"resizer");Xinha.freeLater(this,"document");this.size={}};Xinha.Dialog.prototype.onresize=function(){return true};Xinha.Dialog.prototype.show=function(m){var c=this.rootElem;var n=c.style;var l=this.modal;var b=this.editor.scrollPos();this.scrollPos=b;var i=this;if(this.attached){this.editor.showPanel(c)}if(Xinha._someEditorHasBeenActivated){this._lastRange=this.editor.saveSelection();if(Xinha.is_ie&&!l){i.saveSelection=function(){i._lastRange=i.editor.saveSelection()};Xinha._addEvent(this.editor._doc,"mouseup",i.saveSelection)}}if(l){this.editor.deactivateEditor();this.editor.suspendUpdateToolbar=true;this.editor.currentModal=i}if(Xinha.is_ff2&&l){this._restoreTo=[this.editor._textArea.style.display,this.editor._iframe.style.visibility,this.editor.hidePanels()];this.editor._textArea.style.display="none";this.editor._iframe.style.visibility="hidden"}if(!this.attached){if(l){this.showBackground();this.posBackground({top:0,left:0});this.resizeBackground(Xinha.Dialog.calcFullBgSize())}else{this.background.style.display=""}Xinha.Dialog.fadeIn(this.rootElem,100,function(){if(l){var o=i.rootElem.getElementsByTagName("input");for(var p=0;p<o.length;p++){if(o[p].type=="text"){o[p].focus();break}}}});var k=c.offsetHeight;var e=c.offsetWidth;var h=Xinha.viewportSize();var f=h.y;var g=h.x;if(k>f){n.height=f+"px";if(c.scrollHeight>k){i.main.style.overflowY="auto"}}if(this.size.top&&this.size.left){n.top=parseInt(this.size.top,10)+"px";n.left=parseInt(this.size.left,10)+"px"}else{if(this.editor.btnClickEvent&&!this.centered){var 
a=this.editor.btnClickEvent;if(n.position=="absolute"){n.top=a.clientY+this.scrollPos.y+"px"}else{n.top=a.clientY+"px"}if(k+c.offsetTop>f){n.top=(n.position=="absolute"?this.scrollPos.y:0)+"px"}if(n.position=="absolute"){n.left=a.clientX+this.scrollPos.x+"px"}else{n.left=a.clientX+"px"}if(e+c.offsetLeft>g){n.left=a.clientX-e+"px";if(c.offsetLeft<0){n.left=0}}this.editor.btnClickEvent=null}else{var j=(f-k)/2;var d=(g-e)/2;n.top=((j>0)?j:0)+"px";n.left=((d>0)?d:0)+"px"}}}this.width=e;this.height=k;if(!l){this.resizeBackground({width:e+"px",height:k+"px"});this.posBackground({top:n.top,left:n.left})}if(typeof m!="undefined"){this.setValues(m)}this.dialogShown=true};Xinha.Dialog.prototype.hide=function(){if(this.attached){this.editor.hidePanel(this.rootElem)}else{Xinha.Dialog.fadeOut(this.rootElem);this.hideBackground();var a=this;if(Xinha.is_ff2&&this.modal){this.editor._textArea.style.display=this._restoreTo[0];this.editor._iframe.style.visibility=this._restoreTo[1];this.editor.showPanels(this._restoreTo[2])}if(!this.editor._isFullScreen&&this.modal){window.scroll(this.scrollPos.x,this.scrollPos.y)}if(Xinha.is_ie&&!this.modal){Xinha._removeEvent(this.editor._doc,"mouseup",a.saveSelection)}if(this.modal){this.editor.suspendUpdateToolbar=false;this.editor.currentModal=null;this.editor.activateEditor()}}if(this.modal){this.editor.restoreSelection(this._lastRange)}this.dialogShown=false;this.editor.updateToolbar();this.editor.focusEditor();return this.getValues()};Xinha.Dialog.prototype.toggle=function(){if(this.rootElem.style.display=="none"){this.show()}else{this.hide()}};Xinha.Dialog.prototype.collapse=function(){if(this.collapsed){this.collapsed=false;this.show()}else{this.main.style.height=0;this.collapsed=true}};Xinha.Dialog.prototype.getElementById=function(a){return this.document.getElementById(this.id[a]?this.id[a]:a)};Xinha.Dialog.prototype.getElementsByName=function(a){var b=this.document.getElementsByName(this.id[a]?this.id[a]:a);return Xinha.collectionToArray(b)};Xinha.Dialog.prototype.getElementsByClassName=function(a){return Xinha.getElementsByClassName(this.rootElem,a)};Xinha.Dialog.prototype.dragStart=function(c){if(this.attached||this.dragging){return}if(!this.modal){this.posBackground({top:0,left:0});this.resizeBackground(Xinha.Dialog.calcFullBgSize());this.editor.suspendUpdateToolbar=true}c=Xinha.getEvent(c);var b=this;b.dragging=true;b.scrollPos=b.editor.scrollPos();var a=b.rootElem.style;b.xOffs=c.offsetX||c.layerX;b.yOffs=c.offsetY||c.layerY;b.mouseMove=function(d){b.dragIt(d)};Xinha._addEvent(document,"mousemove",b.mouseMove);if(Xinha.is_ie){Xinha._addEvent(this.background.contentWindow.document,"mousemove",b.mouseMove)}b.mouseUp=function(d){b.dragEnd(d)};Xinha._addEvent(document,"mouseup",b.mouseUp);if(Xinha.is_ie){Xinha._addEvent(this.background.contentWindow.document,"mouseup",b.mouseUp)}};Xinha.Dialog.prototype.dragIt=function(c){var b=this;if(!b.dragging){return false}var d,e,a;if(b.rootElem.style.position=="absolute"){d=(c.clientY+this.scrollPos.y)-b.yOffs+"px";e=(c.clientX+this.scrollPos.x)-b.xOffs+"px";a={top:d,left:e}}else{if(b.rootElem.style.position=="fixed"){d=c.clientY-b.yOffs+"px";e=c.clientX-b.xOffs+"px";a={top:d,left:e}}}b.posDialog(a)};Xinha.Dialog.prototype.dragEnd=function(b){var a=this;if(!this.modal){this.editor.suspendUpdateToolbar=false}if(!a.dragging){return 
false}a.dragging=false;Xinha._removeEvent(document,"mousemove",a.mouseMove);if(Xinha.is_ie){Xinha._removeEvent(this.background.contentWindow.document,"mousemove",a.mouseMove)}Xinha._removeEvent(document,"mouseup",a.mouseUp);if(Xinha.is_ie){Xinha._removeEvent(this.background.contentWindow.document,"mouseup",a.mouseUp)}var c=a.rootElem.style;a.size.top=c.top;a.size.left=c.left;if(!this.modal){this.sizeBgToDialog()}};Xinha.Dialog.prototype.resizeStart=function(c){var b=this;if(b.resizing){return}b.resizing=true;if(!this.modal){this.editor.suspendUpdateToolbar=true;this.posBackground({top:0,left:0});this.resizeBackground(Xinha.Dialog.calcFullBgSize())}b.scrollPos=b.editor.scrollPos();var a=b.rootElem.style;a.minHeight="";a.overflow="hidden";b.xOffs=parseInt(a.left,10);b.yOffs=parseInt(a.top,10);b.mouseMove=function(d){b.resizeIt(d)};Xinha._addEvent(document,"mousemove",b.mouseMove);if(Xinha.is_ie){Xinha._addEvent(this.background.contentWindow.document,"mousemove",b.mouseMove)}b.mouseUp=function(d){b.resizeEnd(d)};Xinha._addEvent(document,"mouseup",b.mouseUp);if(Xinha.is_ie){Xinha._addEvent(this.background.contentWindow.document,"mouseup",b.mouseUp)}};Xinha.Dialog.prototype.resizeIt=function(c){var b=this;if(!b.resizing){return false}var d,e;if(b.rootElem.style.position=="absolute"){d=c.clientY+b.scrollPos.y;e=c.clientX+b.scrollPos.x}else{d=c.clientY;e=c.clientX}e-=b.xOffs;d-=b.yOffs;var a={};a.width=((e>10)?e:10)+8+"px";a.height=((d>10)?d:10)+"px";b.sizeDialog(a);b.width=b.rootElem.offsetWidth;b.height=b.rootElem.offsetHeight;b.onresize()};Xinha.Dialog.prototype.resizeEnd=function(b){var a=this;a.resizing=false;if(!this.modal){this.editor.suspendUpdateToolbar=false}Xinha._removeEvent(document,"mousemove",a.mouseMove);if(Xinha.is_ie){Xinha._removeEvent(this.background.contentWindow.document,"mouseup",a.mouseUp)}Xinha._removeEvent(document,"mouseup",a.mouseUp);if(Xinha.is_ie){Xinha._removeEvent(this.background.contentWindow.document,"mouseup",a.mouseUp)}a.size.width=a.rootElem.offsetWidth;a.size.height=a.rootElem.offsetHeight;if(!this.modal){this.sizeBgToDialog()}};Xinha.Dialog.prototype.attachToPanel=function(c){var b=this;var a=this.rootElem;var d=this.editor;this.attached=true;this.rootElem.side=c;this.captionBar.ondblclick=function(e){b.detachFromPanel(Xinha.getEvent(e))};a.style.position="static";a.parentNode.removeChild(a);this.background.style.display="none";this.captionBar.style.paddingLeft="3px";this.resizer.style.display="none";if(this.closable){this.closer.style.display="none"}this.icon.style.display="none";if(c=="left"||c=="right"){a.style.width=d.config.panel_dimensions[c]}else{a.style.width=""}Xinha.addClasses(a,"panel");d._panels[c].panels.push(a);d._panels[c].div.appendChild(a);d.notifyOf("panel_change",{action:"add",panel:a})};Xinha.Dialog.prototype.detachFromPanel=function(){var b=this;var a=b.rootElem;var d=a.style;var c=b.editor;b.attached=false;var e=Xinha.getElementTopLeft(a);d.position="absolute";d.top=e.top+"px";d.left=e.left+"px";b.captionBar.style.paddingLeft="22px";b.resizer.style.display="";if(b.closable){b.closer.style.display=""}b.icon.style.display="";if(b.size.width){a.style.width=b.size.width+"px"}Xinha.removeClasses(a,"panel");c.removePanel(a);document.body.appendChild(a);b.captionBar.ondblclick=function(){b.attachToPanel(a.side)};this.background.style.display="";this.sizeBgToDialog()};Xinha.Dialog.calcFullBgSize=function(){var b=Xinha.pageSize();var 
a=Xinha.viewportSize();return{width:(b.x>a.x?b.x:a.x)+"px",height:(b.x>a.y?b.y:a.y)+"px"}};Xinha.Dialog.prototype.sizeBgToDialog=function(){var a=this.rootElem.style;var b=this.background.style;b.top=a.top;b.left=a.left;b.width=a.width;b.height=a.height};Xinha.Dialog.prototype.hideBackground=function(){Xinha.Dialog.fadeOut(this.background)};Xinha.Dialog.prototype.showBackground=function(){Xinha.Dialog.fadeIn(this.background,70)};Xinha.Dialog.prototype.posBackground=function(a){if(this.background.style.display!="none"){this.background.style.top=a.top;this.background.style.left=a.left}};Xinha.Dialog.prototype.resizeBackground=function(a){if(this.background.style.display!="none"){this.background.style.width=a.width;this.background.style.height=a.height}};Xinha.Dialog.prototype.posDialog=function(b){var a=this.rootElem.style;a.left=b.left;a.top=b.top};Xinha.Dialog.prototype.sizeDialog=function(c){var b=this.rootElem.style;b.height=c.height;b.width=c.width;var d=parseInt(c.width,10);var a=parseInt(c.height,10)-this.captionBar.offsetHeight;this.main.style.height=(a>20)?a:20+"px";this.main.style.width=(d>10)?d:10+"px"};Xinha.Dialog.prototype.setValues=function(d){for(var g in d){if(typeof g=="string"){var c=this.getElementsByName(g);if(!c){continue}for(var a=0;a<c.length;a++){var h=c[a];switch(h.tagName.toLowerCase()){case"select":for(var f=0;f<h.options.length;f++){if(typeof d[g]=="object"){for(var b=0;b<d[g].length;b++){if(d[g][b]==h.options[f].value){h.options[f].selected=true}}}else{if(d[g]==h.options[f].value){h.options[f].selected=true}}}break;case"textarea":case"input":switch(h.getAttribute("type")){case"radio":if(h.value==d[g]){h.checked=true}break;case"checkbox":if(typeof d[g]=="object"){for(f in d[g]){if(d[g][f]==h.value){h.checked=true}}}else{if(d[g]==h.value){h.checked=true}}break;default:h.value=d[g];break}}}}}};Xinha.Dialog.prototype.getValues=function(){var d=[];var b=Xinha.collectionToArray(this.rootElem.getElementsByTagName("input")).append(Xinha.collectionToArray(this.rootElem.getElementsByTagName("textarea"))).append(Xinha.collectionToArray(this.rootElem.getElementsByTagName("select")));for(var a=0;a<b.length;a++){var f=b[a];if(!(f.name&&this.r_id[f.name])){continue}if(typeof d[this.r_id[f.name]]=="undefined"){d[this.r_id[f.name]]=null}var c=d[this.r_id[f.name]];switch(f.tagName.toLowerCase()){case"select":if(f.multiple){if(!c.push){if(c!==null){c=[c]}else{c=[]}}for(var e=0;e<f.options.length;e++){if(f.options[e].selected){c.push(f.options[e].value)}}}else{if(f.selectedIndex>=0){c=f.options[f.selectedIndex]}}break;default:switch(f.type.toLowerCase()){case"radio":if(f.checked){c=f.value}break;case"checkbox":if(c===null){if(this.getElementsByName(this.r_id[f.name]).length>1){c=[]}}if(f.checked){if(c!==null&&typeof c=="object"&&c.push){c.push(f.value)}else{c=f.value}}break;default:c=f.value;break}}d[this.r_id[f.name]]=c}return d};Xinha.Dialog.prototype.translateHtml=function(c,a){var b=this;if(typeof a=="function"){b._lc=a}else{if(a){this._lc=function(d){return Xinha._lc(d,a)}}else{this._lc=function(d){return d}}}c=c.replace(/((?:name)|(?:id))=(['"])\[([a-z0-9_]+)\]\2/ig,function(f,e,d,g){return e+"="+d+b.createId(g)+d}).replace(/<l10n>(.*?)<\/l10n>/ig,function(d,e){return b._lc(e)}).replace(/\="_\((.*?)\)"/g,function(d,e){return'="'+b._lc(e)+'"'});return c};Xinha.Dialog.prototype.fixupDOM=function(j,e){var g=this;if(typeof e!="string"){e="GenericPlugin"}var a=function(l,m){switch(m){case"editor":return _editor_url;case"plugin":return Xinha.getPluginDir(e);case"images":return 
g.editor.imgURL("images")}};var h=Xinha.collectionToArray(j.getElementsByTagName("img"));for(var f=0;f<h.length;++f){var b=h[f];var c=b.getAttribute("src");if(c){var d=c.replace(/^\[(editor|plugin|images)\]/,a);if(d!=c){b.setAttribute("src",d)}}}var k=Xinha.collectionToArray(j.getElementsByTagName("a"));for(var f=0;f<k.length;++f){var i=k[f];var c=i.getAttribute("href");if(c){var d=c.replace(/^\[(editor|plugin|images)\]/,a);if(d!=c){i.setAttribute("href",d)}}}};Xinha.Dialog.prototype.createId=function(b){var a=this;if(typeof a.id[b]=="undefined"){a.id[b]=Xinha.uniq("Dialog");a.r_id[a.id[b]]=b}return a.id[b]};Xinha.Dialog.activateModeless=function(a){if(Xinha.Dialog.activeModeless==a||a.attached){return}if(Xinha.Dialog.activeModeless){Xinha.Dialog.activeModeless.rootElem.style.zIndex=parseInt(Xinha.Dialog.activeModeless.rootElem.style.zIndex,10)-10}Xinha.Dialog.activeModeless=a;Xinha.Dialog.activeModeless.rootElem.style.zIndex=parseInt(Xinha.Dialog.activeModeless.rootElem.style.zIndex,10)+10};Xinha.Dialog.setOpacity=function(a,b){if(typeof a.style.filter!="undefined"){a.style.filter=(b<100)?"alpha(opacity="+b+")":""}else{a.style.opacity=b/100}};Xinha.Dialog.fadeIn=function(c,b,f,a,d){a=a||1;d=d||25;b=b||100;c.op=c.op||0;var e=c.op;if(c.style.display=="none"){Xinha.Dialog.setOpacity(c,0);c.style.display=""}if(e<b){c.op+=d;Xinha.Dialog.setOpacity(c,e);c.timeOut=setTimeout(function(){Xinha.Dialog.fadeIn(c,b,f,a,d)},a)}else{Xinha.Dialog.setOpacity(c,b);c.op=b;c.timeOut=null;if(typeof f=="function"){f.call()}}};Xinha.Dialog.fadeOut=function(b,a,c){a=a||1;c=c||30;if(typeof b.op=="undefined"){b.op=100}var d=b.op;if(d>=0){b.op-=c;Xinha.Dialog.setOpacity(b,d);b.timeOut=setTimeout(function(){Xinha.Dialog.fadeOut(b,a,c)},a)}else{Xinha.Dialog.setOpacity(b,0);b.style.display="none";b.op=0;b.timeOut=null}}; | PypiClean |
/django-chuck-0.2.3.tar.gz/django-chuck/modules/django-cms/project/apps/cmsplugin_filer_image/migrations/0007_rename_caption_to_caption_text.py | import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_column('cmsplugin_filerimage', 'caption', 'caption_text')
def backwards(self, orm):
db.rename_column('cmsplugin_filerimage', 'caption_text', 'caption')
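
    # South runs forwards() on migrate and backwards() on rollback; the
    # 'models' dict below is a frozen snapshot of the ORM at the time the
    # migration was generated, not the live model definitions.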
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_filer_image.filerimage': {
'Meta': {'object_name': 'FilerImage', 'db_table': "'cmsplugin_filerimage'", '_ormbases': ['cms.CMSPlugin']},
'alignment': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'caption_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'crop': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'free_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'original_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'thumbnail_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_filer_image.ThumbnailOption']", 'null': 'True', 'blank': 'True'}),
'upscale': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'use_autoscale': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cmsplugin_filer_image.thumbnailoption': {
'Meta': {'ordering': "('width', 'height')", 'object_name': 'ThumbnailOption'},
'crop': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'upscale': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cmsplugin_filer_image'] | PypiClean |
/BlueWhale3_Bioinformatics-4.1.32-py3-none-any.whl/orangecontrib/bioinformatics/widgets/OWGEODatasets.py | import sys
from types import SimpleNamespace
from typing import Any, Optional, DefaultDict
from functools import lru_cache
from collections import OrderedDict, defaultdict
import numpy as np
import requests
from AnyQt.QtGui import QFont, QColor
from AnyQt.QtCore import Qt, QSize, QVariant, QModelIndex
from AnyQt.QtWidgets import (
QStyle,
QSplitter,
QTableView,
QTreeWidget,
QTreeWidgetItem,
QAbstractItemView,
QAbstractScrollArea,
)
from Orange.data import Table
from Orange.widgets.gui import (
LinkRole,
IndicatorItemDelegate,
LinkStyledItemDelegate,
rubber,
lineEdit,
separator,
widgetBox,
auto_commit,
widgetLabel,
radioButtonsInBox,
)
from Orange.widgets.utils import itemmodels
from Orange.widgets.widget import Msg, OWWidget
from Orange.widgets.settings import Setting
from Orange.widgets.utils.signals import Output
from Orange.widgets.utils.concurrent import TaskState, ConcurrentWidgetMixin
from orangecontrib.bioinformatics.geo import is_cached, pubmed_url, local_files
from orangecontrib.bioinformatics.geo.dataset import GDSInfo, get_samples, dataset_download
from orangecontrib.bioinformatics.i18n_config import *
def __(key):
return i18n.t('bioinformatics.owgEODatasets.' + key)
class Result(SimpleNamespace):
gds_dataset: Optional[Table] = None
def run_download_task(gds_id: str, samples: DefaultDict[str, list], transpose: bool, state: TaskState):
res = Result()
current_iter = 0
max_iter = 102
def callback():
nonlocal current_iter
current_iter += 1
state.set_progress_value(100 * (current_iter / max_iter))
state.set_status(__("state.downloading"))
res.gds_dataset = dataset_download(gds_id, samples, transpose=transpose, callback=callback)
return res
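
# run_download_task is dispatched via ConcurrentWidgetMixin.start() (see
# OWGEODatasets._run below); the TaskState argument relays progress and
# status messages back to the widget while the download runs off the GUI
# thread.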
class GEODatasetsModel(itemmodels.PyTableModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setHorizontalHeaderLabels(
['', __("label.id"), __("label.pubmed_id"), __("label.organism"), __("label.sample"), __("label.feature"),
__("label.gene"), __("label.subset"), __("label.title")]
)
(
self.indicator_col,
self.gds_id_col,
self.pubmed_id_col,
self.organism_col,
self.samples_col,
self.features_col,
self.genes_col,
self.subsets_col,
self.title_col,
) = range(9)
self.info = None
self.table = None
self._sort_column = self.gds_id_col
self._sort_order = Qt.AscendingOrder
self.font = QFont()
self.font.setUnderline(True)
self.color = QColor(Qt.blue)
@lru_cache(maxsize=10000)
def _row_instance(row, column):
return self[int(row)][int(column)]
self._row_instance = _row_instance
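        # _row_instance memoizes per-instance cell lookups; the cache is
        # cleared in show_table() whenever the wrapped table changes.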
def initialize(self, info: OrderedDict):
self.info = info
def _gds_to_row(gds: dict):
gds_id = gds['name']
title = gds['title']
organism = gds['sample_organism']
samples = len(get_samples(gds))
features = gds['variables']
genes = gds['genes']
subsets = len(gds['subsets'])
pubmed = gds.get('pubmed_id', '')
pubmed_id = pubmed
if isinstance(pubmed, list) and len(pubmed) > 0:
pubmed_id = pubmed[0]
return [
' ' if is_cached(gds_id) else '',
gds_id,
pubmed_id,
organism,
samples,
features,
genes,
subsets,
title,
]
self.table = np.asarray([_gds_to_row(gds) for gds in info.values()])
self.show_table()
def _argsortData(self, data: np.ndarray, order) -> Optional[np.ndarray]:
if not data.size:
return
# store order choice.
self._sort_column = column = self.sortColumn()
self._sort_order = self.sortOrder()
if column == self.gds_id_col:
data = np.char.replace(data, 'GDS', '')
data = data.astype(int)
elif column in (self.samples_col, self.features_col, self.genes_col, self.subsets_col, self.pubmed_id_col):
data[data == ''] = '0'
data = data.astype(int)
indices = np.argsort(data, kind='mergesort')
if order == Qt.DescendingOrder:
indices = indices[::-1]
return indices
def columnCount(self, parent=QModelIndex()):
return 0 if parent.isValid() else self._table.shape[1]
def data(
self,
index,
role,
_str=str,
_Qt_DisplayRole=Qt.DisplayRole, # noqa: N803
_Qt_EditRole=Qt.EditRole,
_Qt_FontRole=Qt.FontRole,
_Qt_ForegroundRole=Qt.ForegroundRole,
        _LinkRole=LinkRole,
        _recognizedRoles=frozenset([Qt.DisplayRole, Qt.EditRole, Qt.ToolTipRole, Qt.FontRole, Qt.ForegroundRole, LinkRole]),  # ToolTipRole included so the tooltip branch below is reachable
):
if role not in _recognizedRoles:
return None
row, col = index.row(), index.column()
if not 0 <= row <= self.rowCount():
return None
row = self.mapToSourceRows(row)
try:
# value = self[row][col]
value = self._row_instance(row, col)
except IndexError:
return
if role == Qt.DisplayRole:
return QVariant(str(value))
elif role == Qt.ToolTipRole:
return QVariant(str(value))
if col == self.pubmed_id_col:
if role == _Qt_ForegroundRole:
return self.color
elif role == _Qt_FontRole:
return self.font
            elif role == _LinkRole:
return pubmed_url.format(value)
def get_row_index(self, gds_name):
# test = self._table[self._table[:, 1] == gds_name, :]
rows, _ = np.where(np.isin(self._table, gds_name))
if rows is not None and len(rows) > 0:
return self.mapFromSourceRows(rows[0])
def filter_table(self, filter_pattern: str):
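        # A cell matches only if it contains every search word as a
        # case-insensitive substring; show_table() keeps rows where at
        # least one cell matches all words.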
selection = np.full(self.table.shape, True)
for search_word in filter_pattern.split():
match_result = np.core.defchararray.find(np.char.lower(self.table), search_word.lower()) >= 0
selection = selection & match_result
return selection
def update_cache_indicator(self):
self.table[:, 0] = [' ' if is_cached(gid) else '' for gid in self.table[:, self.gds_id_col]]
def show_table(self, filter_pattern=''):
# clear cache if model changes
self._row_instance.cache_clear()
self.wrap(self.table[self.filter_table(filter_pattern).any(axis=1), :])
self.sort(self._sort_column, self._sort_order)
class OWGEODatasets(OWWidget, ConcurrentWidgetMixin):
name = __("name")
description = __("desc")
icon = "icons/OWGEODatasets.svg"
priority = 10
class Warning(OWWidget.Warning):
using_local_files = Msg(__("msg_using_local_files"))
class Error(OWWidget.Error):
no_connection = Msg(__("msg_no_connection"))
class Outputs:
gds_data = Output("Expression Data", Table, label=i18n.t("bioinformatics.common.expression_data"))
search_pattern = Setting('')
auto_commit = Setting(True)
genes_as_rows = Setting(False)
selected_gds = Setting(None)
gds_selection_states = Setting({})
splitter_settings = Setting(
(
b'\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x01'
b'\xea\x00\x00\x00\xd7\x01\x00\x00\x00\x07\x01\x00\x00\x00\x02',
b'\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x01'
b'\xb5\x00\x00\x02\x10\x01\x00\x00\x00\x07\x01\x00\x00\x00\x01',
)
)
def __init__(self):
OWWidget.__init__(self)
ConcurrentWidgetMixin.__init__(self)
try:
self.gds_info: Optional[GDSInfo] = GDSInfo()
except requests.exceptions.ConnectionError:
self.gds_info = {}
self.Error.no_connection()
return
self.gds_data: Optional[Table] = None
# Control area
box = widgetBox(self.controlArea, __("box.info"), addSpace=True)
self.infoBox = widgetLabel(box, 'Initializing\n\n')
box = widgetBox(self.controlArea, __("box.output"), addSpace=True)
radioButtonsInBox(box, self, 'genes_as_rows', [__("btn_sample_row"), __("btn_gene_row")], callback=self._run)
separator(box)
rubber(self.controlArea)
auto_commit(self.controlArea, self, 'auto_commit', __("btn_commit"), box=False)
# Main Area
# Filter widget
self.filter = lineEdit(
self.mainArea, self, 'search_pattern', __("box.filter"), callbackOnType=True, callback=self._apply_filter
)
self.mainArea.layout().addWidget(self.filter)
splitter_vertical = QSplitter(Qt.Vertical, self.mainArea)
self.mainArea.layout().addWidget(splitter_vertical)
# set table view
self.table_view = QTableView(splitter_vertical)
self.table_view.setShowGrid(False)
self.table_view.setSortingEnabled(True)
self.table_view.sortByColumn(1, Qt.AscendingOrder)
self.table_view.setAlternatingRowColors(True)
self.table_view.verticalHeader().setVisible(False)
self.table_view.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table_view.setSelectionMode(QAbstractItemView.SingleSelection)
self.table_view.viewport().setMouseTracking(True)
self.table_view.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
self.table_model = GEODatasetsModel()
self.table_model.initialize(self.gds_info)
self.table_view.setModel(self.table_model)
self.table_view.horizontalHeader().setStretchLastSection(True)
self.table_view.resizeColumnsToContents()
v_header = self.table_view.verticalHeader()
option = self.table_view.viewOptions()
size = self.table_view.style().sizeFromContents(QStyle.CT_ItemViewItem, option, QSize(20, 20), self.table_view)
v_header.setDefaultSectionSize(size.height() + 2)
v_header.setMinimumSectionSize(5)
# set item delegates
self.table_view.setItemDelegateForColumn(
self.table_model.pubmed_id_col, LinkStyledItemDelegate(self.table_view)
)
self.table_view.setItemDelegateForColumn(self.table_model.gds_id_col, LinkStyledItemDelegate(self.table_view))
self.table_view.setItemDelegateForColumn(
self.table_model.indicator_col, IndicatorItemDelegate(self.table_view, role=Qt.DisplayRole)
)
splitter_horizontal = QSplitter(Qt.Horizontal, splitter_vertical)
# Description Widget
box = widgetBox(splitter_horizontal, __("box.desc"))
self.description_widget = widgetLabel(box, '')
self.description_widget.setWordWrap(True)
rubber(box)
# Sample Annotations Widget
box = widgetBox(splitter_horizontal, __("box.sample_annotation"))
self.annotations_widget = QTreeWidget(box)
self.annotations_widget.setHeaderLabels([__("label.type"), __("label.sample_count")])
self.annotations_widget.setRootIsDecorated(True)
box.layout().addWidget(self.annotations_widget)
self._annotations_updating = False
self.annotations_widget.itemChanged.connect(self.on_annotation_selection_changed)
self.splitters = splitter_vertical, splitter_horizontal
for sp, setting in zip(self.splitters, self.splitter_settings):
sp.splitterMoved.connect(self._splitter_moved)
sp.restoreState(setting)
self.table_view.selectionModel().selectionChanged.connect(self.on_gds_selection_changed)
self._apply_filter()
self.commit()
def _splitter_moved(self, *args):
self.splitter_settings = [bytes(sp.saveState()) for sp in self.splitters]
def _set_description_widget(self):
self.description_widget.setText(self.selected_gds.get('description', 'Description not available.'))
def _set_annotations_widget(self, gds):
self._annotations_updating = True
self.annotations_widget.clear()
annotations = defaultdict(set)
subsets_count = {}
for desc in gds['subsets']:
annotations[desc['type']].add(desc['description'])
subsets_count[desc['description']] = str(len(desc['sample_id']))
for _type, subsets in annotations.items():
key = (gds["name"], _type)
parent = QTreeWidgetItem(self.annotations_widget, [_type])
parent.key = key
for subset in subsets:
key = (gds['name'], _type, subset)
item = QTreeWidgetItem(parent, [subset, subsets_count.get(subset, '')])
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(0, self.gds_selection_states.get(key, Qt.Checked))
item.key = key
self._annotations_updating = False
self.annotations_widget.expandAll()
for i in range(self.annotations_widget.columnCount()):
self.annotations_widget.resizeColumnToContents(i)
def _set_selection(self):
if self.selected_gds is not None:
index = self.table_model.get_row_index(self.selected_gds.get('name'))
if index is not None:
self.table_view.selectionModel().blockSignals(True)
self.table_view.selectRow(index)
self._handle_selection_changed()
self.table_view.selectionModel().blockSignals(False)
def _handle_selection_changed(self):
if self.table_model.table is not None:
selection = self.table_view.selectionModel().selectedRows(self.table_model.gds_id_col)
selected_gds_name = selection[0].data() if len(selection) > 0 else None
if selected_gds_name:
self.selected_gds = self.table_model.info.get(selected_gds_name)
self._set_annotations_widget(self.selected_gds)
self._set_description_widget()
else:
self.annotations_widget.clear()
self.description_widget.clear()
self.update_info()
def _apply_filter(self):
if self.table_model.table is not None:
self.table_model.show_table(filter_pattern=str(self.search_pattern))
self._set_selection()
self.update_info()
def _run(self):
self.Warning.using_local_files.clear()
if self.selected_gds is not None:
self.gds_data = None
self.start(
run_download_task, self.selected_gds.get('name'), self.get_selected_samples(), self.genes_as_rows
)
def on_gds_selection_changed(self):
self._handle_selection_changed()
self.commit()
def on_annotation_selection_changed(self):
if self._annotations_updating:
return
for i in range(self.annotations_widget.topLevelItemCount()):
item = self.annotations_widget.topLevelItem(i)
if 'key' in item.__dict__:
self.gds_selection_states[item.key] = item.checkState(0)
for j in range(item.childCount()):
child = item.child(j)
if 'key' in child.__dict__:
self.gds_selection_states[child.key] = child.checkState(0)
self.commit()
def update_info(self):
all_gds = len(self.table_model.info)
text = __("row_data_info").format(all_gds, len(local_files.listfiles()))
filtered = self.table_view.model().rowCount()
if all_gds != filtered:
text += __("row_after_filter").format(filtered)
self.infoBox.setText(text)
def get_selected_samples(self):
"""
Return the currently selected sample annotations.
        The return value is a dict mapping each sample annotation type to
        the list of selected values under it.
        .. note:: If some sample annotation type has no selected values,
        this method will return all values for it.
TODO: this could probably be simplified.
"""
def childiter(item):
""" Iterate over the children of an QTreeWidgetItem instance.
"""
for i in range(item.childCount()):
yield item.child(i)
samples = []
unused_types = []
used_types = []
for stype in childiter(self.annotations_widget.invisibleRootItem()):
selected_values = []
all_values = []
for sval in childiter(stype):
value = (str(stype.text(0)), str(sval.text(0)))
if self.gds_selection_states.get(sval.key, True):
selected_values.append(value)
all_values.append(value)
if selected_values:
samples.extend(selected_values)
used_types.append(str(stype.text(0)))
else:
# If no sample of sample type is selected we don't filter on it.
samples.extend(all_values)
unused_types.append(str(stype.text(0)))
_samples = defaultdict(list)
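        # Note the unpacking order below: each tuple is (annotation type,
        # value), so 'sample' is the annotation type and 'sample_type' the
        # value; e.g. (hypothetical) _samples == {'agent': ['control', 'treated']}.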
for sample, sample_type in samples:
_samples[sample].append(sample_type)
return _samples
def commit(self):
self._run()
def on_exception(self, ex: Exception):
self.Warning.using_local_files()
def on_done(self, result: Result):
assert isinstance(result.gds_dataset, Table)
self.gds_data = result.gds_dataset
if self.gds_info:
self.table_model.update_cache_indicator()
self._apply_filter()
self.Outputs.gds_data.send(self.gds_data)
def on_partial_result(self, result: Any) -> None:
pass
def onDeleteWidget(self):
self.shutdown()
super().onDeleteWidget()
def send_report(self):
self.report_items(
__("report.geo_dataset"),
[
(__("report.id"), self.selected_gds['name']),
(__("report.title"), self.selected_gds['title']),
(__("report.organism"), self.selected_gds['sample_organism']),
],
)
self.report_items(
__("report.data"),
[
(__("report.samples"), self.selected_gds['sample_count']),
(__("report.features"), self.selected_gds['variables']),
(__("report.genes"), self.selected_gds['genes']),
],
)
self.report_name(__("report.sample_annotations"))
subsets = defaultdict(list)
for subset in self.selected_gds['subsets']:
subsets[subset['type']].append((subset['description'], len(subset['sample_id'])))
self.report_html += "<ul>"
for _type in subsets:
self.report_html += "<b>" + _type + ":</b></br>"
for desc, count in subsets[_type]:
self.report_html += 9 * " " + "<b>{}:</b> {}</br>".format(desc, count)
self.report_html += "</ul>"
if __name__ == "__main__":
def main_test():
from AnyQt.QtWidgets import QApplication
app = QApplication([])
w = OWGEODatasets()
w.show()
w.raise_()
r = app.exec_()
w.saveSettings()
return r
sys.exit(main_test()) | PypiClean |
/BCPy2000-1.6.tar.gz/BCPy2000-1.6/src/BCI2000Tools/NIASourceModule.py | import time
import numpy
import SigTools
import pylibusb as usb # NB: the original pynia-0.0.2.py uses a module named usb for usb access, but usb is for python 2.6 only.
import ctypes

# Minimal fallback: debug() is called in NIA_Interface.open() below but is
# never defined or imported in this module, so without it that code path
# would raise a NameError.
def debug(msg):
	print(msg)
class NIA_Interface(object):
""" Attaches the NIA device, and provides low level data collection and information
"""###
def __init__(self,):
self.VENDOR_ID = 0x1234 #: Vendor Id
self.PRODUCT_ID = 0x0000 #: Product Id for the bridged usb cable
self.TIME_OUT = 1000
self.handle = None
self.device = None
		found = False
usb.init()
if not usb.get_busses():
usb.find_busses()
usb.find_devices()
buses = usb.get_busses()
for bus in buses :
for device in bus.devices :
if device.descriptor.idVendor == self.VENDOR_ID and device.descriptor.idProduct == self.PRODUCT_ID:
found = True
break
if found:
break
if not found:
raise RuntimeError("Cannot find device")
interface_nr = 0
self.device = device
self.config = self.device.config[0]
self.interface = interface_nr #self.interface = self.config.interfaces[0][0]
self.ENDPOINT1 = 0x81 #self.interface.endpoint[0].bEndpointAddress #self.ENDPOINT1 = self.interface.endpoints[0].address
self.ENDPOINT2 = 0x01 #self.interface.endpoints[1].address
self.PACKET_LENGTH = 56 #self.interface.endpoints[0].maxPacketSize
	def open(self):
		""" Attach the NIA interface
"""###
if not self.device:
raise RuntimeError("Cable isn't plugged in")
self.handle = usb.open(self.device)
if hasattr(usb,'get_driver_np'):
# non-portable libusb extension
name = usb.get_driver_np(self.handle,self.interface)
if name != '':
debug("attached to kernel driver '%s', detaching."%name )
usb.detach_kernel_driver_np(self.handle,self.interface) #self.handle.detachKernelDriver(0)
#self.handle.detachKernelDriver(1)
usb.set_configuration(self.handle, self.config.bConfigurationValue) #self.handle.setConfiguration(self.config)
usb.claim_interface(self.handle, self.interface) #self.handle.claimInterface(self.interface)
#self.handle.setAltInterface(self.interface)
self.INPUT_BUFFER = ctypes.create_string_buffer(self.PACKET_LENGTH)
def close(self):
""" Release NIA interface
"""###
usb.close(self.handle) #self.handle.reset()
# self.handle.releaseInterface()
self.handle, self.device = None, None
def read(self):
""" Read data off the NIA from its internal buffer of up to 16 samples
"""###
usb.interrupt_read(self.handle,self.ENDPOINT1,self.INPUT_BUFFER,self.TIME_OUT);
return self.INPUT_BUFFER
#################################################################
#################################################################
class BciSource(BciGenericSource):
#############################################################
def Description(self):
return "records from the NIA"
#############################################################
def Construct(self):
parameters = [
"Source:Signal%20Properties:DataIOFilter int SourceCh= 1 1 1 % // number of digitized and stored channels",
"Source:Signal%20Properties:DataIOFilter list ChannelNames= 1 NIA % % % // list of channel names",
"Source:Signal%20Properties:DataIOFilter floatlist SourceChOffset= 1 0 0 % % // Offset for channels in A/D units",
"Source:Signal%20Properties:DataIOFilter floatlist SourceChGain= 1 1e-3 1 % % // gain for each channel (A/D units -> muV)",
"Source:Online%20Processing:TransmissionFilter list TransmitChList= 1 1 % % % // list of transmitted channels",
"Source:NIA%20Recording int HardwareSamplingRate= 3900 3900 1 % // sampling rate at which the NIA natively runs",
"Source:NIA%20Recording float HardwareChunkMsec= 2.0 2.0 0 % // milliseconds of signal to record at a time",
"Source:NIA%20Recording float NIABufferSizeMsec= 10000 10000 0 % // size of ring buffer",
"Source:NIA%20Recording int DSFilterOrder= 10 10 2 % // order of pre-decimation lowpass-filter used before decimation",
"Source:NIA%20Recording float DSFilterFreqFactor= 0.4 0.4 0 1 // lowpass cutoff of pre-decimation filter expressed as a proportion of the desired Nyquist frequency",
]
states = [
]
self._add_thread('listen', self.Listen).start()
return (parameters, states)
#############################################################
def Initialize(self, indim, outdim):
self.warp = 1000.0 # let the samples flowing into the ring buffer set the pace
self.eegfs = self.samplingrate()
self.hwfs = int(self.params['HardwareSamplingRate'])
self.chunk = SigTools.msec2samples(float(self.params['HardwareChunkMsec']), self.hwfs)
ringsize = SigTools.msec2samples(float(self.params['NIABufferSizeMsec']), self.hwfs)
self.ring = SigTools.ring(ringsize, indim[0])
self.ring.allow_overflow = True
self.nominal['HardwareSamplesPerPacket'] = SigTools.msec2samples(self.nominal['SecondsPerPacket']*1000.0, self.hwfs)
cutoff = float(self.params['DSFilterFreqFactor']) * self.eegfs / 2.0
order = int(self.params['DSFilterOrder'])
if order > 0 and cutoff > 0.0:
self.filter = SigTools.causalfilter(freq_hz=cutoff, samplingfreq_hz=self.hwfs, order=order, type='lowpass') #, method=SigTools.firdesign)
else:
self.filter = None
self.dsind = numpy.linspace(0.0, self.nominal['HardwareSamplesPerPacket'], self.nominal['SamplesPerPacket']+1, endpoint=True)
		self.dsind = numpy.round(self.dsind).astype(int).tolist() # plain int: numpy.int is deprecated/removed in newer NumPy
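		# dsind lists the hardware-sample indices used to decimate each packet;
		# the extra endpoint lets Process() slice with dsind[:-1] to obtain
		# exactly SamplesPerPacket output samples.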
self._threads['listen'].post('start')
self._threads['listen'].read('ready', wait=True, remove=True)
self._check_threads()
#############################################################
def Halt(self):
self._threads['listen'].post('stop')
self._check_threads()
#############################################################
def Process(self, sig):
ns = int(self.nominal['HardwareSamplesPerPacket'])
while self.ring.to_read() < ns:
time.sleep(0.001)
if self._check_threads(): break
x = self.ring.read(ns)
nch = min([x.shape[0], sig.shape[0]])
x = numpy.asarray(x[:nch, :])
out = numpy.asarray(sig[:nch, :])
#sig[nch:, :] = 0
packetsize = int(self.nominal['SamplesPerPacket'])
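		# fill any surplus output channels with a running global sample index
		# (a simple built-in timing signal)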
sig[nch:, :] = self.packet_count * packetsize + numpy.array(list(range(packetsize)), ndmin=2, dtype='float').repeat(sig.shape[0]-nch, axis=0)
# low-pass
		if self.filter is not None: x = self.filter.apply(x, axis=1)
# downsample
out[:, :] = x[:, self.dsind[:-1]] # out is a view into a slice of sig
return sig
#############################################################
def Listen(self, mythread):
mythread.read('stop', remove=True)
mythread.read('start', wait=True, remove=True)
nchan = self.ring.channels()
nsamp = int(round(self.chunk))
# initialization of NIA
self.interface = NIA_Interface()
self.interface.open()
mythread.read('stop', remove=True)
mythread.post('ready', wait=True)
while not mythread.read('stop'):
data = [] # prepares a new list to store the read NIA data
while len(data) < nsamp: # was set to perform self.Points=25 reads at a time---why 25, we don't know exactly
time.sleep(0.001)
raw = self.interface.read()
nread = ord(raw[54]) # byte 54 gives the number of samples
for t in range(nread):
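					# reassemble each 24-bit little-endian sample from its 3 raw bytes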
val = ord(raw[t*3+2])*65536 + ord(raw[t*3+1])*256 + ord(raw[t*3])
data.append(val)
data = numpy.array([data])
self.ring.write(data)
# de-initialization of NIA
self.interface.close()
self.interface = None
#################################################################
################################################################# | PypiClean |
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/argonaut/public/ckeditor/plugins/clipboard/dialogs/paste.js | /*
Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.dialog.add('paste',function(a){var b=a.lang.clipboard,c=CKEDITOR.env.isCustomDomain();function d(e){var f=new CKEDITOR.dom.document(e.document),g=f.$;f.getById('cke_actscrpt').remove();CKEDITOR.env.ie?g.body.contentEditable='true':g.designMode='on';if(CKEDITOR.env.ie&&CKEDITOR.env.version<8)f.getWindow().on('blur',function(){g.selection.empty();});f.on('keydown',function(h){var i=h.data,j=i.getKeystroke(),k;switch(j){case 27:this.hide();k=1;break;case 9:case CKEDITOR.SHIFT+9:this.changeFocus(true);k=1;}k&&i.preventDefault();},this);a.fire('ariaWidget',new CKEDITOR.dom.element(e.frameElement));};return{title:b.title,minWidth:CKEDITOR.env.ie&&CKEDITOR.env.quirks?370:350,minHeight:CKEDITOR.env.quirks?250:245,onShow:function(){this.parts.dialog.$.offsetHeight;var e='<html dir="'+a.config.contentsLangDirection+'"'+' lang="'+(a.config.contentsLanguage||a.langCode)+'">'+'<head><style>body { margin: 3px; height: 95%; } </style></head><body>'+'<script id="cke_actscrpt" type="text/javascript">'+'window.parent.CKEDITOR.tools.callFunction( '+CKEDITOR.tools.addFunction(d,this)+', this );'+'</script></body>'+'</html>',f=CKEDITOR.dom.element.createFromHtml('<iframe class="cke_pasteframe" frameborder="0" allowTransparency="true"'+(c?" src=\"javascript:void((function(){document.open();document.domain='"+document.domain+"';"+'document.close();'+'})())"':'')+' role="region"'+' aria-label="'+b.pasteArea+'"'+' aria-describedby="'+this.getContentElement('general','pasteMsg').domId+'"'+' aria-multiple="true"'+'></iframe>');f.on('load',function(j){j.removeListener();var k=f.getFrameDocument().$;k.open();if(c)k.domain=document.domain;k.write(e);k.close();},this);f.setCustomData('dialog',this);var g=this.getContentElement('general','editing_area'),h=g.getElement();h.setHtml('');h.append(f);if(CKEDITOR.env.ie){var i=CKEDITOR.dom.element.createFromHtml('<span tabindex="-1" style="position:absolute;" role="presentation"></span>');i.on('focus',function(){f.$.contentWindow.focus();});h.append(i);g.focus=function(){i.focus();this.fire('focus');};}g.getInputElement=function(){return f;};if(CKEDITOR.env.ie){h.setStyle('display','block');h.setStyle('height',f.$.offsetHeight+2+'px');}},onHide:function(){if(CKEDITOR.env.ie)this.getParentEditor().document.getBody().$.contentEditable='true';},onLoad:function(){if((CKEDITOR.env.ie7Compat||CKEDITOR.env.ie6Compat)&&a.lang.dir=='rtl')this.parts.contents.setStyle('overflow','hidden');},onOk:function(){var e=this.getContentElement('general','editing_area').getElement(),f=e.getElementsByTag('iframe').getItem(0),g=this.getParentEditor(),h=f.$.contentWindow.document.body.innerHTML;
setTimeout(function(){g.fire('paste',{html:h});},0);},contents:[{id:'general',label:a.lang.common.generalTab,elements:[{type:'html',id:'securityMsg',html:'<div style="white-space:normal;width:340px;">'+b.securityMsg+'</div>'},{type:'html',id:'pasteMsg',html:'<div style="white-space:normal;width:340px;">'+b.pasteMsg+'</div>'},{type:'html',id:'editing_area',style:'width: 100%; height: 100%;',html:'',focus:function(){var e=this.getInputElement().$.contentWindow;setTimeout(function(){e.focus();},500);}}]}]};}); | PypiClean |
/Biomatters-Azimuth-2-0.1.tar.gz/Biomatters-Azimuth-2-0.1/azimuth/features/microhomology.py |
from math import exp
from re import findall
def compute_score(seq, tmpfile1="1.before removing duplication.txt", tmpfile2="2.all microhomology patterns.txt", verbose=False):
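    """Score microhomology patterns around the expected cut site in seq.

    Each pattern found contributes 100 * exp(-deletion_length/length_weight)
    * (#A/T bases + 2 * #G/C bases). Returns (microhomology score,
    out-of-frame score); the latter is the percentage of the total score
    contributed by deletions whose length is not a multiple of 3.
    """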
length_weight=20.0
    left=30 # position in seq where the double-strand break is expected
right=len(seq)-int(left)
#print 'length of seq = '+str(len(seq))
file_temp=open(tmpfile1, "w")
for k in range(2,left)[::-1]:
for j in range(left,left+right-k+1):
for i in range(0,left-k+1):
if seq[i:i+k]==seq[j:j+k]:
length=j-i
file_temp.write(seq[i:i+k]+'\t'+str(i)+'\t'+str(i+k)+'\t'+str(j)+'\t'+str(j+k)+'\t'+str(length)+'\n')
file_temp.close()
    ### After finding all microhomology patterns, duplicates must be removed.
f1=open(tmpfile1, "r")
s1=f1.read()
f2=open(tmpfile2, "w") #After removing duplication
f2.write(seq+'\t'+'microhomology\t'+'deletion length\t'+'score of a pattern\n')
if s1!="":
list_f1=s1.strip().split('\n')
sum_score_3=0
sum_score_not_3=0
for i in range(len(list_f1)):
n=0
score_3=0
score_not_3=0
line=list_f1[i].split('\t')
scrap=line[0]
left_start=int(line[1])
left_end=int(line[2])
right_start=int(line[3])
right_end=int(line[4])
length=int(line[5])
for j in range(i):
line_ref=list_f1[j].split('\t')
left_start_ref=int(line_ref[1])
left_end_ref=int(line_ref[2])
right_start_ref=int(line_ref[3])
right_end_ref=int(line_ref[4])
if (left_start >= left_start_ref) and (left_end <= left_end_ref) and (right_start >= right_start_ref) and (right_end <= right_end_ref):
if (left_start - left_start_ref)==(right_start - right_start_ref) and (left_end - left_end_ref)==(right_end - right_end_ref):
n+=1
else: pass
if n == 0:
if (length % 3)==0:
length_factor = round(1/exp((length)/(length_weight)),3)
num_GC=len(findall('G',scrap))+len(findall('C',scrap))
score_3=100*length_factor*((len(scrap)-num_GC)+(num_GC*2))
elif (length % 3)!=0:
length_factor = round(1/exp((length)/(length_weight)),3)
num_GC=len(findall('G',scrap))+len(findall('C',scrap))
score_not_3=100*length_factor*((len(scrap)-num_GC)+(num_GC*2))
f2.write(seq[0:left_end]+'-'*length+seq[right_end:]+'\t'+scrap+'\t'+str(length)+'\t'+str(100*length_factor*((len(scrap)-num_GC)+(num_GC*2)))+'\n')
sum_score_3+=score_3
sum_score_not_3+=score_not_3
        mh_score = sum_score_3 + sum_score_not_3
        if mh_score > 0:  # guard against division by zero when no patterns were found
            oof_score = sum_score_not_3 * 100 / mh_score
        if verbose:
            print('Microhomology score = ' + str(mh_score))
            print('Out-of-frame score = ' + str(oof_score))
f1.close()
f2.close()
return mh_score, oof_score
if __name__ == '__main__':
    seq = 'GGAGGAAGGGCCTGAGTCCGAGCAGAAGAAGAAGGGCTCCCATCACATCAACCGGTGGCG'  # a sequence length of 60-80 bases is recommended
tmpfile1 = "1.before removing duplication.txt"
tmpfile2 = "2.all microhomology patterns.txt"
mh_score, oof_score = compute_score(seq, tmpfile1=tmpfile1, tmpfile2=tmpfile2, verbose=True)
    # Each row of the output file consists of (full sequence, microhomology scrap, deletion length, score of pattern).
#correct output is
#Microhomology score = 4662.9
#Out-of-frame score = 50.7473889639
#GGAGGAAGGGCCTGAGTCCGAGCAGAAGAAGAAGGGCTCCCATCACATCAACCGGTGGCG
    print(seq) | PypiClean |
/Django_patch-2.2.19-py3-none-any.whl/django/contrib/gis/admin/options.py | from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.db import models
from django.contrib.gis.gdal import OGRGeomType
from django.forms import Media
spherical_mercator_srid = 3857
class GeoModelAdmin(ModelAdmin):
"""
The administration options class for Geographic models. Map settings
may be overloaded from their defaults to create custom maps.
"""
# The default map settings that may be overloaded -- still subject
# to API changes.
default_lon = 0
default_lat = 0
default_zoom = 4
display_wkt = False
display_srid = False
extra_js = []
num_zoom = 18
max_zoom = False
min_zoom = False
units = False
max_resolution = False
max_extent = False
modifiable = True
mouse_position = True
scale_text = True
layerswitcher = True
scrollable = True
map_width = 600
map_height = 400
map_srid = 4326
map_template = 'gis/admin/openlayers.html'
openlayers_url = 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js'
point_zoom = num_zoom - 6
wms_url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
wms_layer = 'basic'
wms_name = 'OpenLayers WMS'
wms_options = {'format': 'image/jpeg'}
debug = False
widget = OpenLayersWidget
@property
def media(self):
"Injects OpenLayers JavaScript into the admin."
return super().media + Media(js=[self.openlayers_url] + self.extra_js)
def formfield_for_dbfield(self, db_field, request, **kwargs):
"""
Overloaded from ModelAdmin so that an OpenLayersWidget is used
for viewing/editing 2D GeometryFields (OpenLayers 2 does not support
3D editing).
"""
if isinstance(db_field, models.GeometryField) and db_field.dim < 3:
# Setting the widget with the newly defined widget.
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super().formfield_for_dbfield(db_field, request, **kwargs)
def get_map_widget(self, db_field):
"""
Return a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
"""
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
if db_field.geom_type == 'GEOMETRYCOLLECTION':
collection_type = 'Any'
else:
collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
else:
collection_type = 'None'
class OLMap(self.widget):
template_name = self.map_template
geom_type = db_field.geom_type
wms_options = ''
if self.wms_options:
wms_options = ["%s: '%s'" % pair for pair in self.wms_options.items()]
wms_options = ', %s' % ', '.join(wms_options)
params = {
'default_lon': self.default_lon,
'default_lat': self.default_lat,
'default_zoom': self.default_zoom,
'display_wkt': self.debug or self.display_wkt,
'geom_type': OGRGeomType(db_field.geom_type),
'field_name': db_field.name,
'is_collection': is_collection,
'scrollable': self.scrollable,
'layerswitcher': self.layerswitcher,
'collection_type': collection_type,
'is_generic': db_field.geom_type == 'GEOMETRY',
'is_linestring': db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
'is_polygon': db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
'is_point': db_field.geom_type in ('POINT', 'MULTIPOINT'),
'num_zoom': self.num_zoom,
'max_zoom': self.max_zoom,
'min_zoom': self.min_zoom,
'units': self.units, # likely should get from object
'max_resolution': self.max_resolution,
'max_extent': self.max_extent,
'modifiable': self.modifiable,
'mouse_position': self.mouse_position,
'scale_text': self.scale_text,
'map_width': self.map_width,
'map_height': self.map_height,
'point_zoom': self.point_zoom,
'srid': self.map_srid,
'display_srid': self.display_srid,
'wms_url': self.wms_url,
'wms_layer': self.wms_layer,
'wms_name': self.wms_name,
'wms_options': wms_options,
'debug': self.debug,
}
return OLMap
class OSMGeoAdmin(GeoModelAdmin):
map_template = 'gis/admin/osm.html'
num_zoom = 20
map_srid = spherical_mercator_srid
max_extent = '-20037508,-20037508,20037508,20037508'
max_resolution = '156543.0339'
point_zoom = num_zoom - 6
units = 'm' | PypiClean |
/APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/docs/api.rst | API reference
=============
Data structures
---------------
.. autoclass:: apscheduler.Task
.. autoclass:: apscheduler.Schedule
.. autoclass:: apscheduler.Job
.. autoclass:: apscheduler.JobInfo
.. autoclass:: apscheduler.JobResult
.. autoclass:: apscheduler.RetrySettings
Schedulers
----------
.. autoclass:: apscheduler.schedulers.sync.Scheduler
.. autoclass:: apscheduler.schedulers.async_.AsyncScheduler
Workers
-------
.. autoclass:: apscheduler.workers.sync.Worker
.. autoclass:: apscheduler.workers.async_.AsyncWorker
Data stores
-----------
.. autoclass:: apscheduler.abc.DataStore
.. autoclass:: apscheduler.abc.AsyncDataStore
.. autoclass:: apscheduler.datastores.memory.MemoryDataStore
.. autoclass:: apscheduler.datastores.sqlalchemy.SQLAlchemyDataStore
.. autoclass:: apscheduler.datastores.async_sqlalchemy.AsyncSQLAlchemyDataStore
.. autoclass:: apscheduler.datastores.mongodb.MongoDBDataStore
Event brokers
-------------
.. autoclass:: apscheduler.abc.EventBroker
.. autoclass:: apscheduler.abc.AsyncEventBroker
.. autoclass:: apscheduler.eventbrokers.local.LocalEventBroker
.. autoclass:: apscheduler.eventbrokers.async_local.LocalAsyncEventBroker
.. autoclass:: apscheduler.eventbrokers.asyncpg.AsyncpgEventBroker
.. autoclass:: apscheduler.eventbrokers.mqtt.MQTTEventBroker
.. autoclass:: apscheduler.eventbrokers.redis.RedisEventBroker
Serializers
-----------
.. autoclass:: apscheduler.abc.Serializer
.. autoclass:: apscheduler.serializers.cbor.CBORSerializer
.. autoclass:: apscheduler.serializers.json.JSONSerializer
.. autoclass:: apscheduler.serializers.pickle.PickleSerializer
Triggers
--------
.. autoclass:: apscheduler.abc.Trigger
.. autoclass:: apscheduler.triggers.date.DateTrigger
.. autoclass:: apscheduler.triggers.interval.IntervalTrigger
.. autoclass:: apscheduler.triggers.calendarinterval.CalendarIntervalTrigger
.. autoclass:: apscheduler.triggers.combining.AndTrigger
.. autoclass:: apscheduler.triggers.combining.OrTrigger
.. autoclass:: apscheduler.triggers.cron.CronTrigger
Events
------
.. autoclass:: apscheduler.Event
.. autoclass:: apscheduler.DataStoreEvent
.. autoclass:: apscheduler.TaskAdded
.. autoclass:: apscheduler.TaskUpdated
.. autoclass:: apscheduler.TaskRemoved
.. autoclass:: apscheduler.ScheduleAdded
.. autoclass:: apscheduler.ScheduleUpdated
.. autoclass:: apscheduler.ScheduleRemoved
.. autoclass:: apscheduler.JobAdded
.. autoclass:: apscheduler.JobRemoved
.. autoclass:: apscheduler.ScheduleDeserializationFailed
.. autoclass:: apscheduler.JobDeserializationFailed
.. autoclass:: apscheduler.SchedulerEvent
.. autoclass:: apscheduler.SchedulerStarted
.. autoclass:: apscheduler.SchedulerStopped
.. autoclass:: apscheduler.WorkerEvent
.. autoclass:: apscheduler.WorkerStarted
.. autoclass:: apscheduler.WorkerStopped
.. autoclass:: apscheduler.JobAcquired
.. autoclass:: apscheduler.JobReleased
Enumerated types
----------------
.. autoclass:: apscheduler.RunState
.. autoclass:: apscheduler.JobOutcome
.. autoclass:: apscheduler.ConflictPolicy
.. autoclass:: apscheduler.CoalescePolicy
Context variables
-----------------
See the :mod:`contextvars` module for information on how to work with context variables.
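A minimal sketch of reading one of these variables from inside a scheduled
function (the ``task_id`` attribute used below is an assumption; see the data
structures above for the actual fields of :class:`~apscheduler.JobInfo`):
.. code-block:: python
    from apscheduler import current_job
    def my_task():
        info = current_job.get()  # JobInfo for the currently running job
        print(info.task_id)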
.. data:: apscheduler.current_scheduler
:annotation: the current scheduler
:type: ~contextvars.ContextVar[~typing.Union[Scheduler, AsyncScheduler]]
.. data:: apscheduler.current_worker
   :annotation: the current worker
:type: ~contextvars.ContextVar[~typing.Union[Worker, AsyncWorker]]
.. data:: apscheduler.current_job
:annotation: information on the job being currently run
:type: ~contextvars.ContextVar[JobInfo]
Exceptions
----------
.. autoexception:: apscheduler.TaskLookupError
.. autoexception:: apscheduler.ScheduleLookupError
.. autoexception:: apscheduler.JobLookupError
.. autoexception:: apscheduler.JobResultNotReady
.. autoexception:: apscheduler.JobCancelled
.. autoexception:: apscheduler.JobDeadlineMissed
.. autoexception:: apscheduler.ConflictingIdError
.. autoexception:: apscheduler.SerializationError
.. autoexception:: apscheduler.DeserializationError
.. autoexception:: apscheduler.MaxIterationsReached
| PypiClean |
/Django-Bootstrap3-Validator-0.3.3.zip/Django-Bootstrap3-Validator-0.3.3/bootstrap_validator/static/validator/js/language/pt_PT.js | (function ($) {
/**
* Portuguese (Portugal) language package
* Translated by @rtbfreitas
*/
$.fn.bootstrapValidator.i18n = $.extend(true, $.fn.bootstrapValidator.i18n, {
base64: {
'default': 'Por favor insira um código base 64 válido'
},
between: {
'default': 'Por favor insira um valor entre %s e %s',
notInclusive: 'Por favor insira um valor estritamente entre %s e %s'
},
callback: {
'default': 'Por favor insira um valor válido'
},
choice: {
'default': 'Por favor insira um valor válido',
less: 'Por favor escolha %s opções no mínimo',
more: 'Por favor escolha %s opções no máximo',
between: 'Por favor escolha de %s a %s opções'
},
color: {
'default': 'Por favor insira uma cor válida'
},
creditCard: {
'default': 'Por favor insira um número de cartão de crédito válido'
},
cusip: {
'default': 'Por favor insira um número CUSIP válido'
},
cvv: {
'default': 'Por favor insira um número CVV válido'
},
date: {
'default': 'Por favor insira uma data válida',
min: 'Por favor insira uma data posterior a %s',
max: 'Por favor insira uma data anterior a %s',
range: 'Por favor insira uma data entre %s e %s'
},
different: {
'default': 'Por favor insira valores diferentes'
},
digits: {
'default': 'Por favor insira somente dígitos'
},
ean: {
'default': 'Por favor insira um número EAN válido'
},
emailAddress: {
'default': 'Por favor insira um email válido'
},
file: {
'default': 'Por favor escolha um arquivo válido'
},
greaterThan: {
'default': 'Por favor insira um valor maior ou igual a %s',
notInclusive: 'Por favor insira um valor maior do que %s'
},
grid: {
'default': 'Por favor insira uma GRID válida'
},
hex: {
'default': 'Por favor insira um hexadecimal válido'
},
hexColor: {
'default': 'Por favor insira uma cor hexadecimal válida'
},
iban: {
'default': 'Por favor insira um número IBAN válido',
countryNotSupported: 'O código do país %s não é suportado',
country: 'Por favor insira um número IBAN válido em %s',
countries: {
AD: 'Andorra',
AE: 'Emirados Árabes',
AL: 'Albânia',
AO: 'Angola',
AT: 'Áustria',
AZ: 'Azerbaijão',
BA: 'Bósnia-Herzegovina',
BE: 'Bélgica',
BF: 'Burkina Faso',
BG: 'Bulgária',
BH: 'Bahrain',
BI: 'Burundi',
BJ: 'Benin',
BR: 'Brasil',
CH: 'Suíça',
                CI: 'Costa do Marfim',
CM: 'Camarões',
CR: 'Costa Rica',
CV: 'Cabo Verde',
CY: 'Chipre',
CZ: 'República Checa',
DE: 'Alemanha',
DK: 'Dinamarca',
DO: 'República Dominicana',
DZ: 'Argélia',
EE: 'Estónia',
ES: 'Espanha',
FI: 'Finlândia',
FO: 'Ilhas Faroé',
FR: 'França',
GB: 'Reino Unido',
                GE: 'Geórgia',
GI: 'Gibraltar',
GL: 'Groenlândia',
GR: 'Grécia',
GT: 'Guatemala',
HR: 'Croácia',
HU: 'Hungria',
                IE: 'Irlanda',
IL: 'Israel',
IR: 'Irão',
IS: 'Islândia',
                IT: 'Itália',
                JO: 'Jordânia',
KW: 'Kuwait',
KZ: 'Cazaquistão',
LB: 'Líbano',
LI: 'Liechtenstein',
LT: 'Lituânia',
LU: 'Luxemburgo',
LV: 'Letónia',
MC: 'Mônaco',
MD: 'Moldávia',
ME: 'Montenegro',
MG: 'Madagascar',
MK: 'Macedónia',
ML: 'Mali',
MR: 'Mauritânia',
MT: 'Malta',
MU: 'Maurício',
MZ: 'Moçambique',
NL: 'Países Baixos',
NO: 'Noruega',
PK: 'Paquistão',
PL: 'Polônia',
PS: 'Palestino',
PT: 'Portugal',
QA: 'Qatar',
RO: 'Roménia',
RS: 'Sérvia',
SA: 'Arábia Saudita',
SE: 'Suécia',
SI: 'Eslovénia',
SK: 'Eslováquia',
SM: 'San Marino',
SN: 'Senegal',
TN: 'Tunísia',
TR: 'Turquia',
VG: 'Ilhas Virgens Britânicas'
}
},
id: {
'default': 'Por favor insira um código de identificação válido',
countryNotSupported: 'O código do país %s não é suportado',
country: 'Por favor insira um número de indentificação válido em %s',
countries: {
BA: 'Bósnia e Herzegovina',
BG: 'Bulgária',
BR: 'Brasil',
CH: 'Suíça',
CL: 'Chile',
CN: 'China',
CZ: 'República Checa',
DK: 'Dinamarca',
EE: 'Estônia',
ES: 'Espanha',
FI: 'Finlândia',
HR: 'Croácia',
IE: 'Irlanda',
IS: 'Islândia',
LT: 'Lituânia',
LV: 'Letónia',
ME: 'Montenegro',
MK: 'Macedónia',
NL: 'Holanda',
RO: 'Roménia',
RS: 'Sérvia',
SE: 'Suécia',
SI: 'Eslovênia',
SK: 'Eslováquia',
SM: 'San Marino',
TH: 'Tailândia',
ZA: 'África do Sul'
}
},
identical: {
'default': 'Por favor, insira o mesmo valor'
},
imei: {
'default': 'Por favor insira um IMEI válido'
},
imo: {
'default': 'Por favor insira um IMO válido'
},
integer: {
'default': 'Por favor insira um número inteiro válido'
},
ip: {
'default': 'Por favor insira um IP válido',
ipv4: 'Por favor insira um endereço de IPv4 válido',
ipv6: 'Por favor insira um endereço de IPv6 válido'
},
isbn: {
'default': 'Por favor insira um ISBN válido'
},
isin: {
'default': 'Por favor insira um ISIN válido'
},
ismn: {
'default': 'Por favor insira um ISMN válido'
},
issn: {
'default': 'Por favor insira um ISSN válido'
},
lessThan: {
'default': 'Por favor insira um valor menor ou igual a %s',
notInclusive: 'Por favor insira um valor menor do que %s'
},
mac: {
'default': 'Por favor insira um endereço MAC válido'
},
meid: {
'default': 'Por favor insira um MEID válido'
},
notEmpty: {
'default': 'Por favor insira um valor'
},
numeric: {
'default': 'Por favor insira um número real válido'
},
phone: {
'default': 'Por favor insira um número de telefone válido',
countryNotSupported: 'O código de país %s não é suportado',
country: 'Por favor insira um número de telefone válido em %s',
countries: {
BR: 'Brasil',
CN: 'China',
CZ: 'República Checa',
DE: 'Alemanha',
DK: 'Dinamarca',
ES: 'Espanha',
FR: 'França',
GB: 'Reino Unido',
MA: 'Marrocos',
PK: 'Paquistão',
RO: 'Roménia',
RU: 'Rússia',
SK: 'Eslováquia',
TH: 'Tailândia',
US: 'EUA',
VE: 'Venezuela'
}
},
regexp: {
'default': 'Por favor insira um valor correspondente ao padrão'
},
remote: {
'default': 'Por favor insira um valor válido'
},
rtn: {
'default': 'Por favor insira um número válido RTN'
},
sedol: {
'default': 'Por favor insira um número válido SEDOL'
},
siren: {
'default': 'Por favor insira um número válido SIREN'
},
siret: {
'default': 'Por favor insira um número válido SIRET'
},
step: {
'default': 'Por favor insira um passo válido %s'
},
stringCase: {
'default': 'Por favor, digite apenas caracteres minúsculos',
upper: 'Por favor, digite apenas caracteres maiúsculos'
},
stringLength: {
'default': 'Por favor insira um valor com comprimento válido',
less: 'Por favor insira menos de %s caracteres',
more: 'Por favor insira mais de %s caracteres',
between: 'Por favor insira um valor entre %s e %s caracteres'
},
uri: {
'default': 'Por favor insira um URI válido'
},
uuid: {
'default': 'Por favor insira um número válido UUID',
version: 'Por favor insira uma versão %s UUID válida'
},
vat: {
'default': 'Por favor insira um VAT válido',
countryNotSupported: 'O código do país %s não é suportado',
country: 'Por favor insira um número VAT válido em %s',
countries: {
AT: 'Áustria',
BE: 'Bélgica',
BG: 'Bulgária',
BR: 'Brasil',
CH: 'Suíça',
CY: 'Chipre',
CZ: 'República Checa',
DE: 'Alemanha',
DK: 'Dinamarca',
EE: 'Estônia',
ES: 'Espanha',
FI: 'Finlândia',
FR: 'França',
GB: 'Reino Unido',
GR: 'Grécia',
EL: 'Grécia',
HU: 'Hungria',
HR: 'Croácia',
IE: 'Irlanda',
IS: 'Islândia',
IT: 'Itália',
LT: 'Lituânia',
LU: 'Luxemburgo',
LV: 'Letónia',
MT: 'Malta',
NL: 'Holanda',
                NO: 'Noruega',
PL: 'Polônia',
PT: 'Portugal',
RO: 'Roménia',
RU: 'Rússia',
RS: 'Sérvia',
SE: 'Suécia',
SI: 'Eslovênia',
SK: 'Eslováquia',
VE: 'Venezuela',
ZA: 'África do Sul'
}
},
vin: {
'default': 'Por favor insira um VIN válido'
},
zipCode: {
'default': 'Por favor insira um código postal válido',
countryNotSupported: 'O código postal do país %s não é suportado',
country: 'Por favor insira um código postal válido em %s',
countries: {
AT: 'Áustria',
BR: 'Brasil',
CA: 'Canadá',
CH: 'Suíça',
CZ: 'República Checa',
DE: 'Alemanha',
DK: 'Dinamarca',
FR: 'França',
GB: 'Reino Unido',
IE: 'Irlanda',
IT: 'Itália',
MA: 'Marrocos',
NL: 'Holanda',
PT: 'Portugal',
RO: 'Roménia',
RU: 'Rússia',
SE: 'Suécia',
SG: 'Cingapura',
SK: 'Eslováquia',
US: 'EUA'
}
}
});
}(window.jQuery)); | PypiClean |
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/docs/modules/vector/coordsys.rst | =============================
More about Coordinate Systems
=============================
We will now look at how we can initialize new coordinate systems in
:mod:`diofant.vector`, positioned and oriented in user-defined
ways with respect to already-existing systems.
Locating new systems
====================
We already know that the ``origin`` property of a
``CoordSysCartesian`` corresponds to the ``Point`` instance
denoting its origin reference point.
Consider a coordinate system `N`. Suppose we want to define
a new system `M`, whose origin is located at
`\mathbf{3\hat{i} + 4\hat{j} + 5\hat{k}}` from `N`'s origin.
In other words, the coordinates of `M`'s origin from N's perspective
happen to be `(3, 4, 5)`. Moreover, this would also mean that
the coordinates of `N`'s origin with respect to `M`
would be `(-3, -4, -5)`.
This can be achieved programmatically as follows -
>>> from diofant.vector import CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> M = N.locate_new('M', 3*N.i + 4*N.j + 5*N.k)
>>> M.position_wrt(N)
3*N.i + 4*N.j + 5*N.k
>>> N.origin.express_coordinates(M)
(-3, -4, -5)
It is worth noting that `M`'s orientation is the same as that of
`N`. This means that the rotation matrix of `N` with respect
to `M`, and also vice versa, is equal to the identity matrix of
dimensions 3x3.
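This can be verified directly:
>>> M.rotation_matrix(N)
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])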
The ``locate_new`` method initializes a ``CoordSysCartesian`` that
is only translated in space, not re-oriented, relative to the 'parent'
system.
Orienting new systems
=====================
Similar to 'locating' new systems, :mod:`diofant.vector` also allows for
initialization of new ``CoordSysCartesian`` instances that are oriented
in user-defined ways with respect to existing systems.
Suppose you have a coordinate system `A`.
>>> from diofant.vector import CoordSysCartesian
>>> A = CoordSysCartesian('A')
You want to initialize a new coordinate system `B`, that is rotated with
respect to `A`'s Z-axis by an angle `\theta`.
>>> from diofant import Symbol
>>> theta = Symbol('theta')
.. only:: html
The orientation is shown in the diagram below:
.. image:: coordsys_rot.*
:height: 250
:width: 250
:align: center
There are two ways to achieve this.
Using a method of CoordSysCartesian directly
--------------------------------------------
This is the easiest, cleanest, and hence the recommended way of doing
it.
>>> B = A.orient_new_axis('B', theta, A.k)
This initializes `B` with the required orientation information with
respect to `A`.
``CoordSysCartesian`` provides the following direct orientation methods
in its API-
1. ``orient_new_axis``
2. ``orient_new_body``
3. ``orient_new_space``
4. ``orient_new_quaternion``
Please look at the ``CoordSysCartesian`` class API given in the docs
of this module, to know their functionality and required arguments
in detail.
Using Orienter(s) and the orient_new method
-------------------------------------------
You would first have to initialize an ``AxisOrienter`` instance for
storing the rotation information.
>>> from diofant.vector import AxisOrienter
>>> axis_orienter = AxisOrienter(theta, A.k)
And then apply it using the ``orient_new`` method, to obtain `B`.
>>> B = A.orient_new('B', axis_orienter)
``orient_new`` also lets you orient new systems using multiple
``Orienter`` instances, provided in an iterable. The rotations/orientations
are applied to the new system in the order the ``Orienter`` instances
appear in the iterable.
>>> from diofant.vector import BodyOrienter
>>> from diofant import symbols
>>> a, b, c = symbols('a b c')
>>> body_orienter = BodyOrienter(a, b, c, 'XYZ')
>>> C = A.orient_new('C', (axis_orienter, body_orienter))
The :mod:`diofant.vector` API provides the following four ``Orienter``
classes for orientation purposes-
1. ``AxisOrienter``
2. ``BodyOrienter``
3. ``SpaceOrienter``
4. ``QuaternionOrienter``
Please refer to the API of the respective classes in the docs of this
module to know more.
In each of the above examples, the origin of the new coordinate system
coincides with the origin of the 'parent' system.
>>> B.position_wrt(A)
0
To compute the rotation matrix of any coordinate system with respect
to another one, use the ``rotation_matrix`` method.
>>> B = A.orient_new_axis('B', a, A.k)
>>> B.rotation_matrix(A)
Matrix([
[ cos(a), sin(a), 0],
[-sin(a), cos(a), 0],
[ 0, 0, 1]])
>>> B.rotation_matrix(B)
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
Orienting AND Locating new systems
==================================
What if you want to initialize a new system that is not only oriented
in a pre-defined way, but also translated with respect to the parent?
Each of the ``orient_new_<method of orientation>`` methods, as well
as the ``orient_new`` method, support a ``location`` keyword
argument.
If a ``Vector`` is supplied as the value for this ``kwarg``, the
new system's origin is automatically defined to be located at that
position vector with respect to the parent coordinate system.
Thus, the orientation methods also serve to both orient and locate the
new systems.
>>> C = A.orient_new_axis('C', a, A.k, location=2*A.j)
>>> C.position_wrt(A)
2*A.j
>>> from diofant.vector import express
>>> express(A.position_wrt(C), C)
(-2*sin(a))*C.i + (-2*cos(a))*C.j
More on the ``express`` function in a bit.
Expression of quantities in different coordinate systems
========================================================
Vectors and Dyadics
-------------------
As mentioned earlier, the same vector attains different expressions in
different coordinate systems. In general, the same is true for scalar
expressions and dyadic tensors.
:mod:`diofant.vector` supports the expression of vector/scalar quantities
in different coordinate systems using the ``express`` function.
For purposes of this section, assume the following initializations-
>>> from diofant.vector import CoordSysCartesian, express
>>> N = CoordSysCartesian('N')
>>> M = N.orient_new_axis('M', a, N.k)
``Vector`` instances can be expressed in user defined systems using
``express``.
>>> v1 = N.i + N.j + N.k
>>> express(v1, M)
(sin(a) + cos(a))*M.i + (-sin(a) + cos(a))*M.j + M.k
>>> v2 = N.i + M.j
>>> express(v2, N)
(-sin(a) + 1)*N.i + (cos(a))*N.j
Apart from ``Vector`` instances, ``express`` also supports
reexpression of scalars (general Diofant ``Expr``) and
``Dyadic`` objects.
``express`` also accepts a second coordinate system
for re-expressing ``Dyadic`` instances.
>>> d = 2*(M.i | N.j) + 3*(M.j | N.k)
>>> express(d, M)
(2*sin(a))*(M.i|M.i) + (2*cos(a))*(M.i|M.j) + 3*(M.j|M.k)
>>> express(d, M, N)
2*(M.i|N.j) + 3*(M.j|N.k)
Coordinate Variables
--------------------
The location of a coordinate system's origin does not affect the
re-expression of ``BaseVector`` instances. However, it does affect
the way ``BaseScalar`` instances are expressed in different systems.
``BaseScalar`` instances, are coordinate 'symbols' meant to denote the
variables used in the definition of vector/scalar fields in
:mod:`diofant.vector`.
For example, consider the scalar field
`\mathbf{{T}_{N}(x, y, z) = x + y + z}` defined in system `N`.
Thus, at a point with coordinates `(a, b, c)`, the value of the
field would be `a + b + c`. Now consider system `R`, whose
origin is located at `(1, 2, 3)` with respect to `N` (no
change of orientation).
A point with coordinates `(a, b, c)` in `R` has coordinates
`(a + 1, b + 2, c + 3)` in `N`.
Therefore, the expression for `\mathbf{{T}_{N}}` in `R` becomes
`\mathbf{{T}_{R}}(x, y, z) = x + y + z + 6`.
Coordinate variables, if present in a vector/scalar/dyadic expression,
can also be re-expressed in a given coordinate system, by setting the
``variables`` keyword argument of ``express`` to ``True``.
The above mentioned example, done programmatically, would look like
this -
>>> R = N.locate_new('R', N.i + 2*N.j + 3*N.k)
>>> T_N = N.x + N.y + N.z
>>> express(T_N, R, variables=True)
R.x + R.y + R.z + 6
Other expression-dependent methods
----------------------------------
The ``to_matrix`` method of ``Vector`` and
``express_coordinates`` method of ``Point`` also return
different results depending on the coordinate system being provided.
>>> P = R.origin.locate_new('P', a*R.i + b*R.j + c*R.k)
>>> P.express_coordinates(N)
(a + 1, b + 2, c + 3)
>>> P.express_coordinates(R)
(a, b, c)
>>> v = N.i + N.j + N.k
>>> v.to_matrix(M)
Matrix([
[ sin(a) + cos(a)],
[-sin(a) + cos(a)],
[ 1]])
>>> v.to_matrix(N)
Matrix([
[1],
[1],
[1]])
| PypiClean |
/DESPOTIC-2.1.tar.gz/DESPOTIC-2.1/despotic/chemistry/NL99_old.py |
import numpy as np
import string
from despotic.despoticError import despoticError
from shielding import fShield_CO_vDB
from despotic.chemistry import abundanceDict
from despotic.chemistry import chemNetwork
import scipy.constants as physcons
import warnings
########################################################################
# Physical and numerical constants
########################################################################
kB = physcons.k/physcons.erg
mH = (physcons.m_p+physcons.m_e)/physcons.gram
_small = 1e-100
########################################################################
# List of species used in this chemistry network
########################################################################
specList = ['He+', 'H3+', 'OHx', 'CHx', 'CO', 'C', 'C+', 'HCO+', 'O',
'M+']
specListExtended = specList + ['H2', 'He', 'M', 'e-']
########################################################################
# Data on photoreactions
# Reactions are, in order:
# h nu + CI -> C+ + e
# h nu + CHx -> CI + H
# h nu + CO -> CI + O
# h nu + OHx -> OI + H
# h nu + M -> M+ + e
# h nu + HCO+ -> CO + H
########################################################################
_kph = np.array([
3.0e-10, 1.0e-9, 1.0e-10, 5.0e-10,
2.0e-10, 1.5e-10])
_avfac = np.array([3.0, 1.5, 3.0, 1.7, 1.9, 2.5])
_inph = np.array([5, 3, 4, 2, 12, 7], dtype='int')
_outph1 = np.array([6, 5, 5, 8, 9, 4], dtype='int')
_outph2 = np.array([10, 10, 8, 10, 10, 10], dtype='int')
########################################################################
# Data on two-body reactions
# Reactions are, in order:
# (0) H3+ + CI -> CHx + H2
# (1) H3+ + OI -> OHx + H2
# (2) H3+ + CO -> HCO+ + H2
# (3) He+ + H2 -> He + H + H+
# (4) He+ + CO -> C+ + O + He
# (5) C+ + H2 -> CHx + H
# (6) C+ + OHx -> HCO+
# (7) OI + CHx -> CO + H
# (8) CI + OHx -> CO + H
# (9) He+ + e -> He + h nu
# (10) H3+ + e -> H2 + H
# (11) C+ + e -> CI + h nu
# (12) HCO+ + e -> CO + H
# (13) M+ + e -> M + h nu
# (14) H3+ + M -> M+ + H + H2
########################################################################
_k2 = np.array([
2.0e-9, 8.0e-10, 1.7e-9, 7.0e-15, 1.6e-9, 4.0e-16, 1.0e-9,
2.0e-10, 5.8e-12, 9.0e-11, 1.9e-6, 1.4e-10, 3.3e-5,
3.8e-10, 2.0e-9])
_k2Texp = np.array([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, -0.64, -0.54,
-0.61, -1.0, -0.65, 0.0])
_in2bdy1 = np.array([1, 1, 1, 0, 0, 6, 6, 8, 5, 0, 1, 6, 7, 9, 1],
dtype='int')
_in2bdy2 = np.array([5, 8, 4, 10, 4, 10, 2, 3, 2, 13, 13, 13, 13,
13, 12], dtype='int')
_out2bdy1 = np.array([3, 2, 7, 10, 6, 3, 7, 4, 4, 10, 10, 5, 4, 10,
9], dtype='int')
_out2bdy2 = np.array([10, 10, 10, 10, 8, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10], dtype='int')
########################################################################
# Set some default abundances
########################################################################
_xHedefault = 0.1
_xCdefault = 2.0e-4
_xOdefault = 4.0e-4
_xMdefault = 2.0e-7
_xH2 = 0.5
########################################################################
# Define the NL99 class
########################################################################
class NL99(chemNetwork):
"""
    This class implements the chemistry network of Nelson & Langer
(1999, ApJ, 524, 923).
"""
####################################################################
# Method to initialize
####################################################################
def __init__(self, cloud=None, info=None):
"""
Parameters
----------
cloud : class cloud
a DESPOTIC cloud object from which initial data are to be
taken
info : dict
a dict containing additional parameters
Returns
-------
Nothing
Raises
------
despoticError, if the dict info contains non-allowed entries
Remarks
-------
The dict info may contain the following key - value pairs:
'xC' : float giving the total C abundance per H nucleus;
defaults to 2.0e-4
'xO' : float giving the total H abundance per H nucleus;
defaults to 4.0e-4
'xM' : float giving the total refractory metal abundance per H
nucleus; defaults to 2.0e-7
'sigmaDustV' : float giving the V band dust extinction cross
section per H nucleus; if not set, the default behavior
is to assume that sigmaDustV = 0.4 * cloud.dust.sigmaPE
'AV' : float giving the total visual extinction; ignored if
sigmaDustV is set
'noClump' : a Boolean; if True, the clump factor is set to
1.0; defaults to False
"""
# List of species for this network; provide a pointer here so
# that it can be accessed through the class
self.specList = specList
self.specListExtended = specListExtended
# Store the input info dict
self.info = info
# Array to hold abundances
self.x = np.zeros(10)
# Total metal abundance
if info is None:
self.xM = _xMdefault
else:
if 'xM' in info:
self.xM = info['xM']
else:
self.xM = _xMdefault
# Extract information from the cloud if one is given
if cloud is None:
# No cloud given, so set some defaults
self.cloud = None
# Physical properties
self._xHe = _xHedefault
self._ionRate = 2.0e-17
self._NH = _small
self._temp = _small
self._chi = 1.0
self._nH = _small
self._AV = 0.0
if info is not None:
if 'AV' in info:
self._AV = info['AV']
# Set initial abundances
if info is None:
self.x[6] = _xCdefault
self.x[8] = _xOdefault
else:
if 'xC' in info:
self.x[6] = info['xC']
else:
self.x[6] = _xCdefault
if 'xO' in info:
self.x[8] = info['xO']
else:
self.x[8] = _xOdefault
self.x[9] = self.xM
else:
# Cloud is given, so get information out of it
self.cloud = cloud
# Sanity check: make sure cloud is pure H2
if cloud.comp.xH2 != 0.5:
raise despoticError, "NL99 network only valid " + \
"for pure H2 composition"
# Sanity check: make sure cloud contains some He, since
# network will not function properly at He abundance of 0
if cloud.comp.xHe == 0.0:
raise despoticError, "NL99 network requires " + \
"non-zero He abundance"
# Set abundances
# Make a case-insensitive version of the emitter list for
# convenience
            emList = dict(zip(map(str.lower,
                                  cloud.emitters.keys()),
                              cloud.emitters.values()))
# OH and H2O
if 'oh' in emList:
self.x[2] += emList['oh'].abundance
if 'ph2o' in emList:
self.x[2] += emList['ph2o'].abundance
if 'oh2o' in emList:
self.x[2] += emList['oh2o'].abundance
if 'p-h2o' in emList:
self.x[2] += emList['p-h2o'].abundance
if 'o-h2o' in emList:
self.x[2] += emList['o-h2o'].abundance
# CO
if 'co' in emList:
self.x[4] = emList['co'].abundance
# Neutral carbon
if 'c' in emList:
self.x[5] = emList['c'].abundance
# Ionized carbon
if 'c+' in emList:
self.x[6] = emList['c+'].abundance
# HCO+
if 'hco+' in emList:
self.x[7] = emList['hco+'].abundance
# Sum input abundances of C, C+, CO, HCO+ to ensure that
# all carbon is accounted for. If there is too little,
# assume the excess is C+. If there is too much, throw an
# error.
if info is None:
xC = _xCdefault
elif 'xC' in info:
xC = info['xC']
else:
xC = _xCdefault
xCtot = self.x[4] + self.x[5] + self.x[6] + self.x[7]
if xCtot < xC:
# Print warning if we're altering existing C+
# abundance.
if 'c' in emList:
print "Warning: input C abundance is " + \
str(xC) + ", but total input C, C+, CHx, CO, " + \
"HCO+ abundance is " + str(xCtot) + \
"; increasing xC+ to " + str(self.x[6]+xC-xCtot)
self.x[6] += xC - xCtot
elif xCtot > xC:
# Throw an error if input C abundance is smaller than
# what is accounted for in initial conditions
raise despoticError, "input C abundance is " + \
str(xC) + ", but total input C, C+, CHx, CO, " + \
"HCO+ abundance is " + str(xCtot)
# O
if 'o' in emList:
self.x[8] = emList['o'].abundance
elif info is None:
self.x[8] = _xOdefault - self.x[2] - self.x[4] - \
self.x[7]
elif 'xO' in info:
self.x[8] = info['xO'] - self.x[2] - self.x[4] - \
self.x[7]
else:
self.x[8] = _xOdefault - self.x[2] - self.x[4] - \
self.x[7]
# As with C, make sure all O is accounted for, and if not
# park the extra in OI
if info is None:
xO = _xOdefault
            elif 'xO' in info:
xO = info['xO']
else:
xO = _xOdefault
xOtot = self.x[2] + self.x[4] + self.x[7] + self.x[8]
if xOtot < xO:
# Print warning if we're altering existing O
# abundance.
if 'o' in emList:
print "Warning: input O abundance is " + \
str(xO) + ", but total input O, OHx, CO, " + \
"HCO+ abundance is " + str(xOtot) + \
"; increasing xO to " + str(self.x[8]+xO-xOtot)
self.x[8] += xO - xOtot
elif xOtot > xO:
# Throw an error if input O abundance is smaller than
# what is accounted for in initial conditions
raise despoticError, "input C abundance is " + \
str(xO) + ", but total input O, OHx, CO, " + \
"HCO+ abundance is " + str(xOtot)
# Initial electrons = metals + C+ + HCO+
xeinit = self.xM + self.x[6] + self.x[7]
# Initial He+
self.x[0] = self.xHe*self.ionRate / \
(self.nH*(_k2[9]*self.temp**_k2Texp[9]*xeinit+_k2[3]*_xH2))
# Initial H3+
self.x[1] = _xH2*self.ionRate / \
(self.nH*(_k2[10]*self.temp**_k2Texp[10]*xeinit+_k2[2]*self.x[8]))
# Initial M+
self.x[9] = self.xM
####################################################################
# Define some properties so that, if we have a cloud, quantities
# that are stored in the cloud point back to it
####################################################################
@property
def nH(self):
if self.cloud is None:
return self._nH
else:
return self.cloud.nH
@nH.setter
def nH(self, value):
if self.cloud is None:
self._nH = value
else:
self.cloud.nH = value
@property
def temp(self):
if self.cloud is None:
return self._temp
else:
return self.cloud.Tg
@temp.setter
def temp(self, value):
if self.cloud is None:
self._temp = value
else:
self.cloud.Tg = value
@property
def cfac(self):
if self.cloud is None:
return 1.0
else:
if self.info is None:
cs2 = kB * self.cloud.Tg / (self.cloud.comp.mu * mH)
return np.sqrt(1.0 + 0.75*self.cloud.sigmaNT**2/cs2)
elif 'noClump' in self.info:
if self.info['noClump'] == True:
return 1.0
else:
cs2 = kB * self.cloud.Tg / (self.cloud.comp.mu * mH)
return np.sqrt(1.0 + 0.75*self.cloud.sigmaNT**2/cs2)
else:
cs2 = kB * self.cloud.Tg / (self.cloud.comp.mu * mH)
return np.sqrt(1.0 + 0.75*self.cloud.sigmaNT**2/cs2)
@cfac.setter
def cfac(self, value):
raise despoticError, "cannot set cfac directly"
@property
def xHe(self):
if self.cloud is None:
return self._xHe
else:
return self.cloud.comp.xHe
@xHe.setter
def xHe(self, value):
if self.cloud is None:
self._xHe = value
else:
self.cloud.comp.xHe = value
@property
def ionRate(self):
if self.cloud is None:
return self._ionRate
else:
return self.cloud.rad.ionRate
@ionRate.setter
def ionRate(self, value):
if self.cloud is None:
self._ionRate = value
else:
self.cloud.rad.ionRate = value
@property
def chi(self):
if self.cloud is None:
return self._chi
else:
return self.cloud.rad.chi
@chi.setter
def chi(self, value):
if self.cloud is None:
self._chi = value
else:
self.cloud.rad.chi = value
@property
def NH(self):
if self.cloud is None:
return self._NH
else:
return self.cloud.colDen / 2.0
@NH.setter
def NH(self, value):
if self.cloud is None:
self._NH = value
else:
self.cloud.colDen = 2.0*value
@property
def AV(self):
if self.cloud is None:
if self.info is None:
return self._AV
elif 'AV' in self.info:
return self.info['AV']
else:
return self._AV
else:
if self.info is None:
return 0.4 * self.cloud.dust.sigmaPE * self.NH
elif 'sigmaDustV' in self.info:
# Note factor to convert from mag to true
# dimensionless units
return self.NH * self.info['sigmaDustV'] / \
np.log(100**0.2)
elif 'AV' in self.info:
return self.info['AV']
else:
return 0.4 * self.cloud.dust.sigmaPE * self.NH
@AV.setter
def AV(self, value):
if self.cloud is None:
if self.info is None:
self._AV = value
elif 'AV' in self.info:
self.info['AV'] = value
else:
self._AV = value
else:
if self.info is None:
raise despoticError, "cannot set AV directly " + \
"unless it is part of info"
elif 'AV' not in self.info:
raise despoticError, "cannot set AV directly " + \
"unless it is part of info"
else:
self.info['AV'] = value
####################################################################
# Override the abundances property of the base chemNetwork class
# so that we return the derived abundances as well as the
# variables ones. For the setter, let users set abundances, but if
# they try to set ones that are derived, issue a warning.
####################################################################
@property
def abundances(self):
self._abundances = abundanceDict(self.specListExtended,
self.extendAbundances())
return self._abundances
@abundances.setter
def abundances(self, value):
        if len(value.x) == 10:
            self.x = value.x
        elif len(value.x) == 14:
            self.x = value.x[:10]
            warnings.warn('For NL99 network, cannot set abundances of '
                          'H2, He, M, e-; abundances set only for other species')
        else:
            raise ValueError("abundances for NL99 network must have 10 species!")
####################################################################
# Method to get derived abundances from ones being stored; this
# adds slots for H2, HeI, MI, and e
####################################################################
def extendAbundances(self, xin=None):
# Object we'll be returning
xgrow = np.zeros(14)
# Copy abundances if passed in; otherwise user stored ones
if xin is None:
xgrow[:10] = self.x
else:
xgrow[:10] = xin
# H2 abundances is hardwired for NL99 network
xgrow[10] = _xH2
# He abundance = total He abundance - He+ abundance
xgrow[11] = self.xHe - xgrow[0]
# Neutral metal abundance = total metal abundance - ionized
# metal abundance
xgrow[12] = self.xM - xgrow[9]
# e abundance = He+ + H3+ + C+ + HCO+ + M+
xgrow[13] = xgrow[0] + xgrow[1] + xgrow[6] + xgrow[7] \
+ xgrow[9]
# Return
return xgrow
####################################################################
# Method to return the time derivative of all chemical rates
####################################################################
def dxdt(self, xin, time):
"""
This method returns the time derivative of all abundances for
this chemical network.
Parameters
----------
xin : array(10)
current abundances of all species
time : float
current time; not actually used, but included as an
argument for compatibility with odeint
Returns
-------
dxdt : array(10)
time derivative of x
"""
# Vector to store results; it is convenient for this to have
# some phantom slots; slots 10, 11, 12, and 13 store
# abundances of H2, HeI, MI, and e, respectively
xdot = np.zeros(14)
xgrow = self.extendAbundances(xin)
# Cosmic ray / x-ray ionization reactions
xdot[0] = xgrow[11]*self.ionRate
xdot[1] = self.ionRate
# Photon reactions
ratecoef = 1.7*self.chi*np.exp(-_avfac*self.AV)*_kph
rate = ratecoef*xgrow[_inph]
# Apply CO line shielding factor
rate[2] = rate[2] * fShield_CO_vDB(xgrow[4]*self.NH, self.NH/2.0)
for i, n in enumerate(_inph):
xdot[_inph[i]] -= rate[i]
xdot[_outph1[i]] += rate[i]
xdot[_outph2[i]] += rate[i]
# Two-body reactions
rate = _k2*self.temp**_k2Texp*self.cfac*self.nH * \
xgrow[_in2bdy1]*xgrow[_in2bdy2]
for i, n in enumerate(_in2bdy1):
xdot[_in2bdy1[i]] -= rate[i]
xdot[_in2bdy2[i]] -= rate[i]
xdot[_out2bdy1[i]] += rate[i]
xdot[_out2bdy2[i]] += rate[i]
# Return results
return xdot[:10]
####################################################################
# Method to write the currently stored abundances to a cloud
####################################################################
def applyAbundances(self, addEmitters=False):
"""
This method writes the abundances produced by the chemical
network to the cloud's emitter list.
Parameters
----------
addEmitters : Boolean
if True, emitters that are included in the chemical
network but not in the cloud's existing emitter list will
be added; if False, abundances of emitters already in the
emitter list will be updated, but new emiters will not be
added to the cloud
Returns
-------
Nothing
Remarks
-------
If there is no cloud associated with this chemical network,
this routine does nothing and silently returns.
"""
# SAFETY check: make sure we have an associated cloud to which
# we can write
        if self.cloud is None:
return
# Make a case-insensitive version of the emitter list for
# convenience
        emList = dict(zip(map(str.lower,
                              self.cloud.emitters.keys()),
                          self.cloud.emitters.values()))
        # Save ratios of ^12C to ^13C, and ^16O to ^18O
if '13co' in emList and 'co' in emList:
c13_12 = emList['13co'].abundance / \
emList['co'].abundance
if 'c18o' in emList and 'co' in emList:
o18_16 = emList['c18o'].abundance / \
emList['co'].abundance
# OH, assuming OHx is half OH
if 'oh' in emList:
emList['oh'].abundance = self.x[2]/2.0
elif addEmitters:
try:
self.cloud.addEmitter('oh', self.x[2]/2.0)
except despoticError:
                print('Warning: unable to add OH; cannot find LAMDA file')
# H2O, assuming OHx is half H2O, and that oH2O and pH2O are
        # equally abundant
if 'ph2o' in emList:
emList['ph2o'].abundance = self.x[2]/4.0
elif 'p-h2o' in emList:
emList['p-h2o'].abundance = self.x[2]/4.0
elif addEmitters:
try:
self.cloud.addEmitter('ph2o', self.x[2]/4.0)
except despoticError:
                print('Warning: unable to add p-H2O; cannot find LAMDA file')
if 'oh2o' in emList:
emList['oh2o'].abundance = self.x[2]/4.0
elif 'o-h2o' in emList:
emList['o-h2o'].abundance = self.x[2]/4.0
elif addEmitters:
try:
self.cloud.addEmitter('oh2o', self.x[2]/4.0)
except despoticError:
                print('Warning: unable to add o-H2O; cannot find LAMDA file')
# CO
if 'co' in emList:
emList['co'].abundance = self.x[4]
elif addEmitters:
try:
self.cloud.addEmitter('co', self.x[4])
except despoticError:
                print('Warning: unable to add CO; cannot find LAMDA file')
# if we have 13CO or C18O, make their abundances match that of CO
# multiplied by the appropriate isotopic abundances
if '13co' in emList:
emList['13co'].abundance = self.x[4]*c13_12
if 'c18o' in emList:
emList['c18o'].abundance = self.x[4]*o18_16
# C
if 'c' in emList:
emList['c'].abundance = self.x[5]
elif addEmitters:
try:
self.cloud.addEmitter('c', self.x[5])
except despoticError:
                print('Warning: unable to add C; cannot find LAMDA file')
# C+
if 'c+' in emList:
emList['c+'].abundance = self.x[6]
elif addEmitters:
try:
self.cloud.addEmitter('c+', self.x[6])
except despoticError:
                print('Warning: unable to add C+; cannot find LAMDA file')
# HCO+
if 'hco+' in emList:
emList['hco+'].abundance = self.x[7]
elif addEmitters:
try:
self.cloud.addEmitter('hco+', self.x[7])
except despoticError:
                print('Warning: unable to add HCO+; cannot find LAMDA file')
# O
if 'o' in emList:
emList['o'].abundance = self.x[8]
elif addEmitters:
try:
self.cloud.addEmitter('o', self.x[8])
except despoticError:
                print('Warning: unable to add O; cannot find LAMDA file') | PypiClean |
/Automated_cartography-0.0.2-py3-none-any.whl/robot/robot.py | import time
# import LSC_Client
from robot import LSC_Client
import threading
from inspect import signature
from functools import wraps
def typeassert(*type_args, **type_kwargs):
def decorate(func):
sig = signature(func)
bound_types = sig.bind_partial(*type_args, **type_kwargs).arguments
@wraps(func)
def wrapper(*args, **kwargs):
bound_values = sig.bind(*args, **kwargs)
for name, value in bound_values.arguments.items():
if name in bound_types:
if not isinstance(value, bound_types[name]):
raise TypeError('Argument {} must be {}'.format(name, bound_types[name]))
return func(*args, **kwargs)
return wrapper
return decorate
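# Example (hypothetical helper): typeassert enforces argument types.
#   @typeassert(int)
#   def forward_steps(step):
#       ...
# Calling forward_steps('3') would then raise a TypeError.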
lsc = LSC_Client.LSC_Client()
class robot(object):
lsc.MoveServo(6, 1500, 1000)
lsc.MoveServo(7, 1500, 1000)
time.sleep(1.1)
def up(self, step):
lsc.RunActionGroup(0, step)
lsc.WaitForFinish(int(step * 20000))
def check(self, step):
lsc.RunActionGroup(188, step)
lsc.WaitForFinish(int(step * 20000))
def forward(self, step):
lsc.RunActionGroup(1, step)
lsc.WaitForFinish(int(step * 20000))
def backward(self, step):
lsc.RunActionGroup(2, step)
lsc.WaitForFinish(int(step * 20000))
def left(self, step):
lsc.RunActionGroup(3, step)
lsc.WaitForFinish(int(step * 20000))
def right(self, step):
lsc.RunActionGroup(4, step)
lsc.WaitForFinish(int(step * 20000))
def circle(self, step, radius):
for j in range(0, step):
for i in range(0, 10):
self.right(2)
self.forward(radius)
self.up(1)
def shaking_head(self, step):
lsc.RunActionGroup(50, step)
lsc.WaitForFinish(int(step * 20000))
def nod(self, step):
lsc.RunActionGroup(51, step)
lsc.WaitForFinish(int(step * 20000))
    # ------------ Bonus area, not yet open to the public --------------
    '''
    Haven't decided what to put here yet.
    '''
class show(object):
lsc.RunActionGroup(0, 1)
lsc.WaitForFinish(int(20000))
def hiphop(self):
lsc.RunActionGroup(16, 1)
lsc.WaitForFinish(60000)
def jiangnanstyle(self):
lsc.RunActionGroup(17, 1)
lsc.WaitForFinish(60000)
def smallapple(self):
lsc.RunActionGroup(18, 1)
lsc.WaitForFinish(60000)
def lasong(self):
lsc.RunActionGroup(19, 1)
lsc.WaitForFinish(60000)
def feelgood(self):
lsc.RunActionGroup(20, 1)
lsc.WaitForFinish(60000)
# ---------------- Test area ------------------
from robot import voice
class speak(object):
def speak(self, viocenum):
if viocenum >= 48 or viocenum <= 25:
return "超出语音模块区域"
else:
lsc.RunActionGroup(viocenum, 1)
        vlist = voice.voicelist()
lsc.WaitForFinish(int(20000))
time.sleep(int(vlist.voicelist()[viocenum])) | PypiClean |
/Eskapade_Core-1.0.0-py3-none-any.whl/escore/core_ops/links/to_ds_dict.py | from escore import DataStore
from escore import Link
from escore import StatusCode
from escore import process_manager
class ToDsDict(Link):
"""Stores one object in the DataStore dict during run time."""
def __init__(self, **kwargs):
"""Link to store one external object in the DataStore dict during run time.
:param str name: name of link
:param str store_key: key of object to store in data store
:param obj: object to store
        :param bool force: overwrite if already present in the datastore; default is False (optional)
:param bool at_initialize: store at initialize of link. Default is false.
:param bool at_execute: store at execute of link. Default is true.
:param bool copydict: if true and obj is a dict, copy all key value pairs into datastore. Default is false.
"""
Link.__init__(self, kwargs.pop('name', 'ToDsDict'))
# process keyword arguments
self._process_kwargs(kwargs,
store_key=None,
obj=None,
at_initialize=False,
at_execute=True,
force=False,
copydict=False)
self.check_extra_kwargs(kwargs)
def initialize(self):
"""Initialize the link."""
# perform basic checks.
if self.obj is None:
raise RuntimeError('object "{}" to store is of type None'.format(self.store_key))
# storage key needs to be set in nearly all cases
if not (self.copydict and isinstance(self.obj, dict)):
if not (isinstance(self.store_key, str) and self.store_key):
raise RuntimeError('object storage key has not been set')
ds = process_manager.service(DataStore)
return StatusCode.Success if not self.at_initialize else self.do_storage(ds)
def execute(self):
"""Execute the link."""
ds = process_manager.service(DataStore)
return StatusCode.Success if not self.at_execute else self.do_storage(ds)
def do_storage(self, ds):
"""Perform storage in datastore.
        This function distinguishes between dicts and any other object.
"""
# if dict and copydict==true, store all individual items
if self.copydict and isinstance(self.obj, dict):
stats = [(self.store(ds, v, k, force=self.force)).value for k, v in self.obj.items()]
return StatusCode(max(stats))
# default: store obj under store_key
return self.store(ds, self.obj, force=self.force) | PypiClean |
/DeckenMagicPlugin-0.2.0.tar.gz/DeckenMagicPlugin-0.2.0/deckenmagicplugin/magicplugin.py | from urllib.request import urlretrieve
import urllib.parse as urlparse
import sqlite3
import os
import conjunto
formatos = ['', 'Standard', 'Block', 'Extended', 'Legacy', 'Vintage']
standard = ['', 'roe','wwk','zen','m10','arb','cfx','ala' ]
extended = standard + ['9e','eve','shm',
'mt','lw','fut','pc','ts','tsts','cs','ai','ia','di','gp','rav','sok','bok',
'chk','5dn','ds','mi']
others = ['sc','le','on','ju','tr','od','ap','ps','in','pr','ne','mm','ud','ui','us',
'ex','sh','tp','wl','vi','mr','hl','fe','dk','lg','aq','an','8e','7e','6e','5e',
'4e','rv','un','be','al']
legacy = extended + others
vintage = legacy
sets_por_formato = {
'Standard' : standard,
'Block': ['roe','wwk','zen'],
'Extended': extended,
'Legacy': legacy,
'Vintage': vintage,
}
palavras_tipos = {
'' : '',
'Creature' : r" (tipo like '%Creature%' and tipo not like '%Enchant Creature%') ",
'Artifact' : r" (tipo like '%Artifact%' and tipo not like '%Creature%' )",
'Planeswalker' : r" tipo like '%Planeswalker%' ",
'Enchantment' : r" tipo like '%Enchant%' ",
'Sorcery' : r" tipo = 'Sorcery' ",
'Instant' : r" tipo like '%Instant%' ",
'Land' : r" (tipo like '%Land%' and tipo not like '%Enchant Land%') " }
class Card(dict):
def __init__(self, **attributes):
for i in attributes:
self[i] = attributes[i]
def query_to_cards(scroll):
cards = []
for i in scroll:
card = Card(nome=i[0], sigla=i[1], numero=i[2], tipo=i[3], mana=i[4], raridade=i[5], texto=i[6])
cards.append(card)
return cards
def montar_filtros(filtros):
clausules = []
for filtro in filtros:
combo = filtros[filtro]
model = combo.get_model()
active = combo.get_active()
if active < 0:
continue
else:
valor = model[active][0]
if not valor: continue
if filtro == 'Card Type':
clausules.append( palavras_tipos[ model[active][0] ] )
elif filtro == 'Card Format':
lista = [ "'%s'" % x for x in sets_por_formato[valor] if x ]
clausules.append( " sigla in (%s) " % ','.join(lista) )
else:
clausules.append( " sigla in (select sigla from colecao where descricao = '%s') " % valor )
    print(' and '.join(clausules))
return ' and '.join( clausules )
class Query:
def __init__(self, conn):
self.conn = conn
self.c = self.conn.cursor()
self.select = "select nome, sigla, numero, tipo, mana, raridade, texto from card"
def all_cards(self, filtros):
if filtros:
self.c.execute(self.select + r" where %s order by nome" % filtros)
else:
self.c.execute(self.select + " order by nome")
return query_to_cards(self.c.fetchall())
def find_by(self, **criteria):
scroll = []
for i in criteria:
scroll.append("%s=?" % i)
clause = " and ".join(scroll)
query = "%s where %s order by nome" % (self.select, clause)
        print('QUERY', query)
        print('CRITERIOS', criteria)
        self.c.execute(query, list(criteria.values()))
return query_to_cards(self.c.fetchall())
def find_by_name(self, name, filtros):
if filtros:
query = "%s where upper(nome) like '%s%%' and ( " % (self.select, name.upper()) + filtros + ") order by nome"
else:
query = "%s where upper(nome) like '%s%%' order by nome" % (self.select, name.upper())
        print(query)
self.c.execute(query)
return query_to_cards(self.c.fetchall())
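# A minimal usage sketch (assuming a SQLite database laid out with the
# 'card' table this module expects):
#   conn = sqlite3.connect('cards.db')
#   q = Query(conn)
#   for card in q.find_by_name('Llanowar', ''):
#       print(card['nome'], card['sigla'])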
class MagicPlugin:
number_columns = 2
number_columns_invisibles = 3
columns_names = [ 'Card', 'Set' ]
attributes_card_columns = [ 'name', 'sigla', 'numero' ]
select_filters = ['Card Format','Card Set','Card Type']
def get_select_filter_values(self, name, conn):
filter_values = {
'Card Format' : formatos,
'Card Set' : [''] + conjunto.names_sets(conn, legacy),
'Card Type' : palavras_tipos.keys(),
}
return filter_values[name]
def get_image_back(self):
return os.path.join(os.path.dirname(__file__), 'images', 'back.jpg')
def value_columns_by_card(self, card):
return [ card['nome'], card['sigla'], card['numero'] ]
def download_image(self, card, path):
url = "http://magiccards.info/scans/en/%s/%s.jpg" % (card['sigla'], card['numero'])
parsed = list(urlparse.urlparse(url))
        print('Downloading', parsed)
urlretrieve(urlparse.urlunparse(parsed), path)
def find_card(self, conn, column_values):
query = Query(conn)
posicao_sigla = column_values[self.attributes_card_columns.index('sigla')]
posicao_numero = column_values[self.attributes_card_columns.index('numero')]
        result = query.find_by(sigla=posicao_sigla, numero=posicao_numero)
        print(result)
return result
def description_card(self, card):
return "%s - %s" % (card['nome'], card['sigla'])
def detail_card(self, card):
texto = '%s\t\t%s\n\n%s\t\t%s - %s\n\n%s\n\n%s' % (card['nome'], card['mana'],
card['tipo'], card['sigla'], card['raridade'],
card['texto'],
card['numero']
)
return texto
def find_or_create_path(self, local, card):
if card['sigla'] not in os.listdir(local):
os.mkdir(os.path.join(local,card['sigla']))
caminho = "%s/%s/%s.jpg" % (local, card[ 'sigla'], card['numero'])
return caminho
def find_by_name(self, conn, name, filtros):
query = Query(conn)
return query.find_by_name(name, montar_filtros(filtros))
def all_cards(self, conn, filtros):
query = Query(conn)
return query.all_cards( montar_filtros(filtros) )
def update_sets(self, conn):
return conjunto.update_sets(conn)
def load_sets(self, conn):
conjunto.load_sets(conn)
def create_tables(self, conn):
conjunto.create_tables(conn)
def teste(self):
conjunto.teste() | PypiClean |
/KalturaApiClient-19.3.0.tar.gz/KalturaApiClient-19.3.0/KalturaClient/Plugins/CuePoint.py | from __future__ import absolute_import
from .Core import *
from ..Base import (
getXmlNodeBool,
getXmlNodeFloat,
getXmlNodeInt,
getXmlNodeText,
KalturaClientPlugin,
KalturaEnumsFactory,
KalturaObjectBase,
KalturaObjectFactory,
KalturaParams,
KalturaServiceBase,
)
########## enums ##########
# @package Kaltura
# @subpackage Client
class KalturaCuePointStatus(object):
READY = 1
DELETED = 2
HANDLED = 3
PENDING = 4
def __init__(self, value):
self.value = value
def getValue(self):
return self.value
# @package Kaltura
# @subpackage Client
class KalturaQuestionType(object):
MULTIPLE_CHOICE_ANSWER = 1
TRUE_FALSE = 2
REFLECTION_POINT = 3
MULTIPLE_ANSWER_QUESTION = 4
FILL_IN_BLANK = 5
HOT_SPOT = 6
GO_TO = 7
OPEN_QUESTION = 8
def __init__(self, value):
self.value = value
def getValue(self):
return self.value
# @package Kaltura
# @subpackage Client
class KalturaQuizOutputType(object):
PDF = 1
def __init__(self, value):
self.value = value
def getValue(self):
return self.value
# @package Kaltura
# @subpackage Client
class KalturaScoreType(object):
HIGHEST = 1
LOWEST = 2
LATEST = 3
FIRST = 4
AVERAGE = 5
def __init__(self, value):
self.value = value
def getValue(self):
return self.value
# @package Kaltura
# @subpackage Client
class KalturaThumbCuePointSubType(object):
SLIDE = 1
CHAPTER = 2
def __init__(self, value):
self.value = value
def getValue(self):
return self.value
# @package Kaltura
# @subpackage Client
class KalturaCuePointOrderBy(object):
CREATED_AT_ASC = "+createdAt"
INT_ID_ASC = "+intId"
PARTNER_SORT_VALUE_ASC = "+partnerSortValue"
START_TIME_ASC = "+startTime"
TRIGGERED_AT_ASC = "+triggeredAt"
UPDATED_AT_ASC = "+updatedAt"
CREATED_AT_DESC = "-createdAt"
INT_ID_DESC = "-intId"
PARTNER_SORT_VALUE_DESC = "-partnerSortValue"
START_TIME_DESC = "-startTime"
TRIGGERED_AT_DESC = "-triggeredAt"
UPDATED_AT_DESC = "-updatedAt"
def __init__(self, value):
self.value = value
def getValue(self):
return self.value
# @package Kaltura
# @subpackage Client
class KalturaCuePointType(object):
AD = "adCuePoint.Ad"
ANNOTATION = "annotation.Annotation"
CODE = "codeCuePoint.Code"
EVENT = "eventCuePoint.Event"
QUIZ_ANSWER = "quiz.QUIZ_ANSWER"
QUIZ_QUESTION = "quiz.QUIZ_QUESTION"
THUMB = "thumbCuePoint.Thumb"
def __init__(self, value):
self.value = value
def getValue(self):
return self.value
########## classes ##########
# @package Kaltura
# @subpackage Client
class KalturaCuePoint(KalturaObjectBase):
def __init__(self,
id=NotImplemented,
intId=NotImplemented,
cuePointType=NotImplemented,
status=NotImplemented,
entryId=NotImplemented,
partnerId=NotImplemented,
createdAt=NotImplemented,
updatedAt=NotImplemented,
triggeredAt=NotImplemented,
tags=NotImplemented,
startTime=NotImplemented,
userId=NotImplemented,
partnerData=NotImplemented,
partnerSortValue=NotImplemented,
forceStop=NotImplemented,
thumbOffset=NotImplemented,
systemName=NotImplemented,
isMomentary=NotImplemented,
copiedFrom=NotImplemented):
KalturaObjectBase.__init__(self)
# @var string
# @readonly
self.id = id
# @var int
# @readonly
self.intId = intId
# @var KalturaCuePointType
# @readonly
self.cuePointType = cuePointType
# @var KalturaCuePointStatus
# @readonly
self.status = status
# @var string
# @insertonly
self.entryId = entryId
# @var int
# @readonly
self.partnerId = partnerId
# @var int
# @readonly
self.createdAt = createdAt
# @var int
# @readonly
self.updatedAt = updatedAt
# @var int
self.triggeredAt = triggeredAt
# @var string
self.tags = tags
# Start time in milliseconds
# @var int
self.startTime = startTime
# @var string
self.userId = userId
# @var string
self.partnerData = partnerData
# @var int
self.partnerSortValue = partnerSortValue
# @var KalturaNullableBoolean
self.forceStop = forceStop
# @var int
self.thumbOffset = thumbOffset
# @var string
self.systemName = systemName
# @var bool
# @readonly
self.isMomentary = isMomentary
# @var string
# @readonly
self.copiedFrom = copiedFrom
PROPERTY_LOADERS = {
'id': getXmlNodeText,
'intId': getXmlNodeInt,
'cuePointType': (KalturaEnumsFactory.createString, "KalturaCuePointType"),
'status': (KalturaEnumsFactory.createInt, "KalturaCuePointStatus"),
'entryId': getXmlNodeText,
'partnerId': getXmlNodeInt,
'createdAt': getXmlNodeInt,
'updatedAt': getXmlNodeInt,
'triggeredAt': getXmlNodeInt,
'tags': getXmlNodeText,
'startTime': getXmlNodeInt,
'userId': getXmlNodeText,
'partnerData': getXmlNodeText,
'partnerSortValue': getXmlNodeInt,
'forceStop': (KalturaEnumsFactory.createInt, "KalturaNullableBoolean"),
'thumbOffset': getXmlNodeInt,
'systemName': getXmlNodeText,
'isMomentary': getXmlNodeBool,
'copiedFrom': getXmlNodeText,
}
def fromXml(self, node):
KalturaObjectBase.fromXml(self, node)
self.fromXmlImpl(node, KalturaCuePoint.PROPERTY_LOADERS)
def toParams(self):
kparams = KalturaObjectBase.toParams(self)
kparams.put("objectType", "KalturaCuePoint")
kparams.addStringIfDefined("entryId", self.entryId)
kparams.addIntIfDefined("triggeredAt", self.triggeredAt)
kparams.addStringIfDefined("tags", self.tags)
kparams.addIntIfDefined("startTime", self.startTime)
kparams.addStringIfDefined("userId", self.userId)
kparams.addStringIfDefined("partnerData", self.partnerData)
kparams.addIntIfDefined("partnerSortValue", self.partnerSortValue)
kparams.addIntEnumIfDefined("forceStop", self.forceStop)
kparams.addIntIfDefined("thumbOffset", self.thumbOffset)
kparams.addStringIfDefined("systemName", self.systemName)
return kparams
def getId(self):
return self.id
def getIntId(self):
return self.intId
def getCuePointType(self):
return self.cuePointType
def getStatus(self):
return self.status
def getEntryId(self):
return self.entryId
def setEntryId(self, newEntryId):
self.entryId = newEntryId
def getPartnerId(self):
return self.partnerId
def getCreatedAt(self):
return self.createdAt
def getUpdatedAt(self):
return self.updatedAt
def getTriggeredAt(self):
return self.triggeredAt
def setTriggeredAt(self, newTriggeredAt):
self.triggeredAt = newTriggeredAt
def getTags(self):
return self.tags
def setTags(self, newTags):
self.tags = newTags
def getStartTime(self):
return self.startTime
def setStartTime(self, newStartTime):
self.startTime = newStartTime
def getUserId(self):
return self.userId
def setUserId(self, newUserId):
self.userId = newUserId
def getPartnerData(self):
return self.partnerData
def setPartnerData(self, newPartnerData):
self.partnerData = newPartnerData
def getPartnerSortValue(self):
return self.partnerSortValue
def setPartnerSortValue(self, newPartnerSortValue):
self.partnerSortValue = newPartnerSortValue
def getForceStop(self):
return self.forceStop
def setForceStop(self, newForceStop):
self.forceStop = newForceStop
def getThumbOffset(self):
return self.thumbOffset
def setThumbOffset(self, newThumbOffset):
self.thumbOffset = newThumbOffset
def getSystemName(self):
return self.systemName
def setSystemName(self, newSystemName):
self.systemName = newSystemName
def getIsMomentary(self):
return self.isMomentary
def getCopiedFrom(self):
return self.copiedFrom
# @package Kaltura
# @subpackage Client
class KalturaCuePointListResponse(KalturaListResponse):
def __init__(self,
totalCount=NotImplemented,
objects=NotImplemented):
KalturaListResponse.__init__(self,
totalCount)
# @var array of KalturaCuePoint
# @readonly
self.objects = objects
PROPERTY_LOADERS = {
'objects': (KalturaObjectFactory.createArray, 'KalturaCuePoint'),
}
def fromXml(self, node):
KalturaListResponse.fromXml(self, node)
self.fromXmlImpl(node, KalturaCuePointListResponse.PROPERTY_LOADERS)
def toParams(self):
kparams = KalturaListResponse.toParams(self)
kparams.put("objectType", "KalturaCuePointListResponse")
return kparams
def getObjects(self):
return self.objects
# @package Kaltura
# @subpackage Client
class KalturaCuePointBaseFilter(KalturaRelatedFilter):
def __init__(self,
orderBy=NotImplemented,
advancedSearch=NotImplemented,
idEqual=NotImplemented,
idIn=NotImplemented,
cuePointTypeEqual=NotImplemented,
cuePointTypeIn=NotImplemented,
statusEqual=NotImplemented,
statusIn=NotImplemented,
entryIdEqual=NotImplemented,
entryIdIn=NotImplemented,
createdAtGreaterThanOrEqual=NotImplemented,
createdAtLessThanOrEqual=NotImplemented,
updatedAtGreaterThanOrEqual=NotImplemented,
updatedAtLessThanOrEqual=NotImplemented,
triggeredAtGreaterThanOrEqual=NotImplemented,
triggeredAtLessThanOrEqual=NotImplemented,
tagsLike=NotImplemented,
tagsMultiLikeOr=NotImplemented,
tagsMultiLikeAnd=NotImplemented,
startTimeGreaterThanOrEqual=NotImplemented,
startTimeLessThanOrEqual=NotImplemented,
userIdEqual=NotImplemented,
userIdIn=NotImplemented,
partnerSortValueEqual=NotImplemented,
partnerSortValueIn=NotImplemented,
partnerSortValueGreaterThanOrEqual=NotImplemented,
partnerSortValueLessThanOrEqual=NotImplemented,
forceStopEqual=NotImplemented,
systemNameEqual=NotImplemented,
systemNameIn=NotImplemented):
KalturaRelatedFilter.__init__(self,
orderBy,
advancedSearch)
# @var string
self.idEqual = idEqual
# @var string
self.idIn = idIn
# @var KalturaCuePointType
self.cuePointTypeEqual = cuePointTypeEqual
# @var string
self.cuePointTypeIn = cuePointTypeIn
# @var KalturaCuePointStatus
self.statusEqual = statusEqual
# @var string
self.statusIn = statusIn
# @var string
self.entryIdEqual = entryIdEqual
# @var string
self.entryIdIn = entryIdIn
# @var int
self.createdAtGreaterThanOrEqual = createdAtGreaterThanOrEqual
# @var int
self.createdAtLessThanOrEqual = createdAtLessThanOrEqual
# @var int
self.updatedAtGreaterThanOrEqual = updatedAtGreaterThanOrEqual
# @var int
self.updatedAtLessThanOrEqual = updatedAtLessThanOrEqual
# @var int
self.triggeredAtGreaterThanOrEqual = triggeredAtGreaterThanOrEqual
# @var int
self.triggeredAtLessThanOrEqual = triggeredAtLessThanOrEqual
# @var string
self.tagsLike = tagsLike
# @var string
self.tagsMultiLikeOr = tagsMultiLikeOr
# @var string
self.tagsMultiLikeAnd = tagsMultiLikeAnd
# @var int
self.startTimeGreaterThanOrEqual = startTimeGreaterThanOrEqual
# @var int
self.startTimeLessThanOrEqual = startTimeLessThanOrEqual
# @var string
self.userIdEqual = userIdEqual
# @var string
self.userIdIn = userIdIn
# @var int
self.partnerSortValueEqual = partnerSortValueEqual
# @var string
self.partnerSortValueIn = partnerSortValueIn
# @var int
self.partnerSortValueGreaterThanOrEqual = partnerSortValueGreaterThanOrEqual
# @var int
self.partnerSortValueLessThanOrEqual = partnerSortValueLessThanOrEqual
# @var KalturaNullableBoolean
self.forceStopEqual = forceStopEqual
# @var string
self.systemNameEqual = systemNameEqual
# @var string
self.systemNameIn = systemNameIn
PROPERTY_LOADERS = {
'idEqual': getXmlNodeText,
'idIn': getXmlNodeText,
'cuePointTypeEqual': (KalturaEnumsFactory.createString, "KalturaCuePointType"),
'cuePointTypeIn': getXmlNodeText,
'statusEqual': (KalturaEnumsFactory.createInt, "KalturaCuePointStatus"),
'statusIn': getXmlNodeText,
'entryIdEqual': getXmlNodeText,
'entryIdIn': getXmlNodeText,
'createdAtGreaterThanOrEqual': getXmlNodeInt,
'createdAtLessThanOrEqual': getXmlNodeInt,
'updatedAtGreaterThanOrEqual': getXmlNodeInt,
'updatedAtLessThanOrEqual': getXmlNodeInt,
'triggeredAtGreaterThanOrEqual': getXmlNodeInt,
'triggeredAtLessThanOrEqual': getXmlNodeInt,
'tagsLike': getXmlNodeText,
'tagsMultiLikeOr': getXmlNodeText,
'tagsMultiLikeAnd': getXmlNodeText,
'startTimeGreaterThanOrEqual': getXmlNodeInt,
'startTimeLessThanOrEqual': getXmlNodeInt,
'userIdEqual': getXmlNodeText,
'userIdIn': getXmlNodeText,
'partnerSortValueEqual': getXmlNodeInt,
'partnerSortValueIn': getXmlNodeText,
'partnerSortValueGreaterThanOrEqual': getXmlNodeInt,
'partnerSortValueLessThanOrEqual': getXmlNodeInt,
'forceStopEqual': (KalturaEnumsFactory.createInt, "KalturaNullableBoolean"),
'systemNameEqual': getXmlNodeText,
'systemNameIn': getXmlNodeText,
}
def fromXml(self, node):
KalturaRelatedFilter.fromXml(self, node)
self.fromXmlImpl(node, KalturaCuePointBaseFilter.PROPERTY_LOADERS)
def toParams(self):
kparams = KalturaRelatedFilter.toParams(self)
kparams.put("objectType", "KalturaCuePointBaseFilter")
kparams.addStringIfDefined("idEqual", self.idEqual)
kparams.addStringIfDefined("idIn", self.idIn)
kparams.addStringEnumIfDefined("cuePointTypeEqual", self.cuePointTypeEqual)
kparams.addStringIfDefined("cuePointTypeIn", self.cuePointTypeIn)
kparams.addIntEnumIfDefined("statusEqual", self.statusEqual)
kparams.addStringIfDefined("statusIn", self.statusIn)
kparams.addStringIfDefined("entryIdEqual", self.entryIdEqual)
kparams.addStringIfDefined("entryIdIn", self.entryIdIn)
kparams.addIntIfDefined("createdAtGreaterThanOrEqual", self.createdAtGreaterThanOrEqual)
kparams.addIntIfDefined("createdAtLessThanOrEqual", self.createdAtLessThanOrEqual)
kparams.addIntIfDefined("updatedAtGreaterThanOrEqual", self.updatedAtGreaterThanOrEqual)
kparams.addIntIfDefined("updatedAtLessThanOrEqual", self.updatedAtLessThanOrEqual)
kparams.addIntIfDefined("triggeredAtGreaterThanOrEqual", self.triggeredAtGreaterThanOrEqual)
kparams.addIntIfDefined("triggeredAtLessThanOrEqual", self.triggeredAtLessThanOrEqual)
kparams.addStringIfDefined("tagsLike", self.tagsLike)
kparams.addStringIfDefined("tagsMultiLikeOr", self.tagsMultiLikeOr)
kparams.addStringIfDefined("tagsMultiLikeAnd", self.tagsMultiLikeAnd)
kparams.addIntIfDefined("startTimeGreaterThanOrEqual", self.startTimeGreaterThanOrEqual)
kparams.addIntIfDefined("startTimeLessThanOrEqual", self.startTimeLessThanOrEqual)
kparams.addStringIfDefined("userIdEqual", self.userIdEqual)
kparams.addStringIfDefined("userIdIn", self.userIdIn)
kparams.addIntIfDefined("partnerSortValueEqual", self.partnerSortValueEqual)
kparams.addStringIfDefined("partnerSortValueIn", self.partnerSortValueIn)
kparams.addIntIfDefined("partnerSortValueGreaterThanOrEqual", self.partnerSortValueGreaterThanOrEqual)
kparams.addIntIfDefined("partnerSortValueLessThanOrEqual", self.partnerSortValueLessThanOrEqual)
kparams.addIntEnumIfDefined("forceStopEqual", self.forceStopEqual)
kparams.addStringIfDefined("systemNameEqual", self.systemNameEqual)
kparams.addStringIfDefined("systemNameIn", self.systemNameIn)
return kparams
def getIdEqual(self):
return self.idEqual
def setIdEqual(self, newIdEqual):
self.idEqual = newIdEqual
def getIdIn(self):
return self.idIn
def setIdIn(self, newIdIn):
self.idIn = newIdIn
def getCuePointTypeEqual(self):
return self.cuePointTypeEqual
def setCuePointTypeEqual(self, newCuePointTypeEqual):
self.cuePointTypeEqual = newCuePointTypeEqual
def getCuePointTypeIn(self):
return self.cuePointTypeIn
def setCuePointTypeIn(self, newCuePointTypeIn):
self.cuePointTypeIn = newCuePointTypeIn
def getStatusEqual(self):
return self.statusEqual
def setStatusEqual(self, newStatusEqual):
self.statusEqual = newStatusEqual
def getStatusIn(self):
return self.statusIn
def setStatusIn(self, newStatusIn):
self.statusIn = newStatusIn
def getEntryIdEqual(self):
return self.entryIdEqual
def setEntryIdEqual(self, newEntryIdEqual):
self.entryIdEqual = newEntryIdEqual
def getEntryIdIn(self):
return self.entryIdIn
def setEntryIdIn(self, newEntryIdIn):
self.entryIdIn = newEntryIdIn
def getCreatedAtGreaterThanOrEqual(self):
return self.createdAtGreaterThanOrEqual
def setCreatedAtGreaterThanOrEqual(self, newCreatedAtGreaterThanOrEqual):
self.createdAtGreaterThanOrEqual = newCreatedAtGreaterThanOrEqual
def getCreatedAtLessThanOrEqual(self):
return self.createdAtLessThanOrEqual
def setCreatedAtLessThanOrEqual(self, newCreatedAtLessThanOrEqual):
self.createdAtLessThanOrEqual = newCreatedAtLessThanOrEqual
def getUpdatedAtGreaterThanOrEqual(self):
return self.updatedAtGreaterThanOrEqual
def setUpdatedAtGreaterThanOrEqual(self, newUpdatedAtGreaterThanOrEqual):
self.updatedAtGreaterThanOrEqual = newUpdatedAtGreaterThanOrEqual
def getUpdatedAtLessThanOrEqual(self):
return self.updatedAtLessThanOrEqual
def setUpdatedAtLessThanOrEqual(self, newUpdatedAtLessThanOrEqual):
self.updatedAtLessThanOrEqual = newUpdatedAtLessThanOrEqual
def getTriggeredAtGreaterThanOrEqual(self):
return self.triggeredAtGreaterThanOrEqual
def setTriggeredAtGreaterThanOrEqual(self, newTriggeredAtGreaterThanOrEqual):
self.triggeredAtGreaterThanOrEqual = newTriggeredAtGreaterThanOrEqual
def getTriggeredAtLessThanOrEqual(self):
return self.triggeredAtLessThanOrEqual
def setTriggeredAtLessThanOrEqual(self, newTriggeredAtLessThanOrEqual):
self.triggeredAtLessThanOrEqual = newTriggeredAtLessThanOrEqual
def getTagsLike(self):
return self.tagsLike
def setTagsLike(self, newTagsLike):
self.tagsLike = newTagsLike
def getTagsMultiLikeOr(self):
return self.tagsMultiLikeOr
def setTagsMultiLikeOr(self, newTagsMultiLikeOr):
self.tagsMultiLikeOr = newTagsMultiLikeOr
def getTagsMultiLikeAnd(self):
return self.tagsMultiLikeAnd
def setTagsMultiLikeAnd(self, newTagsMultiLikeAnd):
self.tagsMultiLikeAnd = newTagsMultiLikeAnd
def getStartTimeGreaterThanOrEqual(self):
return self.startTimeGreaterThanOrEqual
def setStartTimeGreaterThanOrEqual(self, newStartTimeGreaterThanOrEqual):
self.startTimeGreaterThanOrEqual = newStartTimeGreaterThanOrEqual
def getStartTimeLessThanOrEqual(self):
return self.startTimeLessThanOrEqual
def setStartTimeLessThanOrEqual(self, newStartTimeLessThanOrEqual):
self.startTimeLessThanOrEqual = newStartTimeLessThanOrEqual
def getUserIdEqual(self):
return self.userIdEqual
def setUserIdEqual(self, newUserIdEqual):
self.userIdEqual = newUserIdEqual
def getUserIdIn(self):
return self.userIdIn
def setUserIdIn(self, newUserIdIn):
self.userIdIn = newUserIdIn
def getPartnerSortValueEqual(self):
return self.partnerSortValueEqual
def setPartnerSortValueEqual(self, newPartnerSortValueEqual):
self.partnerSortValueEqual = newPartnerSortValueEqual
def getPartnerSortValueIn(self):
return self.partnerSortValueIn
def setPartnerSortValueIn(self, newPartnerSortValueIn):
self.partnerSortValueIn = newPartnerSortValueIn
def getPartnerSortValueGreaterThanOrEqual(self):
return self.partnerSortValueGreaterThanOrEqual
def setPartnerSortValueGreaterThanOrEqual(self, newPartnerSortValueGreaterThanOrEqual):
self.partnerSortValueGreaterThanOrEqual = newPartnerSortValueGreaterThanOrEqual
def getPartnerSortValueLessThanOrEqual(self):
return self.partnerSortValueLessThanOrEqual
def setPartnerSortValueLessThanOrEqual(self, newPartnerSortValueLessThanOrEqual):
self.partnerSortValueLessThanOrEqual = newPartnerSortValueLessThanOrEqual
def getForceStopEqual(self):
return self.forceStopEqual
def setForceStopEqual(self, newForceStopEqual):
self.forceStopEqual = newForceStopEqual
def getSystemNameEqual(self):
return self.systemNameEqual
def setSystemNameEqual(self, newSystemNameEqual):
self.systemNameEqual = newSystemNameEqual
def getSystemNameIn(self):
return self.systemNameIn
def setSystemNameIn(self, newSystemNameIn):
self.systemNameIn = newSystemNameIn
# @package Kaltura
# @subpackage Client
class KalturaCuePointFilter(KalturaCuePointBaseFilter):
def __init__(self,
orderBy=NotImplemented,
advancedSearch=NotImplemented,
idEqual=NotImplemented,
idIn=NotImplemented,
cuePointTypeEqual=NotImplemented,
cuePointTypeIn=NotImplemented,
statusEqual=NotImplemented,
statusIn=NotImplemented,
entryIdEqual=NotImplemented,
entryIdIn=NotImplemented,
createdAtGreaterThanOrEqual=NotImplemented,
createdAtLessThanOrEqual=NotImplemented,
updatedAtGreaterThanOrEqual=NotImplemented,
updatedAtLessThanOrEqual=NotImplemented,
triggeredAtGreaterThanOrEqual=NotImplemented,
triggeredAtLessThanOrEqual=NotImplemented,
tagsLike=NotImplemented,
tagsMultiLikeOr=NotImplemented,
tagsMultiLikeAnd=NotImplemented,
startTimeGreaterThanOrEqual=NotImplemented,
startTimeLessThanOrEqual=NotImplemented,
userIdEqual=NotImplemented,
userIdIn=NotImplemented,
partnerSortValueEqual=NotImplemented,
partnerSortValueIn=NotImplemented,
partnerSortValueGreaterThanOrEqual=NotImplemented,
partnerSortValueLessThanOrEqual=NotImplemented,
forceStopEqual=NotImplemented,
systemNameEqual=NotImplemented,
systemNameIn=NotImplemented,
freeText=NotImplemented,
userIdEqualCurrent=NotImplemented,
userIdCurrent=NotImplemented):
KalturaCuePointBaseFilter.__init__(self,
orderBy,
advancedSearch,
idEqual,
idIn,
cuePointTypeEqual,
cuePointTypeIn,
statusEqual,
statusIn,
entryIdEqual,
entryIdIn,
createdAtGreaterThanOrEqual,
createdAtLessThanOrEqual,
updatedAtGreaterThanOrEqual,
updatedAtLessThanOrEqual,
triggeredAtGreaterThanOrEqual,
triggeredAtLessThanOrEqual,
tagsLike,
tagsMultiLikeOr,
tagsMultiLikeAnd,
startTimeGreaterThanOrEqual,
startTimeLessThanOrEqual,
userIdEqual,
userIdIn,
partnerSortValueEqual,
partnerSortValueIn,
partnerSortValueGreaterThanOrEqual,
partnerSortValueLessThanOrEqual,
forceStopEqual,
systemNameEqual,
systemNameIn)
# @var string
self.freeText = freeText
# @var KalturaNullableBoolean
self.userIdEqualCurrent = userIdEqualCurrent
# @var KalturaNullableBoolean
self.userIdCurrent = userIdCurrent
PROPERTY_LOADERS = {
'freeText': getXmlNodeText,
'userIdEqualCurrent': (KalturaEnumsFactory.createInt, "KalturaNullableBoolean"),
'userIdCurrent': (KalturaEnumsFactory.createInt, "KalturaNullableBoolean"),
}
def fromXml(self, node):
KalturaCuePointBaseFilter.fromXml(self, node)
self.fromXmlImpl(node, KalturaCuePointFilter.PROPERTY_LOADERS)
def toParams(self):
kparams = KalturaCuePointBaseFilter.toParams(self)
kparams.put("objectType", "KalturaCuePointFilter")
kparams.addStringIfDefined("freeText", self.freeText)
kparams.addIntEnumIfDefined("userIdEqualCurrent", self.userIdEqualCurrent)
kparams.addIntEnumIfDefined("userIdCurrent", self.userIdCurrent)
return kparams
def getFreeText(self):
return self.freeText
def setFreeText(self, newFreeText):
self.freeText = newFreeText
def getUserIdEqualCurrent(self):
return self.userIdEqualCurrent
def setUserIdEqualCurrent(self, newUserIdEqualCurrent):
self.userIdEqualCurrent = newUserIdEqualCurrent
def getUserIdCurrent(self):
return self.userIdCurrent
def setUserIdCurrent(self, newUserIdCurrent):
self.userIdCurrent = newUserIdCurrent
########## services ##########
# @package Kaltura
# @subpackage Client
class KalturaCuePointService(KalturaServiceBase):
"""Cue Point service"""
def __init__(self, client = None):
KalturaServiceBase.__init__(self, client)
    def add(self, cuePoint):
        """Allows you to add a cue point object associated with an entry"""
kparams = KalturaParams()
kparams.addObjectIfDefined("cuePoint", cuePoint)
self.client.queueServiceActionCall("cuepoint_cuepoint", "add", "KalturaCuePoint", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return KalturaObjectFactory.create(resultNode, 'KalturaCuePoint')
    def addFromBulk(self, fileData):
        """Allows you to add multiple cue point objects by uploading XML that contains multiple cue point definitions"""
kparams = KalturaParams()
kfiles = {"fileData": fileData}
self.client.queueServiceActionCall("cuepoint_cuepoint", "addFromBulk", "KalturaCuePointListResponse", kparams, kfiles)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return KalturaObjectFactory.create(resultNode, 'KalturaCuePointListResponse')
def clone(self, id, entryId):
"""Clone cuePoint with id to given entry"""
kparams = KalturaParams()
kparams.addStringIfDefined("id", id)
kparams.addStringIfDefined("entryId", entryId)
self.client.queueServiceActionCall("cuepoint_cuepoint", "clone", "KalturaCuePoint", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return KalturaObjectFactory.create(resultNode, 'KalturaCuePoint')
    def count(self, filter = NotImplemented):
        """Count cue point objects by filter"""
kparams = KalturaParams()
kparams.addObjectIfDefined("filter", filter)
self.client.queueServiceActionCall("cuepoint_cuepoint", "count", "None", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return getXmlNodeInt(resultNode)
    def delete(self, id):
        """Delete cue point by id, and delete all child cue points"""
kparams = KalturaParams()
kparams.addStringIfDefined("id", id)
self.client.queueServiceActionCall("cuepoint_cuepoint", "delete", "None", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
    def get(self, id):
        """Retrieve a CuePoint object by id"""
kparams = KalturaParams()
kparams.addStringIfDefined("id", id)
self.client.queueServiceActionCall("cuepoint_cuepoint", "get", "KalturaCuePoint", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return KalturaObjectFactory.create(resultNode, 'KalturaCuePoint')
def list(self, filter = NotImplemented, pager = NotImplemented):
"""List cue point objects by filter and pager"""
kparams = KalturaParams()
kparams.addObjectIfDefined("filter", filter)
kparams.addObjectIfDefined("pager", pager)
self.client.queueServiceActionCall("cuepoint_cuepoint", "list", "KalturaCuePointListResponse", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return KalturaObjectFactory.create(resultNode, 'KalturaCuePointListResponse')
def serveBulk(self, filter = NotImplemented, pager = NotImplemented):
"""Download multiple cue points objects as XML definitions"""
kparams = KalturaParams()
kparams.addObjectIfDefined("filter", filter)
kparams.addObjectIfDefined("pager", pager)
        self.client.queueServiceActionCall('cuepoint_cuepoint', 'serveBulk', None, kparams)
return self.client.getServeUrl()
def update(self, id, cuePoint):
"""Update cue point by id"""
kparams = KalturaParams()
kparams.addStringIfDefined("id", id)
kparams.addObjectIfDefined("cuePoint", cuePoint)
self.client.queueServiceActionCall("cuepoint_cuepoint", "update", "KalturaCuePoint", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return KalturaObjectFactory.create(resultNode, 'KalturaCuePoint')
def updateCuePointsTimes(self, id, startTime, endTime = NotImplemented):
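        """Update the start time (and optionally the end time) of an existing cue point by id"""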
kparams = KalturaParams()
kparams.addStringIfDefined("id", id)
        kparams.addIntIfDefined("startTime", startTime)
        kparams.addIntIfDefined("endTime", endTime)
self.client.queueServiceActionCall("cuepoint_cuepoint", "updateCuePointsTimes", "KalturaCuePoint", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return KalturaObjectFactory.create(resultNode, 'KalturaCuePoint')
def updateStatus(self, id, status):
"""Update cuePoint status by id"""
kparams = KalturaParams()
kparams.addStringIfDefined("id", id)
        kparams.addIntIfDefined("status", status)
self.client.queueServiceActionCall("cuepoint_cuepoint", "updateStatus", "None", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
########## main ##########
class KalturaCuePointClientPlugin(KalturaClientPlugin):
# KalturaCuePointClientPlugin
instance = None
# @return KalturaCuePointClientPlugin
@staticmethod
def get():
        if KalturaCuePointClientPlugin.instance is None:
KalturaCuePointClientPlugin.instance = KalturaCuePointClientPlugin()
return KalturaCuePointClientPlugin.instance
# @return array<KalturaServiceBase>
def getServices(self):
return {
'cuePoint': KalturaCuePointService,
}
def getEnums(self):
return {
'KalturaCuePointStatus': KalturaCuePointStatus,
'KalturaQuestionType': KalturaQuestionType,
'KalturaQuizOutputType': KalturaQuizOutputType,
'KalturaScoreType': KalturaScoreType,
'KalturaThumbCuePointSubType': KalturaThumbCuePointSubType,
'KalturaCuePointOrderBy': KalturaCuePointOrderBy,
'KalturaCuePointType': KalturaCuePointType,
}
def getTypes(self):
return {
'KalturaCuePoint': KalturaCuePoint,
'KalturaCuePointListResponse': KalturaCuePointListResponse,
'KalturaCuePointBaseFilter': KalturaCuePointBaseFilter,
'KalturaCuePointFilter': KalturaCuePointFilter,
}
# @return string
def getName(self):
return 'cuePoint' | PypiClean |
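# --- Illustrative usage sketch (not part of the generated client) ---
# It assumes a KalturaClient that has been configured and session-started
# elsewhere; `client` and `entry_id` are hypothetical names.
#
#   cue_filter = KalturaCuePointFilter()
#   cue_filter.setEntryIdEqual(entry_id)
#   response = client.cuePoint.list(cue_filter)
#   for cue in response.getObjects():
#       print(cue.getId(), cue.getStartTime())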
/MPT5.0.1-0.1.tar.gz/MPT5.0.1-0.1/src/MPT5/Database/PostGet.py | import sqlite3
from . import wxsq as sq
from . import srcsql as ss
from Config.Init import *
class Get:
def __init__(self, DBF, Data, file):
self.DBF = DBF
self.Data = Data
if file != '':
sqlfile = DATABASE_PATH + 'sqls' + SLASH + file
#sqlfile = Src_dbf + 'sqls' + SLASH + file
self.SQLtxt = self.openSql(sqlfile)
def openSql(self, sqlfile):
with open(sqlfile) as f:
alltxt = f.readlines()
#print(alltxt)
return alltxt[0]
def GetFromDbf(self):
return sq.wxsqltxt(self.DBF, self.SQLtxt)
def GetFromDbfWithData(self):
        return sq.wxsqltxt(self.DBF, self.SQLtxt + self.Data)
def GetFromString(self, string):
return sq.wxsqltxt(self.DBF, string)
def __del__(self):
pass
def __hash__(self):
pass
class Post:
def __init__(self, DBF, Tabel, Field, Data):
self.DBF = DBF
self.Tabel = Tabel
#self.Field = Field
#self.Data = Data
def Addrecord(self,Field,Data):
return sq.wxsqins(self.DBF, self.Tabel, Field, Data)
def Addrecord2(self,Field,Data):
return sq.wxsqins2(self.DBF, self.Tabel, Field, Data)
def Updaterecord(self,Field,Data):
return sq.wxsqlup(self.DBF, self.Tabel, Field, Data)
def Updaterecord2(self,Field,Data):
return sq.wxsqlup2(self.DBF, self.Tabel, Field, Data)
def Deleterecord(self,Data):
return sq.wxsqdel(self.DBF, self.Tabel, Data)
def DeleteAllrecord(self,Field):
return sq.wxsqdall(self.DBF, Field)
def __del__(self):
pass
def __hash__(self):
pass
class Get2:
def __init__(self, DBF, Data, file):
self.DBF = DBF
self.Data = Data
if file != '':
sqlfile = DATABASE_PATH + 'sqls' + SLASH + file
#sqlfile = Src_dbf + 'sqls' + SLASH + file
self.SQLtxt = self.openSql(sqlfile)
def openSql(self, sqlfile):
with open(sqlfile) as f:
alltxt = f.readlines()
#print(alltxt)
return alltxt[0]
def GetFromDbf(self):
return ss.wxsqltxt(self.DBF, self.SQLtxt)
def GetFromDbfWithData(self):
        return ss.wxsqltxt(self.DBF, self.SQLtxt + self.Data)
def GetFromString(self, string):
return ss.wxsqltxt(self.DBF, string)
def GetFromString2(self, string, fields):
cur = ss.SFDB(self.DBF)
        return cur.cursor.execute(string, fields)
def GetCommandStr(self, database , string):
cur = ss.MyDB_Path(database)
cur.execute(string)
def __del__(self):
pass
def __hash__(self):
pass
class Post2:
def __init__(self, DBF, Tabel, Field, Data):
self.DBF = DBF
self.Tabel = Tabel
#self.Field = Field
#self.Data = Data
def Addrecord(self,Field,Data):
return ss.wxsqins(self.DBF, self.Tabel, Field, Data)
def Addrecord2(self,Field,Data):
return ss.wxsqins2(self.DBF, self.Tabel, Field, Data)
def Addrecord3(self,Field,Data):
return ss.wxsqins3(self.DBF, self.Tabel, Field, Data)
def Updaterecord(self,Field,Data):
return ss.wxsqlup(self.DBF, self.Tabel, Field, Data)
def Updaterecord2(self,Field,Data):
return ss.wxsqlup2(self.DBF, self.Tabel, Field, Data)
def Deleterecord(self,Data):
return ss.wxsqdel(self.DBF, self.Tabel, Data)
def DeleteAllrecord(self,Field):
return ss.wxsqdall(self.DBF, Field)
def __del__(self):
pass
def __hash__(self):
pass | PypiClean |
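# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a SQLite database file and a query file under Database/sqls/;
# the names 'mydata.db', 'records' and 'all_records.sql' are hypothetical,
# and the exact Field/Data formats depend on the wxsq/srcsql helper modules.
#
#   getter = Get('mydata.db', '', 'all_records.sql')
#   rows = getter.GetFromDbf()
#
#   poster = Post('mydata.db', 'records', None, None)
#   poster.Deleterecord(42)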
/Mopidy-Muse-0.0.27.tar.gz/Mopidy-Muse-0.0.27/mopidy_muse/static/client/legacy/client.9a1e6cd3.js | import"core-js/modules/es.array.iterator.js";import"core-js/modules/es.object.to-string.js";import"core-js/modules/es.promise.js";import"core-js/modules/es.string.iterator.js";import"core-js/modules/web.dom-collections.iterator.js";import"core-js/modules/es.array.map.js";import"core-js/modules/web.url.js";import"./client.4760e5ea.js";import"core-js/modules/es.symbol.js";import"core-js/modules/es.symbol.description.js";import"core-js/modules/es.symbol.iterator.js";import"core-js/modules/es.symbol.async-iterator.js";import"core-js/modules/es.symbol.to-string-tag.js";import"core-js/modules/es.json.to-string-tag.js";import"core-js/modules/es.math.to-string-tag.js";import"core-js/modules/es.object.get-prototype-of.js";import"core-js/modules/es.function.name.js";import"core-js/modules/es.object.set-prototype-of.js";import"core-js/modules/web.dom-collections.for-each.js";import"core-js/modules/es.array.slice.js";import"core-js/modules/es.regexp.exec.js";import"core-js/modules/es.string.split.js";import"core-js/modules/es.string.replace.js";import"core-js/modules/es.string.starts-with.js";import"core-js/modules/es.string.search.js";import"core-js/modules/es.object.assign.js";import"core-js/modules/es.string.match.js";import"core-js/modules/es.array.filter.js";import"core-js/modules/es.regexp.to-string.js";import"core-js/modules/es.array.reduce.js";import"core-js/modules/es.reflect.construct.js";import"core-js/modules/es.object.keys.js";import"core-js/modules/es.set.js";import"core-js/modules/es.array.concat.js";import"core-js/modules/es.typed-array.int32-array.js";import"core-js/modules/es.typed-array.copy-within.js";import"core-js/modules/es.typed-array.every.js";import"core-js/modules/es.typed-array.fill.js";import"core-js/modules/es.typed-array.filter.js";import"core-js/modules/es.typed-array.find.js";import"core-js/modules/es.typed-array.find-index.js";import"core-js/modules/es.typed-array.for-each.js";import"core-js/modules/es.typed-array.includes.js";import"core-js/modules/es.typed-array.index-of.js";import"core-js/modules/es.typed-array.iterator.js";import"core-js/modules/es.typed-array.join.js";import"core-js/modules/es.typed-array.last-index-of.js";import"core-js/modules/es.typed-array.map.js";import"core-js/modules/es.typed-array.reduce.js";import"core-js/modules/es.typed-array.reduce-right.js";import"core-js/modules/es.typed-array.reverse.js";import"core-js/modules/es.typed-array.set.js";import"core-js/modules/es.typed-array.slice.js";import"core-js/modules/es.typed-array.some.js";import"core-js/modules/es.typed-array.sort.js";import"core-js/modules/es.typed-array.subarray.js";import"core-js/modules/es.typed-array.to-locale-string.js";import"core-js/modules/es.typed-array.to-string.js";import"core-js/modules/es.array.sort.js";import"core-js/modules/es.array.index-of.js";import"core-js/modules/es.object.get-own-property-descriptors.js";import"core-js/modules/es.array.from.js";import"core-js/modules/es.array.splice.js";import"core-js/modules/es.string.trim.js";import"core-js/modules/es.array.join.js";import"core-js/modules/es.string.anchor.js";import"core-js/modules/es.map.js";import"core-js/modules/es.array.fill.js";import"core-js/modules/es.parse-float.js";import"core-js/modules/es.string.pad-start.js";import"core-js/modules/es.object.values.js";import"core-js/modules/es.array.find.js";import"core-js/modules/es.object.get-own-property-descriptor.js";import"core-js/modules/es.reflect.own-keys.js";
/Corrfunc-2.5.1.tar.gz/Corrfunc-2.5.1/docs/source/development/contributing.rst | .. _contributing:
=========================
Contributing to Corrfunc
=========================
Corrfunc is written in a very modular fashion with minimal interaction between
the various calculations. The algorithm presented in Corrfunc is applicable to
a broad range of astrophysical problems, viz., any situation that requires
looking at *all* objects around a target and performing some analysis with
this group of objects.
Here are the basic steps to get your statistic into the Corrfunc package:
* Fork the repo and add your statistic
* Add exhaustive tests. The output of your statistic should **exactly** agree with a
brute-force implementation (under double-precision). Look at ``test_periodic.c`` and ``test_nonperiodic.c``
under ``theory/tests/`` for tests on simulation volumes. For mock
catalogs, look at ``mocks/tests/tests_mocks.c``.
* Add a python extension for the new statistic. This extension should reside in file
``theory/python_bindings/_countpairs.c`` or
``mocks/python_bindings/_countpairs_mocks.c`` for statistics relevant for
simulations and mocks respectively. It is preferred to have the extension
documented but not necessary.
* Add a call to this new *extension* in the
``python_bindings/call_correlation_functions*.py`` script.
.. note:: Different from corresponding script in ``Corrfunc/`` directory.
* Add a python wrapper for the previous python extension. This wrapper should
exist in ``Corrfunc/theory/`` or ``Corrfunc/mocks/``. Wrapper **must** have
inline API docs.
* Add the new wrapper to ``__all__`` in ``__init__.py`` within the relevant
directory.
* Add an example call to this *wrapper* in
``Corrfunc/call_correlation_functions.py`` or
``Corrfunc/call_correlation_functions_mocks.py`` for simulations and mocks
respectively.
.. note:: Different from corresponding script in ``python_bindings`` directory.
* Add the new wrapper to the API docs within
``ROOT_DIR/docs/source/theory_functions.rst`` or
``ROOT_DIR/docs/source/mocks_functions.rst``.
* Add to the contributors list under
``ROOT_DIR/docs/source/development/contributors.rst``.
* Submit pull request
.. note:: Please feel free to email the `author <mailto:[email protected]>`_ or
the `Corrfunc Google Groups <https://groups.google.com/forum/#!forum/corrfunc>`_ if you need help at any stage.
Corrfunc Design
~~~~~~~~~~~~~~~~
All of the algorithms in Corrfunc have the following components (a short pseudocode sketch follows this list):
* Reading in data. Relevant routines are in the ``io/`` directory with a
mapping within ``io.c`` to handle the file format
* Creating the 3-D lattice structure. Relevant routines are in the
``utils/gridlink_impl.c.src`` and ``utils/gridlink_mocks.c.src``. This
  lattice grids up the particle distribution into cells of size ``rmax`` (the
maximum search radius).
.. note:: The current lattice code duplicates the particle memory. If you
need a lattice that does not duplicate the particle memory, then please email
the `author <mailto:[email protected]>`_. Relevant code existed in Corrfunc
but has been removed in the current incarnation.
* Setting up the OpenMP sections such that threads have local copies of
histogram arrays. If OpenMP is not enabled, then this section should not
produce any compilable code.
* Looping over all cells in the 3-D lattice and then looping over all
neighbouring cells for each cell.
* For a pair of cells, hand the two sets of arrays to a specialized
  kernel (``count*kernel.c.src``) that computes the pairs.
* Aggregate the results, if OpenMP was enabled.
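
To make these components concrete, here is a simplified, purely illustrative
pseudocode sketch of the cell-pair loop (the names below are hypothetical; the
real implementation lives in the C kernels described in the next section)::

    # grid the particles into cells of side >= rmax
    lattice = gridlink(X, Y, Z, rmax)

    for cell in lattice:
        # a cell only needs to be compared against its neighbouring cells
        # (including itself), since rmax bounds the search radius
        for ngb in neighbouring_cells(lattice, cell):
            # the specialized kernel counts pairs between two sets of arrays
            histogram += count_pairs_kernel(cell.particles, ngb.particles, rbins)
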
Directory and file layout
~~~~~~~~~~~~~~~~~~~~~~~~~~
* Codes that compute statistics on simulation volumes (Cartesian XYZ as input)
go into a separate directory within ``theory``
* Codes that compute statistics on mock catalogs (RA, DEC [CZ]) go into a
separate directory within ``mocks``
* Public API in a ``count*.h`` file. Corresponding C file simply dispatches to
appropriate floating point implementation.
* Floating point implementation in the file ``count*_impl.c.src``. This file is
processed via ``sed`` to generate both single and double precision
implementations.
* A kernel named ``count*kernels.c.src`` containing implementations for
counting pairs on two sets of arrays. This kernel file is also preprocessed
to produce both the single and double precision kernels.
* Tests go within ``tests`` directory under ``theory`` or ``mocks``, as
appropriate. For simulation routines, tests with and without periodic
boundaries go into ``test_periodic.c`` and ``test_nonperiodic.c``
* C code to generate the python extensions goes under ``python_bindings``
directory into the file ``_countpairs*.c``
* Each python extension has a python wrapper within ``Corrfunc`` directory
Coding Guidelines
~~~~~~~~~~~~~~~~~
C guidelines
============
Code contents
-------------
* **Always** check for error conditions when calling a function
* If an error condition occurs when making a kernel/external library call,
first call ``perror`` and then return the error status. If calling a wrapper
from within Corrfunc, assume that ``perror`` has already been called and
simply return the status. Clean up memory before returning status.
* Declare variables in the smallest possible scope.
* Add ``const`` qualifiers liberally
* There **must** not be any compiler warnings (with ``gcc6.0``) under the set of warnings
  already enabled within ``common.mk``. If a warning cannot be avoided
  because of logic issues, then suppress the warning but note why that
  suppression is required. Warnings are treated as errors on the continuous integration platform (Travis).
* Valgrind should not report any fixable memory or file leaks (memory
leaks in OpenMP library, e.g., ``libgomp``, are fine)
Style
------
The coding style is loosely based on the `Linux Kernel Guideline
<https://www.kernel.org/doc/Documentation/CodingStyle>`_. These conventions are recommended
but not strictly enforced. However, note that if you do contribute code to
Corrfunc, the style may get converted.
* Braces
  - Opening braces start on the same line, except for functions
- Closing braces on new line
- Even single line conditionals must have opening and closing braces
* Comments
- Explanatory comments on top of code segment enclosed with ``/**/``
- Inline comments must be single-line on the right
* Indentation is ``tab:=4 spaces``
* Avoid ``typedef`` for ``structs`` and ``unions``
Python guidelines
=================
* Follow the `astropy python code guide <http://docs.astropy.org/en/stable/development/codeguide_emacs.html>`_
* Docs are in ``numpydocs`` format (see the example below). Follow any of the wrapper routines in
``Corrfunc`` (which are, in turn, taken from `halotools <http://halotools.readthedocs.io/>`_)
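
For instance, a minimal ``numpydoc``-style docstring might look like this
(purely illustrative)::

    def countpairs(rbins, X, Y, Z):
        """
        Count pairs of particles in the requested radial bins.

        Parameters
        ----------
        rbins : array_like
            Boundaries of the radial bins, in the same units as the positions.
        X, Y, Z : array_like
            Cartesian positions of the particles.

        Returns
        -------
        results : Numpy structured array
            Pair counts in each radial bin.
        """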
| PypiClean |
/BlackLager-1.0.14.tar.gz/BlackLager-1.0.14/blackLager/textwindow.py | from client_utils import error_handler
import curses
from datetime import datetime
import traceback
class TextWindow(object):
def __init__(self, name, rows, columns, y1, x1, y2, x2, ShowBorder, BorderColor, TitleColor, stdscr):
self.name = name
self.rows = rows
self.columns = columns
self.y1 = y1
self.x1 = x1
self.y2 = y2
self.x2 = x2
self.ShowBorder = ShowBorder
self.BorderColor = BorderColor # pre defined text colors 1-7
self.TextWindow = curses.newwin(
self.rows, self.columns, self.y1, self.x1)
self.CurrentRow = 1
self.StartColumn = 1
# we will modify this later, based on if we show borders or not
self.DisplayRows = self.rows
# we will modify this later, based on if we show borders or not
self.DisplayColumns = self.columns
self.PreviousLineText = ""
self.PreviousLineRow = 0
self.PreviousLineColor = 2
self.Title = ""
self.TitleColor = TitleColor
self.stdscr = stdscr
# If we are showing border, we only print inside the lines
if (self.ShowBorder == 'Y'):
self.CurrentRow = 1
self.StartColumn = 1
self.DisplayRows = self.rows - 2 # we don't want to print over the border
# we don't want to print over the border
self.DisplayColumns = self.columns - 2
self.TextWindow.attron(curses.color_pair(BorderColor))
self.TextWindow.border()
self.TextWindow.attroff(curses.color_pair(BorderColor))
self.TextWindow.refresh()
else:
self.CurrentRow = 0
self.StartColumn = 0
def scroll_print(self, PrintLine, Color=2, TimeStamp=False, BoldLine=True):
# print(PrintLine)
# for now the string is printed in the window and the current row is incremented
# when the counter reaches the end of the window, we will wrap around to the top
# we don't print on the window border
# make sure to pad the new string with spaces to overwrite any old text
current_time = datetime.now().strftime("%H:%M:%S")
if (TimeStamp):
PrintLine = current_time + ": {}".format(PrintLine)
# expand tabs to X spaces, pad the string with space
PrintLine = PrintLine.expandtabs(4)
# adjust strings
# Get a part of the big string that will fit in the window
PrintableString = PrintLine[0:self.DisplayColumns]
        RemainingString = PrintLine[self.DisplayColumns:]  # start exactly where PrintableString ends so no character is dropped
try:
while (len(PrintableString) > 0):
# padd with spaces
PrintableString = PrintableString.ljust(
self.DisplayColumns, ' ')
# if (self.rows == 1):
# #if you print on the last character of a window you get an error
# PrintableString = PrintableString[0:-2]
# self.TextWindow.addstr(0,0,PrintableString)
# else:
# unbold Previous line
self.TextWindow.attron(
curses.color_pair(self.PreviousLineColor))
self.TextWindow.addstr(
self.PreviousLineRow, self.StartColumn, self.PreviousLineText)
self.TextWindow.attroff(
curses.color_pair(self.PreviousLineColor))
if BoldLine:
# A_NORMAL Normal display (no highlight)
# A_STANDOUT Best highlighting mode of the terminal
# A_UNDERLINE Underlining
# A_REVERSE Reverse video
# A_BLINK Blinking
# A_DIM Half bright
# A_BOLD Extra bright or bold
# A_PROTECT Protected mode
# A_INVIS Invisible or blank mode
# A_ALTCHARSET Alternate character set
# A_CHARTEXT Bit-mask to extract a character
# COLOR_PAIR(n) Color-pair number n
# print new line in bold
self.TextWindow.attron(curses.color_pair(Color))
self.TextWindow.addstr(
self.CurrentRow, self.StartColumn, PrintableString, curses.A_BOLD)
self.TextWindow.attroff(curses.color_pair(Color))
else:
# print new line in Regular
self.TextWindow.attron(curses.color_pair(Color))
self.TextWindow.addstr(
self.CurrentRow, self.StartColumn, PrintableString)
self.TextWindow.attroff(curses.color_pair(Color))
self.PreviousLineText = PrintableString
self.PreviousLineColor = Color
self.PreviousLineRow = self.CurrentRow
self.CurrentRow = self.CurrentRow + 1
# Adjust strings
PrintableString = RemainingString[0:self.DisplayColumns]
RemainingString = RemainingString[self.DisplayColumns:]
if (self.CurrentRow > (self.DisplayRows)):
if (self.ShowBorder == 'Y'):
self.CurrentRow = 1
else:
self.CurrentRow = 0
# erase to end of line
# self.TextWindow.clrtoeol()
self.TextWindow.refresh()
except Exception as ErrorMessage:
TraceMessage = traceback.format_exc()
AdditionalInfo = "PrintLine: {}".format(PrintLine)
error_handler(ErrorMessage, TraceMessage, AdditionalInfo, self.stdscr)
def window_print(self, y, x, PrintLine, Color=2):
# print at a specific coordinate within the window
# try:
# expand tabs to X spaces, pad the string with space then truncate
PrintLine = PrintLine.expandtabs(4)
# pad the print line with spaces then truncate at the display length
PrintLine = PrintLine.ljust(self.DisplayColumns - 1)
PrintLine = PrintLine[0:self.DisplayColumns - x]
self.TextWindow.attron(curses.color_pair(Color))
self.TextWindow.addstr(y, x, PrintLine)
self.TextWindow.attroff(curses.color_pair(Color))
self.TextWindow.refresh()
def display_title(self):
# display the window title
title = ''
try:
# expand tabs to X spaces, pad the string with space then truncate
title = self.Title[0:self.DisplayColumns-3]
self.TextWindow.attron(curses.color_pair(self.TitleColor))
if (self.rows > 2):
# print new line in bold
self.TextWindow.addstr(0, 2, title)
else:
print("ERROR - You cannot display title on a window smaller than 3 rows")
self.TextWindow.attroff(curses.color_pair(self.TitleColor))
self.TextWindow.refresh()
except Exception as ErrorMessage:
TraceMessage = traceback.format_exc()
AdditionalInfo = "Title: " + title
error_handler(ErrorMessage, TraceMessage, AdditionalInfo, self.stdscr)
def clear(self):
self.TextWindow.erase()
self.TextWindow.attron(curses.color_pair(self.BorderColor))
self.TextWindow.border()
self.TextWindow.attroff(curses.color_pair(self.BorderColor))
self.display_title()
if self.ShowBorder == 'Y':
self.CurrentRow = 1
self.StartColumn = 1
else:
self.CurrentRow = 0
self.StartColumn = 0 | PypiClean |
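# --- Illustrative usage sketch (not part of the original module) ---
# A minimal curses driver; the window geometry and colors below are hypothetical.
#
#   import curses
#
#   def main(stdscr):
#       curses.start_color()
#       curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
#       win = TextWindow('log', 10, 60, 0, 0, 9, 59, 'Y', 2, 2, stdscr)
#       win.Title = 'Messages'
#       win.display_title()
#       win.scroll_print('hello', Color=2, TimeStamp=True)
#       stdscr.getch()
#
#   curses.wrapper(main)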
/DJModels-0.0.6-py3-none-any.whl/djmodels/contrib/gis/feeds.py | from djmodels.contrib.syndication.views import Feed as BaseFeed
from djmodels.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin:
"""
This mixin provides the necessary routines for SyndicationFeed subclasses
to produce simple GeoRSS or W3C Geo elements.
"""
def georss_coords(self, coords):
"""
In GeoRSS coordinate pairs are ordered by lat/lon and separated by
a single white space. Given a tuple of coordinates, return a string
GeoRSS representation.
"""
return ' '.join('%f %f' % (coord[1], coord[0]) for coord in coords)
def add_georss_point(self, handler, coords, w3c_geo=False):
"""
Adds a GeoRSS point with the given coords using the given handler.
Handles the differences between simple GeoRSS and the more popular
W3C Geo specification.
"""
if w3c_geo:
lon, lat = coords[:2]
handler.addQuickElement('geo:lat', '%f' % lat)
handler.addQuickElement('geo:lon', '%f' % lon)
else:
handler.addQuickElement('georss:point', self.georss_coords((coords,)))
def add_georss_element(self, handler, item, w3c_geo=False):
"""Add a GeoRSS XML element using the given item and handler."""
# Getting the Geometry object.
geom = item.get('geometry')
if geom is not None:
if isinstance(geom, (list, tuple)):
# Special case if a tuple/list was passed in. The tuple may be
# a point or a box
box_coords = None
if isinstance(geom[0], (list, tuple)):
# Box: ( (X0, Y0), (X1, Y1) )
if len(geom) == 2:
box_coords = geom
else:
raise ValueError('Only should be two sets of coordinates.')
else:
if len(geom) == 2:
# Point: (X, Y)
self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
elif len(geom) == 4:
# Box: (X0, Y0, X1, Y1)
box_coords = (geom[:2], geom[2:])
else:
raise ValueError('Only should be 2 or 4 numeric elements.')
# If a GeoRSS box was given via tuple.
if box_coords is not None:
if w3c_geo:
raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
handler.addQuickElement('georss:box', self.georss_coords(box_coords))
else:
# Getting the lower-case geometry type.
gtype = str(geom.geom_type).lower()
if gtype == 'point':
self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
else:
if w3c_geo:
raise ValueError('W3C Geo only supports Point geometries.')
# For formatting consistent w/the GeoRSS simple standard:
# http://georss.org/1.0#simple
if gtype in ('linestring', 'linearring'):
handler.addQuickElement('georss:line', self.georss_coords(geom.coords))
elif gtype in ('polygon',):
# Only support the exterior ring.
handler.addQuickElement('georss:polygon', self.georss_coords(geom[0].coords))
else:
raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
# ### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super().rss_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
def root_attributes(self):
attrs = super().root_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super().rss_attributes()
attrs['xmlns:geo'] = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item, w3c_geo=True)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed, w3c_geo=True)
# ### Feed subclass ###
class Feed(BaseFeed):
"""
This is a subclass of the `Feed` from `djmodels.contrib.syndication`.
    This allows users to define `geometry(obj)` and/or `item_geometry(item)`
    methods on their own subclasses so that geo-referenced information may
    be placed in the feed.
"""
feed_type = GeoRSSFeed
def feed_extra_kwargs(self, obj):
return {'geometry': self._get_dynamic_attr('geometry', obj)}
def item_extra_kwargs(self, item):
return {'geometry': self._get_dynamic_attr('item_geometry', item)} | PypiClean |
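# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical feed that geo-references each item; the model and field
# names below are assumptions.
#
#   class CityFeed(Feed):
#       title = "Recently added cities"
#       link = "/cities/"
#
#       def items(self):
#           return City.objects.order_by('-created')[:10]
#
#       def item_geometry(self, item):
#           return item.point  # a Point geometry; rendered as <georss:point>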
/Natume-0.1.0.tar.gz/Natume-0.1.0/natume/client.py | import urllib2
import base64
from natume.connection import HTTPConnection, HTTPSConnection, urlsplit, urljoin, urlencode
from natume.util import decompress
from natume.compat import SimpleCookie
from json import loads
class WebClient(object):
DEFAULT_HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.114 Safari/537.36',
'Accept-Encoding': 'gzip',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
}
def __init__(self, url, headers=None, auth=None, ca=None):
scheme, uri, path, query, fragment = urlsplit(url)
http_class = scheme == 'http' and HTTPConnection or HTTPSConnection
self.connection = http_class(uri)
self.default_headers = self.DEFAULT_HEADERS.copy()
if headers:
self.default_headers.update(headers)
self.path = path
self.headers = {}
self.cookies = {}
self.etags = {}
self.status_code = 0
self.body = None
self.__content = None
self.__json = None
self.auth = auth
# todo: add ca handle
#self.ca = ca
    def ajax(self, method, path, **kwargs):
        """ HTTP AJAX request (adds the X-Requested-With header)."""
        headers = kwargs.pop('headers', None) or {}
        headers['X-Requested-With'] = 'XMLHttpRequest'
        return self.do_request(method, path, headers=headers, **kwargs)
def get(self, path, **kwargs):
""" GET HTTP request."""
return self.do_request('GET', path, **kwargs)
def head(self, path, **kwargs):
""" HEAD HTTP request."""
return self.do_request('HEAD', path, **kwargs)
def post(self, path, **kwargs):
""" POST HTTP request."""
return self.do_request('POST', path, **kwargs)
def follow(self):
sc = self.status_code
assert sc in [207, 301, 302, 303, 307]
        location = self.headers['location']  # header values are stored as plain strings
scheme, netloc, path, query, fragment = urlsplit(location)
method = sc == 307 and self.method or 'GET'
return self.do_request(method, path)
    def do_request(self, method, path, payload=None, headers=None, auth=None):
        self.method = method  # remembered so follow() can reuse it for 307 redirects
        # Merge per-request headers over the client defaults; the original
        # code rebound the ``headers`` argument and lost the caller's values.
        merged_headers = self.default_headers.copy()
        merged_headers.update(headers or {})
        headers = merged_headers
        auth = auth or self.auth
        if auth:
            self.handle_auth_header(headers, auth[0], auth[1])
if self.cookies:
headers['Cookie'] = '; '.join(
'%s=%s' % cookie for cookie in self.cookies.items())
path = urljoin(self.path, path)
if path in self.etags:
headers['If-None-Match'] = self.etags[path]
body = ''
if payload:
if method == 'GET':
path += '?' + urlencode(payload, doseq=True)
else:
body = urlencode(payload, doseq=True)
headers['Content-Type'] = 'application/x-www-form-urlencoded'
self.status_code = 0
self.body = None
self.__content = None
self.__json = None
self.connection.connect()
self.connection.request(method, path, body, headers)
r = self.connection.getresponse()
self.body = r.read()
self.connection.close()
self.status_code = r.status
self.headers = {}
for name, value in r.getheaders():
self.headers[name] = value
self.handle_content_encoding()
self.handle_etag(path)
self.handle_cookies()
return self.status_code
    def handle_auth_header(self, headers, username, password):
        """Add an HTTP Basic Authorization header to the given headers dict."""
        auth_base64 = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
        headers['Authorization'] = 'Basic %s' % auth_base64
def handle_content_encoding(self):
if 'content-encoding' in self.headers \
and 'gzip' in self.headers['content-encoding']:
self.body = decompress(self.body)
def handle_etag(self, path):
"""Etags process"""
if 'etag' in self.headers:
            self.etags[path] = self.headers['etag']  # header values are plain strings, not lists
def handle_cookies(self):
if 'set-cookie' in self.headers:
cookie_string = self.headers['set-cookie']
cookies = SimpleCookie(cookie_string)
for name in cookies:
value = cookies[name].value
if value:
self.cookies[name] = value
elif name in self.cookies:
del self.cookies[name]
def clear_cookies(self):
"""Clear cookies"""
self.cookies = {}
@property
def content(self):
""" Returns a content of the response.
"""
if self.__content is None:
self.__content = self.body.decode('utf-8')
return self.__content
@property
def json(self):
""" Returns a json response."""
assert 'application/json' in self.headers['content-type']
if self.__json is None:
self.__json = loads(self.body)
return self.__json
def show(self):
"""Opens the current page in real web browser."""
with open('page.html', 'w') as fp:
fp.write(self.body)
import webbrowser
import os
url = 'file://' + os.path.abspath('page.html')
webbrowser.open(url)
def get_header(self, key):
key = key.replace('_', '-')
if key in self.headers:
return self.headers[key]
@property
def content_type(self):
"""Get Content type"""
value = self.get_header('content-type')
c = value.split(';')
return c[0]
@property
def charset(self):
"""Get http chaset encoding"""
value = self.get_header('content-type')
c = value.split(';')
if len(c) == 2:
return c[1].split('=')[1] | PypiClean |
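# --- Illustrative usage sketch (not part of the original module) ---
# Fetches a page and inspects the response; the URL is hypothetical.
#
#   client = WebClient('http://example.com/')
#   status = client.get('/')
#   print(status, client.content_type)
#   if status in (301, 302, 303, 307):
#       client.follow()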
/DoorPi-2.4.1.8.tar.gz/DoorPi-2.4.1.8/doorpi/status/webserver_lib/session_handler.py |
import logging
logger = logging.getLogger(__name__)
logger.debug("%s loaded", __name__)
import time # session timestamp
from doorpi.action.base import SingleAction
import doorpi
CONF_AREA_PREFIX = 'AREA_'
class SessionHandler:
_Sessions = {}
@property
def config(self): return doorpi.DoorPi().config
@property
def session_ids(self): return self._Sessions.keys()
@property
def sessions(self): return self._Sessions
def __init__(self):
doorpi.DoorPi().event_handler.register_event('WebServerCreateNewSession', __name__)
doorpi.DoorPi().event_handler.register_event('WebServerAuthUnknownUser', __name__)
doorpi.DoorPi().event_handler.register_event('WebServerAuthWrongPassword', __name__)
def destroy(self):
doorpi.DoorPi().event_handler.unregister_source(__name__, True)
__del__ = destroy
def get_session(self, session_id):
if session_id in self._Sessions:
logger.trace('session %s found: %s', session_id, self._Sessions[session_id])
return self._Sessions[session_id]
else:
logger.trace('no session with session id %s found', session_id)
return None
__call__ = get_session
def exists_session(self, session_id):
return session_id in self._Sessions
def build_security_object(self, username, password, remote_client = ''):
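        """Check *username*/*password* against the ``User`` config section and
        build the session's permission object (groups, read and write areas)."""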
if not len(self.config.get_keys('User')):
self.config.set_value(section = 'User', key = 'door', value = 'pi', password = True)
self.config.set_value(section = 'Group', key = 'administrator', value = 'door')
self.config.set_value(section = 'WritePermission', key = 'administrator', value = 'installer')
self.config.set_value(section = 'AREA_installer', key = '.*', value = '')
groups_with_write_permissions = self.config.get_keys('WritePermission')
groups_with_read_permissions = self.config.get_keys('ReadPermission')
groups = self.config.get_keys('Group')
users = self.config.get_keys('User')
if not username in users:
doorpi.DoorPi().event_handler('WebServerAuthUnknownUser', __name__, {
'username': username,
'remote_client': remote_client
})
return None
real_password = self.config.get('User', username, password = True)
if real_password != password:
doorpi.DoorPi().event_handler('WebServerAuthWrongPassword', __name__, {
'username': username,
'password': password,
'remote_client': remote_client
})
return None
web_session = dict(
username = username,
remote_client = remote_client,
session_starttime = time.time(),
readpermissions = [],
writepermissions = [],
groups = []
)
for group in groups:
users_in_group = self.config.get_list('Group', group)
if username in users_in_group: web_session['groups'].append(group)
for group in groups_with_read_permissions:
if group in web_session['groups']:
modules = self.config.get_list('ReadPermission', group)
for modul in modules:
web_session['readpermissions'].extend(
self.config.get_keys(CONF_AREA_PREFIX+modul)
)
for group in groups_with_write_permissions:
if group in web_session['groups']:
modules = self.config.get_list('WritePermission', group)
for modul in modules:
web_session['writepermissions'].extend(
self.config.get_keys(CONF_AREA_PREFIX+modul)
)
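                    # Write permission on an area implies read permission on it too.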
web_session['readpermissions'].extend(
self.config.get_keys(CONF_AREA_PREFIX+modul)
)
web_session['readpermissions'] = list(set(web_session['readpermissions']))
web_session['readpermissions'].sort()
web_session['writepermissions'] = list(set(web_session['writepermissions']))
web_session['writepermissions'].sort()
doorpi.DoorPi().event_handler('WebServerCreateNewSession', __name__, {
'session': web_session
})
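        # Sessions are keyed by username, so the username doubles as the session id.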
self._Sessions[web_session['username']] = web_session
return web_session | PypiClean |
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/README.rst | Welcome to Mathics Core!
========================
|Pypi Installs| |Latest Version| |Supported Python Versions| |SlackStatus|_
|Packaging status|
Mathics is a general-purpose computer algebra system (CAS).
However, this repository contains just the Python modules for the WL built-in functions and variables, the core primitives (e.g. ``Symbol``), a parser to create expressions, and an evaluator to execute them.
The home page for Mathics is https://mathics.org where you will find a list of screenshots and components making up the system.
Installing
----------
Installing locally requires a number of Python and OS package dependencies.
See the `Installing Mathics <https://mathics-development-guide.readthedocs.io/en/latest/installing.html>`_ guide for instructions on installing Mathics3.
Running
-------
Mathics3, the core library, comes with a very simple command-line program called ``mathics``::
$ mathics
Mathics 5.0.3dev0
on CPython 3.8.12 (heads/v2.3.4.1_release:4a6b4d3504, Jun 3 2022, 15:46:12)
using SymPy 1.10.1, mpmath 1.2.1, numpy 1.23.1, cython 0.29.30
Copyright (C) 2011-2022 The Mathics Team.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions.
See the documentation for the full license.
Quit by evaluating Quit[] or by pressing CONTROL-D.
In[1]:=
Type ``mathics --help`` to see the available options.
For a more featureful frontend, see `mathicsscript
<https://pypi.org/project/mathicsscript/>`_.
For a Django-based web front-end, see `<https://pypi.org/project/Mathics-Django/>`_.
Contributing
------------
Please feel encouraged to contribute to Mathics! Create your own fork, make the desired changes, commit, and make a pull request.
License
-------
Mathics is released under the GNU General Public License Version 3 (GPL3).
.. |SlackStatus| image:: https://mathics-slackin.herokuapp.com/badge.svg
.. _SlackStatus: https://mathics-slackin.herokuapp.com/
.. |Travis| image:: https://secure.travis-ci.org/Mathics3/mathics-core.svg?branch=master
.. _Travis: https://travis-ci.org/Mathics3/mathics-core
.. _PyPI: https://pypi.org/project/Mathics/
.. |mathicsscript| image:: https://github.com/Mathics3/mathicsscript/blob/master/screenshots/mathicsscript1.gif
.. |mathicsserver| image:: https://mathics.org/images/mathicsserver.png
.. |Latest Version| image:: https://badge.fury.io/py/Mathics3.svg
:target: https://badge.fury.io/py/Mathics3
.. |Pypi Installs| image:: https://pepy.tech/badge/Mathics3
.. |Supported Python Versions| image:: https://img.shields.io/pypi/pyversions/Mathics3.svg
.. |Packaging status| image:: https://repology.org/badge/vertical-allrepos/mathics.svg
:target: https://repology.org/project/mathics/versions
| PypiClean |
/Babel-2.12.1.tar.gz/Babel-2.12.1/docs/support.rst | .. -*- mode: rst; encoding: utf-8 -*-
=============================
Support Classes and Functions
=============================
The ``babel.support`` module contains a number of classes and functions that
can help with integrating Babel, and internationalization in general, into your
application or framework. The code in this module is not used by Babel itself,
but is provided to address common requirements of applications that need to
handle internationalization.
---------------
Lazy Evaluation
---------------
One such requirement is lazy evaluation of translations. Many web-based
applications define localizable messages at the module level, or in general at
some level where the locale of the remote user is not yet known. For such
cases, web frameworks generally provide a "lazy" variant of the ``gettext``
functions, which translates the message not when the ``gettext`` function is
invoked, but when the string is accessed in some manner.
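Babel's ``LazyProxy`` class supports this pattern directly. The sketch below is
illustrative only: it assumes a ``gettext`` callable exists that resolves the
locale of the current request at the time the string is actually used:

.. code-block:: python

    from babel.support import LazyProxy

    def lazy_gettext(string):
        # Translation is deferred until the proxy is used as a string.
        return LazyProxy(gettext, string)

    # Defined at import time, but only translated when rendered.
    ERROR_MESSAGE = lazy_gettext('Something went wrong')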
---------------------------
Extended Translations Class
---------------------------
Many web-based applications are composed of a variety of different components
(possibly using some kind of plugin system), and some of those components may
provide their own message catalogs that need to be integrated into the larger
system.
To support this usage pattern, Babel provides a ``Translations`` class that is
derived from the ``GNUTranslations`` class in the ``gettext`` module. This
class adds a ``merge()`` method that takes another ``Translations`` instance,
and merges the content of the latter into the main catalog:
.. code-block:: python
translations = Translations.load('main')
translations.merge(Translations.load('plugin1'))
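The merged object still behaves like a regular ``gettext`` translations
instance, so lookups work as usual. Note that when both catalogs define the
same message ID, the catalog merged last takes precedence (the message ID
``'Hello'`` below is assumed for illustration):

.. code-block:: python

    print(translations.gettext('Hello'))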
| PypiClean |
/Bis-Miner-3.11.1.tar.gz/Bis-Miner-3.11.0/Orange/widgets/data/owpurgedomain.py | from AnyQt.QtCore import Qt
from Orange.data import Table
from Orange.preprocess.remove import Remove
from Orange.widgets import gui, widget
from Orange.widgets.settings import Setting
from Orange.widgets.utils.sql import check_sql_input
from Orange.widgets.widget import Input, Output
class OWPurgeDomain(widget.OWWidget):
name = "清理特征域"
description = "从数据集中去除冗余的数值和特征;给数值排序"
icon = "icons/PurgeDomain.svg"
category = "Data"
keywords = ["data", "purge", "domain"]
class Inputs:
data = Input("数据", Table)
class Outputs:
data = Output("数据", Table)
removeValues = Setting(1)
removeAttributes = Setting(1)
removeClasses = Setting(1)
removeClassAttribute = Setting(1)
removeMetaAttributeValues = Setting(1)
removeMetaAttributes = Setting(1)
autoSend = Setting(True)
sortValues = Setting(True)
sortClasses = Setting(True)
want_main_area = False
resizing_enabled = False
buttons_area_orientation = Qt.Vertical
feature_options = (('sortValues', 'Sort categorical feature values'),
('removeValues', 'Remove unused feature values'),
('removeAttributes', 'Remove constant features'))
class_options = (('sortClasses', 'Sort categorical class values'),
('removeClasses', 'Remove unused class variable values'),
('removeClassAttribute', 'Remove constant class variables'))
meta_options = (('removeMetaAttributeValues', 'Remove unused meta attribute values'),
('removeMetaAttributes', 'Remove constant meta attributes'))
stat_labels = (('Sorted features', 'resortedAttrs'),
('Reduced features', 'reducedAttrs'),
('Removed features', 'removedAttrs'),
('Sorted classes', 'resortedClasses'),
('Reduced classes', 'reducedClasses'),
('Removed classes', 'removedClasses'),
('Reduced metas', 'reducedMetas'),
('Removed metas', 'removedMetas'))
def __init__(self):
super().__init__()
self.data = None
self.removedAttrs = "-"
self.reducedAttrs = "-"
self.resortedAttrs = "-"
self.removedClasses = "-"
self.reducedClasses = "-"
self.resortedClasses = "-"
self.removedMetas = "-"
self.reducedMetas = "-"
boxAt = gui.vBox(self.controlArea, "Features")
for not_first, (value, label) in enumerate(self.feature_options):
if not_first:
gui.separator(boxAt, 2)
gui.checkBox(boxAt, self, value, label,
callback=self.optionsChanged)
boxAt = gui.vBox(self.controlArea, "Classes", addSpace=True)
for not_first, (value, label) in enumerate(self.class_options):
if not_first:
gui.separator(boxAt, 2)
gui.checkBox(boxAt, self, value, label,
callback=self.optionsChanged)
boxAt = gui.vBox(self.controlArea, "Meta attributes", addSpace=True)
for not_first, (value, label) in enumerate(self.meta_options):
if not_first:
gui.separator(boxAt, 2)
gui.checkBox(boxAt, self, value, label,
callback=self.optionsChanged)
box3 = gui.vBox(self.controlArea, 'Statistics', addSpace=True)
for i, (label, value) in enumerate(self.stat_labels):
# add a separator after each group of three
if i != 0 and i % 3 == 0:
gui.separator(box3, 2)
gui.label(box3, self, "{}: %({})s".format(label, value))
gui.auto_commit(self.buttonsArea, self, "autoSend", "Apply",
orientation=Qt.Horizontal)
gui.rubber(self.controlArea)
@Inputs.data
@check_sql_input
def setData(self, dataset):
if dataset is not None:
self.data = dataset
self.unconditional_commit()
else:
self.removedAttrs = "-"
self.reducedAttrs = "-"
self.resortedAttrs = "-"
self.removedClasses = "-"
self.reducedClasses = "-"
self.resortedClasses = "-"
self.removedMetas = "-"
self.reducedMetas = "-"
self.Outputs.data.send(None)
self.data = None
def optionsChanged(self):
self.commit()
def commit(self):
if self.data is None:
return
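        # Combine the checked options into the flag masks expected by the
        # Remove preprocessor; each enabled checkbox contributes its flag.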
attr_flags = sum([Remove.SortValues * self.sortValues,
Remove.RemoveConstant * self.removeAttributes,
Remove.RemoveUnusedValues * self.removeValues])
class_flags = sum([Remove.SortValues * self.sortClasses,
Remove.RemoveConstant * self.removeClassAttribute,
Remove.RemoveUnusedValues * self.removeClasses])
meta_flags = sum([Remove.RemoveConstant * self.removeMetaAttributes,
Remove.RemoveUnusedValues * self.removeMetaAttributeValues])
remover = Remove(attr_flags, class_flags, meta_flags)
data = remover(self.data)
attr_res, class_res, meta_res = \
remover.attr_results, remover.class_results, remover.meta_results
self.removedAttrs = attr_res['removed']
self.reducedAttrs = attr_res['reduced']
self.resortedAttrs = attr_res['sorted']
self.removedClasses = class_res['removed']
self.reducedClasses = class_res['reduced']
self.resortedClasses = class_res['sorted']
self.removedMetas = meta_res['removed']
self.reducedMetas = meta_res['reduced']
self.Outputs.data.send(data)
def send_report(self):
def list_opts(opts):
return "; ".join(label.lower()
for value, label in opts
if getattr(self, value)) or "no changes"
self.report_items("Settings", (
("Features", list_opts(self.feature_options)),
("Classes", list_opts(self.class_options)),
("Metas", list_opts(self.meta_options))))
if self.data:
self.report_items("Statistics", (
(label, getattr(self, value))
for label, value in self.stat_labels
))
if __name__ == "__main__":
from AnyQt.QtWidgets import QApplication
appl = QApplication([])
ow = OWPurgeDomain()
data = Table("car.tab")
subset = [inst for inst in data
if inst["buying"] == "v-high"]
subset = Table(data.domain, subset)
# The "buying" should be removed and the class "y" reduced
ow.setData(subset)
ow.show()
appl.exec_()
ow.saveSettings() | PypiClean |
/Infomericaclass-1.0.0.tar.gz/Infomericaclass-1.0.0/inf/examples/ethnicolr_app_contrib2000.ipynb | ## Application: Illustrating the use of the package by imputing the race of the campaign contributors recorded by FEC for the years 2000 and 2010
a) What proportion of contributors were black, white, hispanic, asian, etc.?
b) What proportion of total donations was given by blacks, hispanics, whites, and asians?
c) The amount contributed by people of each race, divided by the total amount contributed.
```
import pandas as pd
df = pd.read_csv('/opt/names/fec_contrib/contribDB_2000.csv', nrows=100)
df.columns
```
Columns of interest: amount, date, contributor_name, contributor_lname, contributor_fname; keep only rows where contributor_type == 'I' (individual contributors).
```
#df = pd.read_csv('/opt/names/fec_contrib/contribDB_2000.csv', usecols=['date', 'amount', 'contributor_type', 'contributor_lname', 'contributor_fname', 'contributor_name'], nrows=300000)
df = pd.read_csv('/opt/names/fec_contrib/contribDB_2000.csv', usecols=['date', 'amount', 'contributor_type', 'contributor_lname', 'contributor_fname', 'contributor_name'])
df
#sdf = df[df.contributor_type=='I'].sample(1000)
sdf = df[df.contributor_type=='I'].copy()
sdf
from clean_names import clean_name
def do_clean_name(n):
n = str(n)
return clean_name(n)
#sdf['clean_name'] = sdf['contributor_name'].apply(lambda c: do_clean_name(c))
#sdf
from ethnicolr import census_ln, pred_census_ln
rdf = pred_census_ln(sdf, 'contributor_lname', 2000)
rdf
#rdf.to_csv('output-pred-contrib2000-ln.csv', index_label='idx')
```
### a) What proportion of contributors were black, white, hispanic, asian, etc.?
```
adf = rdf.groupby(['race']).agg({'contributor_lname': 'count'})
adf * 100 / adf.sum()
```
### b) What proportion of total donations was given by blacks, hispanics, whites, and asians?
```
bdf = rdf.groupby(['race']).agg({'amount': 'sum'})
bdf * 100 / bdf.sum()
```
### c) Amount contributed by people of each race, divided by the total amount contributed.
```
contrib_white = sum(rdf.amount * rdf.white)
contrib_black = sum(rdf.amount * rdf.black)
contrib_api = sum(rdf.amount * rdf.api)
contrib_hispanic = sum(rdf.amount * rdf.hispanic)
contrib_amount = [{'race': 'white', 'amount': contrib_white},
{'race': 'black', 'amount': contrib_black},
{'race': 'api', 'amount': contrib_api},
{'race': 'hispanic', 'amount': contrib_hispanic}]
contrib_df = pd.DataFrame(contrib_amount, columns=['race', 'amount'])
contrib_df.amount /= 1e6  # scale dollars to millions, matching the $1M column label
contrib_df.columns = ['race', 'amount($1M)']
contrib_df
contrib_df.set_index('race', inplace=True, drop=True)
contrib_df.columns = ['% amount']
contrib_df * 100 / contrib_df.sum()
```
| PypiClean |
/FEADRE_AI-1.0.7.tar.gz/FEADRE_AI-1.0.7/FEADRE_AI/ai/calc/x_nms.py | import numpy as np
import random
import cv2
def non_max_suppress(predicts_dict, threshold):
    for object_name, bbox in predicts_dict.items():  # run NMS per class; each key/value pair holds all boxes of one class
        bbox_array = np.array(bbox, dtype=np.float64)
        # Get the top-left (x1, y1) and bottom-right (x2, y2) corners and the confidence of
        # each box. The image origin (0, 0) is the top-left corner, so x grows to the right
        # and y grows downward.
x1 = bbox_array[:, 0]
y1 = bbox_array[:, 1]
x2 = bbox_array[:, 2]
y2 = bbox_array[:, 3]
scores = bbox_array[:, 4]
        order = scores.argsort()[::-1]  # argsort gives ascending indices; [::-1] reverses them to descending confidence
        # Areas of all boxes of the current class (element-wise via numpy broadcasting).
        # The +1 counts pixels inclusively: a span from x=3 to x=5 covers 3 pixels (5 - 3 + 1), not 2.
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
keep = []
        # Walk the boxes from highest to lowest confidence, removing every box whose IoU
        # with the kept box exceeds the threshold.
        while order.size > 0:
i = order[0]
            keep.append(i)  # keep the index of the box with the current highest confidence
            # Intersection corners of the current box with all remaining boxes; the IoU is
            # computed for one box against all the others at once via broadcasting, so each
            # of xx1/yy1/xx2/yy2 has one entry per remaining box.
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
            iou = inter / (areas[i] + areas[order[1:]] - inter)  # IoU of the top box against all remaining boxes
            inds = np.where(iou <= threshold)[0]  # indices of boxes whose IoU is at or below the threshold
            order = order[inds + 1]  # +1 because IoU was computed against order[1:], so indices are offset by one
bbox = bbox_array[keep]
predicts_dict[object_name] = bbox.tolist()
return predicts_dict
if __name__ == '__main__':
    # Test the effect of non-maximum suppression on an all-black image.
img = np.zeros((600, 600), np.uint8)
# predicts_dict = {'black1': [[83, 54, 165, 163, 0.8], [67, 48, 118, 132, 0.5], [91, 38, 192, 171, 0.6]]}
predicts_dict = {'black1': [[83, 54, 165, 163, 0.8], [67, 48, 118, 132, 0.5], [91, 38, 192, 171, 0.6]],
'black2': [[59, 120, 137, 368, 0.12], [54, 154, 148, 382, 0.13]]}
    # Draw the predefined boxes on the black image.
for object_name, bbox in predicts_dict.items():
for box in bbox:
x1, y1, x2, y2, score = box[0], box[1], box[2], box[3], box[-1]
            y_text = int(random.uniform(y1, y2))  # random.uniform(a, b) returns a float in [a, b); used to scatter the score labels vertically
cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
cv2.putText(img, str(score), (x2 - 30, y_text), 2, 1, (255, 255, 0))
cv2.namedWindow("black1_roi") # 创建一个显示图像的窗口
cv2.imshow("black1_roi", img) # 在窗口中显示图像;注意这里的窗口名字如果不是刚刚创建的窗口的名字则会自动创建一个新的窗口并将图像显示在这个窗口
cv2.waitKey(0) # 如果不添这一句,在IDLE中执行窗口直接无响应。在命令行中执行的话,则是一闪而过。
cv2.destroyAllWindows() # 最后释放窗口是个好习惯!
    # Draw the boxes that survive non-maximum suppression on a fresh black image.
img_cp = np.zeros((600, 600), np.uint8)
predicts_dict_nms = non_max_suppress(predicts_dict, 0.1)
for object_name, bbox in predicts_dict_nms.items():
for box in bbox:
x1, y1, x2, y2, score = int(box[0]), int(box[1]), int(box[2]), int(box[3]), box[-1]
            y_text = int(random.uniform(y1, y2))  # random float in [y1, y2) to scatter the score labels vertically
cv2.rectangle(img_cp, (x1, y1), (x2, y2), (255, 255, 255), 2)
cv2.putText(img_cp, str(score), (x2 - 30, y_text), 2, 1, (255, 255, 0))
cv2.namedWindow("black1_nms") # 创建一个显示图像的窗口
cv2.imshow("black1_nms", img_cp) # 在窗口中显示图像;注意这里的窗口名字如果不是刚刚创建的窗口的名字则会自动创建一个新的窗口并将图像显示在这个窗口
cv2.waitKey(0) # 如果不添这一句,在IDLE中执行窗口直接无响应。在命令行中执行的话,则是一闪而过。
cv2.destroyAllWindows() # 最后释放窗口是个好习惯! | PypiClean |
/KonFoo-3.0.0-py3-none-any.whl/konfoo/__init__.py | # Categories
from .categories import Category
# Core classes
from .core import (
is_any, is_field, is_container, is_array, is_structure,
is_pointer, is_mixin,
Patch, Index, Alignment,
Container, Structure, Sequence, Array,
Field,
Stream, String,
Float, Double,
Decimal, Bit, Byte, Char, Signed, Unsigned, Bitset, Bool, Enum, Scaled,
Fraction, Bipolar, Unipolar, Datetime, IPv4Address,
Pointer, StructurePointer, SequencePointer, ArrayPointer, StreamPointer,
StringPointer, AutoStringPointer,
RelativePointer, StructureRelativePointer, SequenceRelativePointer,
ArrayRelativePointer, StreamRelativePointer, StringRelativePointer
)
# Enumerations
from .enums import Enumeration
# Exceptions
from .exceptions import (
ByteOrderTypeError, ByteOrderValueError,
EnumTypeError, FactoryTypeError, MemberTypeError,
ProviderTypeError, ContainerLengthError,
FieldAddressError, FieldAlignmentError, FieldByteOrderError,
FieldIndexError, FieldSizeError, FieldTypeError, FieldValueError,
FieldValueEncodingError,
FieldGroupByteOrderError, FieldGroupOffsetError, FieldGroupSizeError
)
# Field classes
from .fields import (
Decimal8, Decimal16, Decimal24, Decimal32, Decimal64,
Signed8, Signed16, Signed24, Signed32, Signed64,
Unsigned8, Unsigned16, Unsigned24, Unsigned32, Unsigned64,
Bool8, Bool16, Bool24, Bool32, Bool64,
Antivalent, Enum4, Enum8, Enum16, Enum24, Enum32, Enum64,
Bitset8, Bitset16, Bitset24, Bitset32, Bitset64,
Scaled8, Scaled16, Scaled24, Scaled32, Scaled64,
Bipolar2, Bipolar4, Unipolar2
)
# Globals
from .globals import Byteorder, BYTEORDER
# Pointer classes
from .pointers import (
Pointer8, Pointer16, Pointer24,
Pointer32, Pointer48, Pointer64,
StructurePointer8, StructurePointer16, StructurePointer24,
StructurePointer32, StructurePointer48, StructurePointer64,
ArrayPointer8, ArrayPointer16, ArrayPointer24,
ArrayPointer32, ArrayPointer48, ArrayPointer64,
StreamPointer8, StreamPointer16, StreamPointer24,
StreamPointer32, StreamPointer48, StreamPointer64,
StringPointer8, StringPointer16, StringPointer24,
StringPointer32, StringPointer48, StringPointer64,
FloatPointer,
Signed8Pointer, Signed16Pointer, Signed32Pointer,
Unsigned8Pointer, Unsigned16Pointer, Unsigned32Pointer
)
# Relative pointer classes
from .pointers import (
RelativePointer8, RelativePointer16, RelativePointer24,
RelativePointer32, RelativePointer48, RelativePointer64,
StructureRelativePointer8, StructureRelativePointer16,
StructureRelativePointer24, StructureRelativePointer32,
StructureRelativePointer48, StructureRelativePointer64,
ArrayRelativePointer8, ArrayRelativePointer16, ArrayRelativePointer24,
ArrayRelativePointer32, ArrayRelativePointer48, ArrayRelativePointer64,
StreamRelativePointer8, StreamRelativePointer16, StreamRelativePointer24,
StreamRelativePointer32, StreamRelativePointer48, StreamRelativePointer64,
StringRelativePointer8, StringRelativePointer16, StringRelativePointer24,
StringRelativePointer32, StringRelativePointer48, StringRelativePointer64,
)
# Providers
from .providers import Provider, FileProvider
# Utilities
from .utils import d3flare_json, HexViewer
__all__ = [
# Enumerations
'Enumeration',
# Categories
'Category',
# Globals
'Byteorder', 'BYTEORDER',
# Exceptions
'ByteOrderTypeError',
'ByteOrderValueError',
'EnumTypeError',
'FactoryTypeError',
'MemberTypeError',
'ProviderTypeError',
'ContainerLengthError',
'FieldAddressError',
'FieldAlignmentError',
'FieldByteOrderError',
'FieldIndexError',
'FieldSizeError',
'FieldValueError',
'FieldTypeError',
'FieldValueEncodingError',
'FieldGroupByteOrderError',
'FieldGroupOffsetError',
'FieldGroupSizeError',
# Provider
'Provider',
'FileProvider',
# Core classes
'is_any',
'is_field',
'is_container',
'is_array',
'is_structure',
'is_pointer',
'is_mixin',
'Patch',
'Index',
'Alignment',
'Container',
'Structure',
'Sequence',
'Array',
'Field',
'Stream',
'String',
'Float', 'Double',
'Decimal',
'Bit',
'Byte',
'Char',
'Signed',
'Unsigned',
'Bitset',
'Bool',
'Enum',
'Scaled',
'Fraction',
'Bipolar',
'Unipolar',
'Datetime',
'IPv4Address',
'Pointer',
'StructurePointer',
'SequencePointer', 'ArrayPointer',
'StreamPointer', 'StringPointer', 'AutoStringPointer',
'RelativePointer',
'StructureRelativePointer',
'SequenceRelativePointer', 'ArrayRelativePointer',
'StreamRelativePointer', 'StringRelativePointer',
# Field classes
'Decimal8',
'Decimal16',
'Decimal24',
'Decimal32',
'Decimal64',
'Signed8',
'Signed16',
'Signed24',
'Signed32',
'Signed64',
'Unsigned8',
'Unsigned16',
'Unsigned24',
'Unsigned32',
'Unsigned64',
'Bool8',
'Bool16',
'Bool24',
'Bool32',
'Bool64',
'Antivalent',
'Enum4',
'Enum8',
'Enum16',
'Enum24',
'Enum32',
'Enum64',
'Bitset8',
'Bitset16',
'Bitset24',
'Bitset32',
'Bitset64',
'Scaled8',
'Scaled16',
'Scaled24',
'Scaled32',
'Scaled64',
'Bipolar2',
'Bipolar4',
'Unipolar2',
# Pointer classes
'Pointer8',
'Pointer16',
'Pointer24',
'Pointer32',
'Pointer48',
'Pointer64',
'StructurePointer8',
'StructurePointer16',
'StructurePointer24',
'StructurePointer32',
'StructurePointer48',
'StructurePointer64',
'ArrayPointer8',
'ArrayPointer16',
'ArrayPointer24',
'ArrayPointer32',
'ArrayPointer48',
'ArrayPointer64',
'StreamPointer8',
'StreamPointer16',
'StreamPointer24',
'StreamPointer32',
'StreamPointer48',
'StreamPointer64',
'StringPointer8',
'StringPointer16',
'StringPointer24',
'StringPointer32',
'StringPointer48',
'StringPointer64',
'FloatPointer',
'Signed8Pointer',
'Signed16Pointer',
'Signed32Pointer',
'Unsigned8Pointer',
'Unsigned16Pointer',
'Unsigned32Pointer',
# Relative pointer classes
'RelativePointer8',
'RelativePointer16',
'RelativePointer24',
'RelativePointer32',
'RelativePointer48',
'RelativePointer64',
'StructureRelativePointer8',
'StructureRelativePointer16',
'StructureRelativePointer24',
'StructureRelativePointer32',
'StructureRelativePointer48',
'StructureRelativePointer64',
'ArrayRelativePointer8',
'ArrayRelativePointer16',
'ArrayRelativePointer24',
'ArrayRelativePointer32',
'ArrayRelativePointer48',
'ArrayRelativePointer64',
'StreamRelativePointer8',
'StreamRelativePointer16',
'StreamRelativePointer24',
'StreamRelativePointer32',
'StreamRelativePointer48',
'StreamRelativePointer64',
'StringRelativePointer8',
'StringRelativePointer16',
'StringRelativePointer24',
'StringRelativePointer32',
'StringRelativePointer48',
'StringRelativePointer64',
# Utilities
'd3flare_json', 'HexViewer',
]
__version__ = '3.0.0' | PypiClean |