# Source: TugberkArkose/MLScheduler, file: benchmarks/SimResults/combinations_spec_locality/cmp_perlbenchgamessbzip2calculix/power.py (0 GitHub stars)
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.252824,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.401267,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.06855,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.727199,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.25925,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.722213,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.70866,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.554982,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 8.08674,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.201871,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0263615,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.297346,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.19496,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.499218,
'Execution Unit/Register Files/Runtime Dynamic': 0.221321,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.787775,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.76062,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.49724,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00213072,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00213072,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00186333,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000725418,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00280061,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00892538,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0201619,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.18742,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.460352,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.636562,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.31342,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0239859,
'L2/Runtime Dynamic': 0.00602656,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 6.72114,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.64318,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.177421,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.177421,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.56237,
'Load Store Unit/Runtime Dynamic': 3.69558,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.43749,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.874981,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.155267,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.155538,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0757317,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.82692,
'Memory Management Unit/Runtime Dynamic': 0.231269,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 30.0305,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.704284,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0456598,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.370436,
'Renaming Unit/Int Front End RAT/Subthreshold
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.event
import raritan.rpc.idl
import raritan.rpc.peripheral
import raritan.rpc.sensors
# interface
class DeviceManager(Interface):
idlType = "peripheral.DeviceManager:2.0.0"
ERR_INVALID_PARAMS = 1
# enumeration
class ZCoordMode(Enumeration):
idlType = "peripheral.DeviceManager.ZCoordMode:1.0.0"
values = ["RACKUNITS", "FREEFORM"]
ZCoordMode.RACKUNITS = ZCoordMode(0)
ZCoordMode.FREEFORM = ZCoordMode(1)
# structure
class Settings(Structure):
idlType = "peripheral.DeviceManager.Settings:1.0.0"
elements = [
"zCoordMode",
"autoManageNewDevices",
"deviceAltitude",
"presenceDetectionTimeout",
"defaultThresholdsMap",
]
def __init__(
self,
zCoordMode,
autoManageNewDevices,
deviceAltitude,
presenceDetectionTimeout,
defaultThresholdsMap,
):
typecheck.is_enum(
zCoordMode,
raritan.rpc.peripheral.DeviceManager.ZCoordMode,
AssertionError,
)
typecheck.is_bool(autoManageNewDevices, AssertionError)
typecheck.is_float(deviceAltitude, AssertionError)
typecheck.is_int(presenceDetectionTimeout, AssertionError)
self.zCoordMode = zCoordMode
self.autoManageNewDevices = autoManageNewDevices
self.deviceAltitude = deviceAltitude
self.presenceDetectionTimeout = presenceDetectionTimeout
self.defaultThresholdsMap = defaultThresholdsMap
@classmethod
def decode(cls, json, agent):
obj = cls(
zCoordMode=raritan.rpc.peripheral.DeviceManager.ZCoordMode.decode(
json["zCoordMode"]
),
autoManageNewDevices=json["autoManageNewDevices"],
deviceAltitude=json["deviceAltitude"],
presenceDetectionTimeout=json["presenceDetectionTimeout"],
defaultThresholdsMap=dict(
[
(
elem["key"],
raritan.rpc.sensors.NumericSensor.Thresholds.decode(
elem["value"], agent
),
)
for elem in json["defaultThresholdsMap"]
]
),
)
return obj
def encode(self):
json = {}
json["zCoordMode"] = raritan.rpc.peripheral.DeviceManager.ZCoordMode.encode(
self.zCoordMode
)
json["autoManageNewDevices"] = self.autoManageNewDevices
json["deviceAltitude"] = self.deviceAltitude
json["presenceDetectionTimeout"] = self.presenceDetectionTimeout
json["defaultThresholdsMap"] = [
dict(
key=k, value=raritan.rpc.sensors.NumericSensor.Thresholds.encode(v)
)
for k, v in self.defaultThresholdsMap.items()
]
return json
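# Usage sketch (illustrative values; `agent` stands for a raritan.rpc Agent):
#   s = DeviceManager.Settings(DeviceManager.ZCoordMode.RACKUNITS, True, 300.0, 30, {})
#   blob = s.encode()                                  # plain JSON-compatible dict
#   copy = DeviceManager.Settings.decode(blob, agent)  # round-trips the structure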
# structure
class MetaData(Structure):
idlType = "peripheral.DeviceManager.MetaData:1.0.0"
elements = ["oneWirePortCount", "onboardDeviceCount"]
def __init__(self, oneWirePortCount, onboardDeviceCount):
typecheck.is_int(oneWirePortCount, AssertionError)
typecheck.is_int(onboardDeviceCount, AssertionError)
self.oneWirePortCount = oneWirePortCount
self.onboardDeviceCount = onboardDeviceCount
@classmethod
def decode(cls, json, agent):
obj = cls(
oneWirePortCount=json["oneWirePortCount"],
onboardDeviceCount=json["onboardDeviceCount"],
)
return obj
def encode(self):
json = {}
json["oneWirePortCount"] = self.oneWirePortCount
json["onboardDeviceCount"] = self.onboardDeviceCount
return json
# structure
class DeviceTypeInfo(Structure):
idlType = "peripheral.DeviceManager.DeviceTypeInfo:1.0.0"
elements = [
"type",
"isActuator",
"identifier",
"name",
"defaultRange",
"defaultDecDigits",
]
def __init__(
self, type, isActuator, identifier, name, defaultRange, defaultDecDigits
):
typecheck.is_struct(
type, raritan.rpc.sensors.Sensor.TypeSpec, AssertionError
)
typecheck.is_bool(isActuator, AssertionError)
typecheck.is_string(identifier, AssertionError)
typecheck.is_string(name, AssertionError)
typecheck.is_struct(
defaultRange, raritan.rpc.sensors.NumericSensor.Range, AssertionError
)
typecheck.is_int(defaultDecDigits, AssertionError)
self.type = type
self.isActuator = isActuator
self.identifier = identifier
self.name = name
self.defaultRange = defaultRange
self.defaultDecDigits = defaultDecDigits
@classmethod
def decode(cls, json, agent):
obj = cls(
type=raritan.rpc.sensors.Sensor.TypeSpec.decode(json["type"], agent),
isActuator=json["isActuator"],
identifier=json["identifier"],
name=json["name"],
defaultRange=raritan.rpc.sensors.NumericSensor.Range.decode(
json["defaultRange"], agent
),
defaultDecDigits=json["defaultDecDigits"],
)
return obj
def encode(self):
json = {}
json["type"] = raritan.rpc.sensors.Sensor.TypeSpec.encode(self.type)
json["isActuator"] = self.isActuator
json["identifier"] = self.identifier
json["name"] = self.name
json["defaultRange"] = raritan.rpc.sensors.NumericSensor.Range.encode(
self.defaultRange
)
json["defaultDecDigits"] = self.defaultDecDigits
return json
# value object
class SettingsChangedEvent(raritan.rpc.event.UserEvent):
idlType = "peripheral.DeviceManager.SettingsChangedEvent:1.0.0"
def __init__(self, oldSettings, newSettings, actUserName, actIpAddr, source):
super(
raritan.rpc.peripheral.DeviceManager.SettingsChangedEvent, self
).__init__(actUserName, actIpAddr, source)
typecheck.is_struct(
oldSettings,
raritan.rpc.peripheral.DeviceManager.Settings,
AssertionError,
)
typecheck.is_struct(
newSettings,
raritan.rpc.peripheral.DeviceManager.Settings,
AssertionError,
)
self.oldSettings = oldSettings
self.newSettings = newSettings
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.SettingsChangedEvent, self
).encode()
json["oldSettings"] = raritan.rpc.peripheral.DeviceManager.Settings.encode(
self.oldSettings
)
json["newSettings"] = raritan.rpc.peripheral.DeviceManager.Settings.encode(
self.newSettings
)
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
oldSettings=raritan.rpc.peripheral.DeviceManager.Settings.decode(
json["oldSettings"], agent
),
newSettings=raritan.rpc.peripheral.DeviceManager.Settings.decode(
json["newSettings"], agent
),
# for event.UserEvent
actUserName=json["actUserName"],
actIpAddr=json["actIpAddr"],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = ["oldSettings", "newSettings"]
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.SettingsChangedEvent, self
).listElements()
)
return elements
# value object
class DeviceEvent(raritan.rpc.idl.Event):
idlType = "peripheral.DeviceManager.DeviceEvent:1.0.0"
def __init__(self, device, allDevices, source):
super(raritan.rpc.peripheral.DeviceManager.DeviceEvent, self).__init__(
source
)
typecheck.is_valobj(device, raritan.rpc.peripheral.Device, AssertionError)
for x0 in allDevices:
typecheck.is_valobj(x0, raritan.rpc.peripheral.Device, AssertionError)
self.device = device
self.allDevices = allDevices
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.DeviceEvent, self
).encode()
json["device"] = ValueObject.encode(self.device)
json["allDevices"] = [ValueObject.encode(x0) for x0 in self.allDevices]
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
device=ValueObject.decode(json["device"], agent),
allDevices=[ValueObject.decode(x0, agent) for x0 in json["allDevices"]],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = ["device", "allDevices"]
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.DeviceEvent, self
).listElements()
)
return elements
# value object
class DeviceAddedEvent(DeviceEvent):
idlType = "peripheral.DeviceManager.DeviceAddedEvent:1.0.0"
def __init__(self, device, allDevices, source):
super(raritan.rpc.peripheral.DeviceManager.DeviceAddedEvent, self).__init__(
device, allDevices, source
)
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.DeviceAddedEvent, self
).encode()
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
# for peripheral.DeviceManager_2_0_0.DeviceEvent
device=ValueObject.decode(json["device"], agent),
allDevices=[ValueObject.decode(x0, agent) for x0 in json["allDevices"]],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = []
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.DeviceAddedEvent, self
).listElements()
)
return elements
# value object
class DeviceRemovedEvent(DeviceEvent):
idlType = "peripheral.DeviceManager.DeviceRemovedEvent:1.0.0"
def __init__(self, device, allDevices, source):
super(
raritan.rpc.peripheral.DeviceManager.DeviceRemovedEvent, self
).__init__(device, allDevices, source)
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.DeviceRemovedEvent, self
).encode()
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
# for peripheral.DeviceManager_2_0_0.DeviceEvent
device=ValueObject.decode(json["device"], agent),
allDevices=[ValueObject.decode(x0, agent) for x0 in json["allDevices"]],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = []
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.DeviceRemovedEvent, self
).listElements()
)
return elements
# value object
class UnknownDeviceAttachedEvent(raritan.rpc.idl.Event):
idlType = "peripheral.DeviceManager.UnknownDeviceAttachedEvent:1.0.0"
def __init__(self, romCode, position, source):
super(
raritan.rpc.peripheral.DeviceManager.UnknownDeviceAttachedEvent, self
).__init__(source)
typecheck.is_string(romCode, AssertionError)
for x0 in position:
typecheck.is_struct(
x0, raritan.rpc.peripheral.PosElement, AssertionError
)
self.romCode = romCode
self.position = position
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.UnknownDeviceAttachedEvent, self
).encode()
json["romCode"] = self.romCode
json["position"] = [
raritan.rpc.peripheral.PosElement.encode(x0) for x0 in self.position
]
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
romCode=json["romCode"],
position=[
raritan.rpc.peripheral.PosElement.decode(x0, agent)
for x0 in json["position"]
],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = ["romCode", "position"]
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.UnknownDeviceAttachedEvent,
self,
).listElements()
)
return elements
# enumeration
class DeviceFirmwareUpdateState(Enumeration):
idlType = "peripheral.DeviceManager.DeviceFirmwareUpdateState:1.0.0"
values = ["UPDATE_STARTED", "UPDATE_SUCCESSFUL", "UPDATE_FAILED"]
DeviceFirmwareUpdateState.UPDATE_STARTED = DeviceFirmwareUpdateState(0)
DeviceFirmwareUpdateState.UPDATE_SUCCESSFUL = DeviceFirmwareUpdateState(1)
DeviceFirmwareUpdateState.UPDATE_FAILED = DeviceFirmwareUpdateState(2)
# value object
class DeviceFirmwareUpdateStateChangedEvent(raritan.rpc.idl.Event):
idlType = "peripheral.DeviceManager.DeviceFirmwareUpdateStateChangedEvent:1.0.0"
def __init__(self, oldVersion, newVersion, serial, state, source):
super(
raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateStateChangedEvent,
self,
).__init__(source)
typecheck.is_string(oldVersion, AssertionError)
typecheck.is_string(newVersion, AssertionError)
typecheck.is_string(serial, AssertionError)
typecheck.is_enum(
state,
raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateState,
AssertionError,
)
self.oldVersion = oldVersion
self.newVersion = newVersion
self.serial = serial
self.state = state
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateStateChangedEvent,
self,
).encode()
json["oldVersion"] = self.oldVersion
json["newVersion"] = self.newVersion
json["serial"] = self.serial
json[
"state"
] = raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateState.encode(
self.state
)
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
oldVersion=json["oldVersion"],
newVersion=json["newVersion"],
serial=json["serial"],
state=raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateState.decode(
json["state"]
),
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = ["oldVersion", "newVersion", "serial", "state"]
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateStateChangedEvent,
self,
).listElements()
)
return elements
# value object
class PackageEvent(raritan.rpc.idl.Event):
idlType = "peripheral.DeviceManager.PackageEvent:1.0.0"
def __init__(self, packageInfo, allPackages, source):
super(raritan.rpc.peripheral.DeviceManager.PackageEvent, self).__init__(
source
)
typecheck.is_struct(
packageInfo, raritan.rpc.peripheral.PackageInfo, AssertionError
)
for x0 in allPackages:
typecheck.is_struct(
x0, raritan.rpc.peripheral.PackageInfo, AssertionError
)
self.packageInfo = packageInfo
self.allPackages = allPackages
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.PackageEvent, self
).encode()
json["packageInfo"] = raritan.rpc.peripheral.PackageInfo.encode(
self.packageInfo
)
json["allPackages"] = [
raritan.rpc.peripheral.PackageInfo.encode(x0) for x0 in self.allPackages
]
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
packageInfo=raritan.rpc.peripheral.PackageInfo.decode(
json["packageInfo"], agent
),
allPackages=[
raritan.rpc.peripheral.PackageInfo.decode(x0, agent)
for x0 in json["allPackages"]
],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = ["packageInfo", "allPackages"]
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.PackageEvent, self
).listElements()
)
return elements
# value object
class PackageAddedEvent(PackageEvent):
idlType = "peripheral.DeviceManager.PackageAddedEvent:1.0.0"
def __init__(self, packageInfo, allPackages, source):
super(
raritan.rpc.peripheral.DeviceManager.PackageAddedEvent, self
).__init__(packageInfo, allPackages, source)
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.PackageAddedEvent, self
).encode()
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
# for peripheral.DeviceManager_2_0_0.PackageEvent
packageInfo=raritan.rpc.peripheral.PackageInfo.decode(
json["packageInfo"], agent
),
allPackages=[
raritan.rpc.peripheral.PackageInfo.decode(x0, agent)
for x0 in json["allPackages"]
],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = []
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.PackageAddedEvent, self
).listElements()
)
return elements
# value object
class PackageRemovedEvent(PackageEvent):
idlType = "peripheral.DeviceManager.PackageRemovedEvent:1.0.0"
def __init__(self, packageInfo, allPackages, source):
super(
raritan.rpc.peripheral.DeviceManager.PackageRemovedEvent, self
).__init__(packageInfo, allPackages, source)
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.PackageRemovedEvent, self
).encode()
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
# for peripheral.DeviceManager_2_0_0.PackageEvent
packageInfo=raritan.rpc.peripheral.PackageInfo.decode(
json["packageInfo"], agent
),
allPackages=[
raritan.rpc.peripheral.PackageInfo.decode(x0, agent)
for x0 in json["allPackages"]
],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = []
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.PackageRemovedEvent, self
).listElements()
)
return elements
# structure
class Statistics(Structure):
idlType = "peripheral.DeviceManager.Statistics:1.0.0"
elements = ["cSumErrCnt"]
def __init__(self, cSumErrCnt):
typecheck.is_int(cSumErrCnt, AssertionError)
self.cSumErrCnt = cSumErrCnt
@classmethod
def decode(cls, json, agent):
obj = cls(
cSumErrCnt=json["cSumErrCnt"],
)
return obj
def encode(self):
json = {}
json["cSumErrCnt"] = self.cSumErrCnt
return json
def getDeviceSlots(self):
agent = self.agent
args = {}
rsp = agent.json_rpc(self.target, "getDeviceSlots", args)
_ret_ = [Interface.decode(x0, agent) for x0 in rsp["_ret_"]]
for x0 in _ret_:
typecheck.is_interface(
x0, raritan.rpc.peripheral.DeviceSlot, DecodeException
)
return _ret_
def getDeviceSlot(self, idx):
agent = self.agent
typecheck.is_int(idx, AssertionError)
args = {}
args["idx"] = idx
rsp = agent.json_rpc(self.target, "getDeviceSlot", args)
_ret_ = Interface.decode(rsp["_ret_"], agent)
typecheck.is_interface(
_ret_, raritan.rpc.peripheral.DeviceSlot, DecodeException
)
return _ret_
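# Usage sketch (host, credentials and RID below are illustrative, not taken from this file):
#   agent = raritan.rpc.Agent("https", "my-pdu.example.com", "admin", "secret")
#   mgr = DeviceManager("/model/peripheraldevicemanager", agent)
#   slot0 = mgr.getDeviceSlot(0)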
def getDiscoveredDevices(self):
agent = self.agent
args = {}
rsp = agent.json_rpc(self.target, "getDiscoveredDevices", args)
_ret_ = [ValueObject.decode(x0, agent) for x0 in rsp["_ret_"]]
for x0 in
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Shared code between plugins.
Use python's help() for prettier help info.
"""
import gi
gi.require_version('Gimp', '3.0')
from gi.repository import Gimp
gi.require_version('Gegl', '0.4')
from gi.repository import Gegl
from gi.repository import GObject
from gi.repository import GLib
# from gi.repository import Gio
from abc import ABC, abstractmethod
# UI imports. Can't figure out a good way to only import these
# in INTERACTIVE mode while keeping ui stuff in the params.
gi.require_version('GimpUi', '3.0')
from gi.repository import GimpUi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk # noqa: F401
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk
import sys
import os.path
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import bszgw
import threading
import time
def PDB(procedure: str, *args):
# {{{
argsv = Gimp.ValueArray.new(len(args))
for num, arg in enumerate(args):
if isinstance(arg, str):
gtype = GObject.TYPE_STRING
elif isinstance(arg, Gimp.RunMode):
gtype = Gimp.RunMode
elif isinstance(arg, Gimp.Image):
gtype = Gimp.Image
elif isinstance(arg, Gimp.Drawable):
gtype = Gimp.Drawable
else:
raise ValueError("PDB Type not supported")
argsv.insert(num, GObject.Value(gtype, arg))
return Gimp.get_pdb().run_procedure_array(procedure, argsv)
# }}}
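# Usage sketch (hypothetical procedure name/arguments; only str, Gimp.RunMode,
# Gimp.Image and Gimp.Drawable arguments are supported by this helper):
#   result = PDB("some-procedure", Gimp.RunMode.NONINTERACTIVE, image, drawable)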
GEGL_COMPOSITORS = {
# {{{
"Source": "svg:src",
"Source-Atop": "svg:src-atop",
"Source-In": "svg:src-in",
"Source-Out": "svg:src-out",
"Source-Over": "svg:src-over",
"Destination": "svg:dst",
"Destination-Atop": "svg:dst-atop",
"Destination-In": "svg:dst-in",
"Destination-Out": "svg:dst-out",
"Destination-Over": "svg:dst-over",
"Lighten": "svg:lighten",
"Screen": "svg:screen",
"Color-Dodge": "svg:color-dodge",
"Add": "gegl:add",
"Plus": "svg:plus",
"Darken": "svg:darken",
"Multiply": "gegl:multiply",
"Color-Burn": "svg:color-burn",
"Overlay": "svg:overlay",
"Soft-Light": "gegl:soft-light",
"Hard-Light": "svg:hard-light",
"Difference": "svg:difference",
"Exclusion": "svg:exclusion",
"Subtract": "gegl:subtract",
"Divide": "gegl:divide",
"Gamma": "gegl:gamma",
"Seamless-Clone-Compose": "gegl:seamless-clone-compose",
"Weighted-Blend": "gegl:weighted-blend",
"Clear": "svg:clear",
"Xor": "svg:xor",
} # }}}
class Param(ABC):
# {{{
"""Abstract class taken by PlugIn."""
def __init__(self, name: str, value,
description: str = "", ui_preview: bool = True,
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1):
self.name = name
if not description:
self.description = name
else:
self.description = description
self.ui_preview = ui_preview
self.ui_column = ui_column
self.ui_row = ui_row
self.ui_width = ui_width
self.ui_height = ui_height
self.value = value
self.__widget = None
def connect_preview(self, function: callable, *args):
"""Connects the widget's value change signal to the function
`pass` acceptable for widgets where it makes no sense"""
if self.ui_preview:
self.connect_changed(function, *args if args else ())
@abstractmethod
def create_widget(self):
"""Returns a new widget for param.
Mostly used internally for widget property."""
pass
@abstractmethod
def connect_changed(self, function: callable, *args):
"""Connects widget's appropriate value change signal to fn with args.
Mostly used internally for widget property."""
pass
def ui_reset(self):
"""Assuming ui_value properties are set up correctly,
there's no reason to implement this differently on a class-by-class basis."""
self.ui_value = self.value
@property
@abstractmethod
def gproperty(self):
"""Returns a dictionary containing the gproperty for the parameter."""
pass
@property
@abstractmethod
def ui_value(self):
"""Returns/sets ui value. Usually a shadow of bszgw.Widget.value.
'Why make this its own property instead of calling param.widget.value' you ask?
I intend to eventually use some gimp specific widgets when they're available"""
pass
@ui_value.setter
@abstractmethod
def ui_value(self, new):
pass
@property
def widget(self):
"""Readonly property containing the ui widget.
Will create the widget on first read."""
if self.__widget is None:
self.__widget = self.create_widget()
return self.__widget
# }}}
class ParamBool(Param):
# {{{
"""Creates a BSZGW CheckButton for booleans"""
def __init__(self, name: str, value: bool,
description: str = "", ui_preview: bool = True,
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1):
super(ParamBool, self).__init__(name, value,
description, ui_preview,
ui_column, ui_row,
ui_width, ui_height)
def connect_changed(self, function, *args):
self.widget.connect_changed(function, *args)
def create_widget(self):
widget = bszgw.CheckButton(self.name, self.value)
widget.props.tooltip_text = self.description
return widget
@property
def gproperty(self):
return {self.name.lower().replace(' ', '-'):
(bool,
self.name,
self.description,
self.value,
GObject.ParamFlags.READWRITE)
}
@property
def ui_value(self):
return self.widget.value
@ui_value.setter
def ui_value(self, new):
self.widget.value = new
# }}}
class ParamCombo(Param):
# {{{
"""Creates a BSZGW ComboBox from a dictionary"""
def __init__(self, name: str, dictionary: dict, value,
description: str = "", ui_preview: bool = True,
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1):
super(ParamCombo, self).__init__(name, value,
description, ui_preview,
ui_column, ui_row,
ui_width, ui_height)
self.dictionary = dictionary
def connect_changed(self, function, *args):
self.widget.connect_changed(function, *args)
def create_widget(self):
widget = bszgw.ComboBox.new_dict(
self.dictionary,
self.value,
show_ids=False,
)
widget.props.tooltip_text = self.description
return widget
@property
def gproperty(self):
return {self.name.lower().replace(' ', '-'):
(str,
self.name,
self.description,
self.value,
GObject.ParamFlags.READWRITE)
}
@property
def ui_value(self):
return self.widget.value
@ui_value.setter
def ui_value(self, new):
self.widget.value = new
# }}}
class ParamNumber(Param):
# {{{
"""Creates a BSZGW Adjustment for numeric (float or int) parameters.
AKA a cool slider"""
def __init__(self, name: str, value: int, min, max,
description: str = "", ui_preview: bool = True,
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1,
integer: bool = False,
ui_step: int = 1, ui_logarithmic: bool = False):
super(ParamNumber, self).__init__(name, value,
description, ui_preview,
ui_column, ui_row,
ui_width, ui_height)
self.min = min
self.max = max
self.integer = integer
self.ui_step = ui_step
self.ui_logarithmic = ui_logarithmic
def connect_changed(self, function, *args):
self.widget.connect_changed(function, *args)
def create_widget(self):
widget = bszgw.SpinScale.new(
value=self.value,
min_value=self.min,
max_value=self.max,
step_increment=self.ui_step,
page_increment=self.ui_step,
label=self.name,
digits=0 if self.integer else 2,
logarithmic=self.ui_logarithmic
)
widget.props.tooltip_text = self.description
return widget
@property
def gproperty(self):
return {self.name.lower().replace(' ', '-'):
(int if self.integer else float,
self.name,
self.description,
self.min, self.max, self.value,
GObject.ParamFlags.READWRITE)
}
@property
def ui_value(self):
return self.widget.value
@ui_value.setter
def ui_value(self, new):
self.widget.value = new
# }}}
class ParamNumberChain(Param):
# {{{
"""Creates a chain (checkbutton for now) linking two `ParamNumber`s
Note chain ui columns are *separate* from regular ui columns
Currently only visually good for chaining across-columns."""
def __init__(self, name: str, value: bool,
param1: ParamNumber, param2: ParamNumber,
description: str = "",
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1):
super(ParamNumberChain, self).__init__(name, value,
description, False,
ui_column, ui_row,
ui_width, ui_height)
self.param1 = param1
self.param2 = param2
def create_widget(self):
self.param1.widget.adjustment.connect(
"value-changed", self.update, self.param1, self.param2)
self.param2.widget.adjustment.connect(
"value-changed", self.update, self.param2, self.param1)
widget = bszgw.CheckButton("Link", self.value)
widget.props.tooltip_text = self.description
return widget
# # Currently Gimp.ChainButton() is borked
# return GimpUi.ChainButton(active=self.value)
def connect_changed(self, function, *args):
pass
def update(self, widget, from_param, to_param):
"""copies values between params"""
if self.widget.get_active():
# using logarithmic scales can cause an update-loop
# thus we *double* check that the values aren't identical
# to avoid sending more signals
if to_param.ui_value != from_param.ui_value:
to_param.ui_value = from_param.ui_value
@property
def gproperty(self):
return None
@property
def ui_value(self):
return self.widget.get_active()
@ui_value.setter
def ui_value(self, new):
self.widget.set_active(new)
# }}}
class ParamString(Param):
# {{{
"""Creates a BSZGW Entry for inputting text."""
def __init__(self, name: str, value: str,
description: str = "", ui_preview: bool = False,
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1,
ui_multiline: bool = False,
ui_min_width: int = 300, ui_min_height: int = 100):
super(ParamString, self).__init__(name, value,
description, ui_preview,
ui_column, ui_row,
ui_width, ui_height)
self.ui_multiline = ui_multiline
self.ui_min_width = ui_min_width
self.ui_min_height = ui_min_height
def connect_changed(self, function, *args):
self.widget.connect_changed(function, *args)
def create_widget(self):
widget = bszgw.Entry(
value=self.value,
label=self.name,
multi_line=self.ui_multiline,
min_width=self.ui_min_width,
min_height=self.ui_min_height
)
widget.props.tooltip_text = self.description
return widget
@property
def gproperty(self):
return {self.name.lower().replace(' ', '-'):
(str,
self.name,
self.name, # desc?
self.value,
GObject.ParamFlags.READWRITE)
}
@property
def ui_value(self):
return self.widget.value
@ui_value.setter
def ui_value(self, new):
self.widget.value = new
# }}}
class PreviewThread(threading.Thread):
# {{{
"""Runs `function` after self.request_preview has been called no more than
once in the last 0.5 seconds."""
def __init__(self, function, *args):
super(PreviewThread, self).__init__()
self.function = function
self.args = args
self.time = time.time()
self.active = True
self.request = True
def run(self):
"""Thread's main loop. Not called directly, use thread.start()"""
while self.active:
time.sleep(0.1)
if time.time() - self.time > 0.5 and self.request:
self.function(*self.args)
self.time = time.time()
self.request = False
def request_preview(self, *args):
self.request = True
self.time = time.time()
def stop(self, *args):
self.active = False
self.join()
# }}}
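# Usage sketch (hypothetical `render` callable and its arguments): the thread
# debounces preview requests, so `render` fires once the user has stopped
# changing values for ~0.5 s.
#   preview = PreviewThread(render, image)
#   preview.start()
#   some_param.connect_preview(preview.request_preview)
#   ...
#   preview.stop()  # joins the thread when the dialog closes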
class PlugIn():
# {{{
"""Automatically creates a gimp plugin UI from given Param classes.
It's basically the old GimpFu but way cooler and more unstable.
Check out one of my scripts that uses it and you'll instantly go
\"ah it's like that\"."""
# Get & save properties
def __init__(self, name: str, function: callable, *params: Param,
description: str, alt_description: str = None,
gegl_preview: bool = True,
procedure_name: str = None, images: str = "RGB*",
path: str = "<Image>/Beinsezii/", icon=GimpUi.ICON_GEGL,
authors: str = "Beinsezii", copyright: str = None,
date: str = "2020"):
# {{{
if not procedure_name:
procedure_name = name.lower().replace(" ", "-")
if not alt_description:
alt_description = description
if not copyright:
copyright = authors
gproperties = {}
for param in params:
gproperty = param.gproperty
if gproperty:
gproperties.update(gproperty)
class Procedure(Gimp.PlugIn):
# {{{
"""The generated pdb procedure stuff. Class inside | |
# Source file: voodoopadplugins/scriptstuff/src/py2html.py
#!/usr/bin/python -u
# -*- coding: utf-8 -*-
""" Python Highlighter Version: 0.8
py2html.py [options] files...
options:
-h print help
- read from stdin, write to stdout
-stdout read from files, write to stdout
-files read from files, write to filename+'.html' (default)
-format:
html output XHTML page (default)
rawhtml output pure XHTML (without headers, titles, etc.)
-mode:
color output in color (default)
mono output b/w (for printing)
-title:Title use 'Title' as title of the generated page
-bgcolor:color use color as background-color for page
-header:file use contents of file as header
-footer:file use contents of file as footer
-URL replace all occurrences of 'URL: link' with
'<a href="link">link</a>'; this is always enabled
in CGI mode
-v verbose
Takes the input, assuming it is Python code and formats it into
colored XHTML. When called without parameters the script tries to
work in CGI mode. It looks for a field 'script=URL' and tries to
use that URL as input file. If it can't find this field, the path
info (the part of the URL following the CGI script name) is
tried. In case no host is given, the host where the CGI script
lives and HTTP are used.
* Uses Just van Rossum's PyFontify version 0.3 to tag Python scripts.
You can get it via his homepage on starship:
URL: http://starship.python.net/crew/just
"""
__comments__ = """
The following snippet is a small shell script I use for viewing
Python scripts via less on Unix:
pyless:
#!/bin/sh
# Browse pretty printed Python code using ANSI codes for highlighting
py2html -stdout -format:ansi -mode:color $* | less -r
History:
0.8: Added patch by <NAME> to have py2html.py use style
sheets for markup
0.7: Added patch by <NAME> to make py2html.py output
valid XHTML.
0.6: Fixed a bug in .escape_html(); thanks to <NAME> for
finding this one.
0.5: Added a few suggestions by Kevin Ng to make the CGI version
a little more robust.
"""
__copyright__ = """\
Copyright (c) 1998-2000, <NAME>; mailto:<EMAIL>
Copyright (c) 2000-2002, eGenix.com Software GmbH; mailto:<EMAIL>
Distributed under the terms and conditions of the eGenix.com Public
License. See http://www.egenix.com/files/python/mxLicense.html for
details, or contact the author. All Rights Reserved.\
"""
__version__ = '0.8'
__cgifooter__ = ('\n<pre># code highlighted using <a href='
'"http://www.lemburg.com/files/python/">py2html.py</a> '
'version %s</pre>\n' % __version__)
import sys,string,re
# Adjust path so that PyFontify is found...
sys.path.append('.')
### Constants
# URL of the input form the user is redirected to in case no script=xxx
# form field is given. The URL *must* be absolute. Leave blank to
# have the script issue an error instead.
INPUT_FORM = 'http://www.lemburg.com/files/python/SoftwareDescriptions.html#py2html.py'
# HTML DOCTYPE and XML namespace
HTML_DOCTYPE = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
HTML_XMLNS = ' xmlns="http://www.w3.org/1999/xhtml"'
### Helpers
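# Note (added for readability): fileio() below doubles as reader and writer.
# With `data` given it writes, otherwise it reads and returns the contents;
# string arguments are opened (and closed) here, file objects are used as-is.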
def fileio(file, mode='rb', data=None, close=0):
if type(file) == type(''):
f = open(file,mode)
close = 1
else:
f = file
if data:
f.write(data)
else:
data = f.read()
if close: f.close()
return data
### Converter class
class PrettyPrint:
""" generic Pretty Printer class
* supports tagging Python scripts in the following ways:
# format/mode | color mono
# --------------------------
# rawhtml | x x (HTML without headers, etc.)
# html | x x (a HTML page with HEAD&BODY:)
# ansi | x x (with Ansi-escape sequences)
* interfaces:
file_filter -- takes two files: input & output (may be stdin/stdout)
filter -- takes a string and returns the highlighted version
* to create an instance use:
c = PrettyPrint(tagfct,format,mode)
where format and mode must be strings according to the
above table if you plan to use PyFontify.fontify as
tagfct
* the tagfct has to take one argument, text, and return a taglist
(format: [(id,left,right,sublist),...], where id is the
"name" given to the slice left:right in text and sublist is a
taglist for tags inside the slice or None)
"""
# misc settings
title = ''
bgcolor = '#FFFFFF'
css = ''
header = ''
footer = ''
replace_URLs = 0
# formats to be used
formats = {}
def __init__(self,tagfct=None,format='html',mode='color'):
self.tag = tagfct
self.set_mode = getattr(self,'set_mode_%s_%s' % (format, mode))
self.filter = getattr(self,'filter_%s' % format)
def file_filter(self,infile,outfile):
self.set_mode()
text = fileio(infile,'r')
if type(infile) == type('') and self.title == '':
self.title = infile
fileio(outfile,'w',self.filter(text))
### Set pre- and postfixes for formats & modes
#
# These methods must set self.formats to a dictionary having
# an entry for every tag returned by the tagging function.
#
# The format used is simple:
# tag:(prefix,postfix)
# where prefix and postfix are either strings or callable objects,
# that return a string (they are called with the matching tag text
# as only parameter). prefix is inserted in front of the tag, postfix
# is inserted right after the tag.
def set_mode_html_color(self):
self.css = """
<STYLE TYPE="text/css">
<!--
body{ background: %s; }
.PY_KEYWORD{ color: #0000C0; font-weight: bold; }
.PY_COMMENT{ color: #000080; }
.PY_PARAMETER{ color: #C00000; }
.PY_IDENTIFIER{ color: #C00000; font-weight: bold; }
.PY_STRING{ color: #008000; }
-->
</STYLE> """ % self.bgcolor
self.formats = {
'all':('<pre>','</pre>'),
'comment':('<span class="PY_COMMENT">','</span>'),
'keyword':('<span class="PY_KEYWORD">','</span>'),
'parameter':('<span class="PY_PARAMETER">','</span>'),
'identifier':( lambda x,strip=string.strip:
'<a name="%s"><span class="PY_IDENTIFIER">' % (strip(x)),
'</span></a>'),
'string':('<span class="PY_STRING">','</span>')
}
set_mode_rawhtml_color = set_mode_html_color
def set_mode_html_mono(self):
self.css = """
<STYLE TYPE="text/css">
<!--
body{ background-color: %s }
.PY_KEYWORD{ text-decoration: underline }
.PY_COMMENT{ }
.PY_PARAMETER{ }
.PY_IDENTIFIER{ font-weight: bold}
.PY_STRING{ font-style: italic}
-->
</STYLE> """ % self.bgcolor
self.formats = {
'all':('<pre>','</pre>'),
'comment':('<span class="PY_COMMENT">','</span>'),
'keyword':( '<span class="PY_KEYWORD">','</span>'),
'parameter':('<span class="PY_PARAMETER">','</span>'),
'identifier':( lambda x,strip=string.strip:
'<a name="%s"><span class="PY_IDENTIFIER">' % (strip(x)),
'</span></a>'),
'string':('<span class="PY_STRING">','</span>')
}
set_mode_rawhtml_mono = set_mode_html_mono
def set_mode_ansi_mono(self):
self.formats = {
'all':('',''),
'comment':('\033[2m','\033[m'),
'keyword':('\033[4m','\033[m'),
'parameter':('',''),
'identifier':('\033[1m','\033[m'),
'string':('','')
}
def set_mode_ansi_color(self):
self.formats = {
'all':('',''),
'comment':('\033[34;2m','\033[m'),
'keyword':('\033[1;34m','\033[m'),
'parameter':('',''),
'identifier':('\033[1;31m','\033[m'),
'string':('\033[32;2m','\033[m')
}
### Filters for Python scripts given as string
def escape_html(self,text):
t = (('&','&amp;'),('<','&lt;'),('>','&gt;'))
for x,y in t:
text = string.join(string.split(text,x),y)
return text
def filter_html(self,text):
output = self.fontify(self.escape_html(text))
if self.replace_URLs:
output = re.sub('URL:([ \t]+)([^ \n\r<]+)',
'URL:\\1<a href="\\2">\\2</a>',output)
html = """%s<html%s>
<head>
<title>%s</title>
<!--css-->
%s
</head>
<body>
<!--header-->
%s
<!--script-->
%s
<!--footer-->
%s
</body></html>\n"""%(HTML_DOCTYPE,
HTML_XMLNS,
self.title,
self.css,
self.header,
output,
self.footer)
return html
def filter_rawhtml(self,text):
output = self.fontify(self.escape_html(text))
if self.replace_URLs:
output = re.sub('URL:([ \t]+)([^ \n\r<]+)',
'URL:\\1<a href="\\2">\\2</a>',output)
return self.header + output + self.footer
def filter_ansi(self,text):
output = self.fontify(text)
return self.header + output + self.footer
### Fontify engine
def fontify(self,pytext):
# parse
taglist = self.tag(pytext)
# prepend special 'all' tag:
taglist[:0] = [('all',0,len(pytext),None)]
# prepare splitting
splits = []
addsplits(splits,pytext,self.formats,taglist)
# do splitting & inserting
splits.sort()
l = []
li = 0
for ri,dummy,insert in splits:
if ri > li: l.append(pytext[li:ri])
l.append(insert)
li = ri
if li < len(pytext): l.append(pytext[li:])
return string.join(l,'')
def addsplits(splits,text,formats,taglist):
""" Helper for .fontify()
"""
for id,left,right,sublist in taglist:
try:
pre,post = formats[id]
except KeyError:
# sys.stderr.write('Warning: no format for %s specified\n'%repr(id))
pre,post = '',''
if type(pre) != type(''):
pre = pre(text[left:right])
if type(post) != type(''):
post = post(text[left:right])
# len(splits) is a dummy used to make sorting stable
splits.append((left,len(splits),pre))
if sublist:
addsplits(splits,text,formats,sublist)
splits.append((right,len(splits),post))
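# Worked example (illustrative): for a taglist entry ('comment', left, right, None)
# addsplits() appends (left, n, prefix) and (right, n+1, postfix) to `splits`;
# fontify() then sorts these positions and splices the prefix/postfix strings
# around the tagged slice of the original text.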
def write_html_error(titel,text):
print """\
%s<html%s><head><title>%s</title></head>
<body>
<h2>%s</h2>
%s
</body></html>
""" % (HTML_DOCTYPE,HTML_XMLNS,titel,titel,text)
def redirect_to(url):
sys.stdout.write('Content-Type: text/html\r\n')
sys.stdout.write('Status: 302\r\n')
sys.stdout.write('Location: %s\r\n\r\n' % url)
print """
%s<html%s><head>
<title>302 Moved Temporarily</title>
</head><body>
<h1>302 Moved Temporarily</h1>
The document has moved to <a href="%s">%s</a>.<p></p>
</body></html>
""" % (HTML_DOCTYPE,HTML_XMLNS,url,url)
def main(cmdline):
""" main(cmdline) -- process cmdline as if it were sys.argv
"""
# parse options/files
options = []
optvalues = {}
for o in cmdline[1:]:
if o[0] == '-':
if ':' in o:
k,v = tuple(string.split(o,':'))
optvalues[k] = v
options.append(k)
else:
options.append(o)
else:
break
files = cmdline[len(options)+1:]
### create converting object
# load fontifier
if '-marcs' in options:
# use mxTextTool's tagging engine as fontifier
from mx.TextTools import tag
from mx.TextTools.Examples.Python import python_script
tagfct = lambda text,tag=tag,pytable=python_script: \
tag(text,pytable)[1]
print "Py2HTML: using Marc's tagging engine"
else:
# load Just's fontifier
try:
import PyFontify
if PyFontify.__version__ < '0.3': raise ValueError
tagfct = PyFontify.fontify
except:
print """
Sorry, but this script needs the PyFontify.py module version 0.3;
You can download it from Just's homepage at
URL: http://starship.python.net/crew/just
"""
sys.exit()
if '-format' in options:
format = optvalues['-format']
else:
# use default
format = 'html'
if '-mode' in
# Source: martok/py-symcircuit
import re
from typing import List, Dict, Set, Tuple, Union, Optional, Iterable
from sympy import StrPrinter, Eq, Symbol, symbols, Expr, Limit, cse
from sympy import Tuple as TTuple
from sympy.core.assumptions import _assume_defined
from sympy.printing.pycode import pycode
from sympy.solvers.solvers import _invert as sym_invert
# focus & seek based on https://github.com/sympy/sympy/issues/2720#issuecomment-312437508 by <NAME> @smichr
SymbolMap = Dict[str, Symbol]
ExpressionMap = Dict[str, Expr]
SymbolSet = Set[Symbol]
SymbolItS = Iterable[Union[Symbol, str]]
ReplacementRule = Tuple[Symbol, Expr]
ReplacementRules = List[ReplacementRule]
def complexity(e: Union[Eq, Expr]) -> int:
return e.count_ops(visual=False)
def sortedsyms(s: SymbolSet) -> Iterable[Symbol]:
return sorted(s, key=lambda sym: sym.name)
def as_symbols(syms: Optional[SymbolItS]) -> Optional[Iterable[Symbol]]:
if syms is None:
return
return [Symbol(s) if isinstance(s, str) else s for s in syms]
def sympy_parse(s: str, locs: SymbolMap) -> Optional[Expr]:
from sympy.parsing.sympy_parser import parse_expr
from sympy.parsing.sympy_parser import standard_transformations, implicit_multiplication
transformations = standard_transformations + (implicit_multiplication,)
try:
return parse_expr(s, local_dict=locs, transformations=transformations)
except SyntaxError:
return None
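# Note: implicit multiplication is enabled, so e.g. "U R" parses as U*R;
# names missing from `locs` are created as new Symbols by parse_expr
# (illustrative behaviour summary, added for readability).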
def seek(eqs: List[Eq], goals: SymbolSet, rules: ReplacementRules, *,
recursive: bool = False, parameters: Optional[SymbolSet] = None, verbose=False):
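# (Summary added for readability.) seek() eliminates the `goals` symbols from
# `eqs` one at a time: pick an equation that can be inverted for a goal symbol,
# record the rule (symbol, expression), substitute it into the remaining
# equations, and repeat. With recursive=True, symbols introduced by a rule
# (except `parameters`) become new goals, so results are driven toward
# expressions in terms of the parameters only.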
if parameters is None:
parameters = set()
goals.difference_update(parameters)
def solve_for_x(eq: Eq, x: Symbol) -> Optional[Expr]:
# rewrite two-sided equation to single expression == 0
tozero = eq.lhs - eq.rhs
ex, d = sym_invert(tozero, x)
if d != x:
return
return ex
def rule_generator():
# produce all possible replacements from current rules and goals
expanded_eqs = set()
for ieq, eq in enumerate(eqs):
if not isinstance(eq, Eq):
continue
for s in sortedsyms(goals):
ex = solve_for_x(eq, s)
if ex is None:
if ieq not in expanded_eqs and s in eq.free_symbols:
# symbol is present, maybe invert just failed?
eq = eq.expand()
eqs[ieq] = eq
expanded_eqs.add(ieq)
ex = solve_for_x(eq, s)
if ex is None:
# really not possible
continue
else:
continue
yield eq, s, ex
def introduced_goals(ex: Expr):
# how many new symbols would we have to solve for when this expressions is applied?
missing = set(ex.free_symbols) - goals - parameters
return len(missing)
def expression_schedule(gen):
# prefer expressions that introduce the least number of new goals
# ideally takes solutions that only involve constant parameters, but generally still uses previous
# knowledge/rules more effectively
return sorted(gen, key=lambda t: introduced_goals(t[2]))
def find_replacement_step():
for eq, s, ex in expression_schedule(rule_generator()):
# remove from pool and return
eqs.remove(eq)
if verbose:
print(f"from {str(eq)}: {str(s)} == {ex}")
return s, ex
# no equation solves to anything we need. this is bad
raise ValueError(f"equation system cannot be inverted for: {str(goals)}")
def replace_in_all(sym, expr):
rep = {sym: expr}
for i, eq in enumerate(eqs):
if not isinstance(eq, Eq):
continue
# avoid dividing by zero
ln, ld = eq.lhs.as_numer_denom()
rn, rd = eq.rhs.as_numer_denom()
lhs = (ln * rd).collect(sym)
rhs = (rn * ld).collect(sym)
new: Eq = Eq(lhs, rhs).xreplace(rep)
if new is False:
raise ValueError('inconsistency detected')
eqs[i] = new
eqs.sort(key=complexity)
eqs.sort(key=complexity)
while goals and eqs:
try:
sym, expr = find_replacement_step()
except ValueError:
if recursive:
break
raise
rules.append((sym, expr))
goals.remove(sym)
if recursive:
goals |= (expr.free_symbols - parameters)
replace_in_all(sym, expr)
def reduce_replacements(rules: ReplacementRules, final: int) -> ReplacementRules:
targets = rules[:final]
parts = rules[final:]
# apply all instructions for independents into the target expressions
while parts:
x, s = parts.pop(0)
s = s.cancel()
rep = {x: s}
for i, (xx, ss) in enumerate(targets):
# slightly simplify, but don't call simplify() because that can be *very* complex
ss = ss.collect(x)
sr = ss.xreplace(rep)
sr = sr.cancel()
targets[i] = (xx, sr)
# if we had more than one target, the later ones may be components in the previous ones
# for any rule beyond the first, expand references involving the other targets so that each is fully independent
for i in reversed(range(1, len(targets))):
x, s = rules[i]
rep = {x: s}
for j in range(i):
y, t = rules[j]
# invert may fail if not expanded enough
yex = (y - t.xreplace(rep)).expand()
ind, dep = sym_invert(yex, y)
ind = ind.cancel()
rules[j] = y, ind
return targets
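# Illustrative sketch (invented symbols, continuing the example above): after a
# recursive seek(), rules might be [(a, 10 - b), (b, 2*c)] with c a parameter;
# reduce_replacements(rules, 1) then folds the trailing rule into the first target
# and returns [(a, 10 - 2*c)].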
class SymbolicSystem:
def __init__(self, source=None):
self.debug: bool = False
self.statements: List[Eq] = []
self.parameters: SymbolSet = set()
self.limits: List[Limit] = []
self.limit_symbol = Symbol("$$fn")
self.assumptions: SymbolSet = set()
if isinstance(source, str):
self.parse(source)
elif isinstance(source, SymbolicSystem):
self.extend(source)
elif isinstance(source, list):
for s in source:
if isinstance(s, Eq):
self.statements.append(s)
@property
def symbols(self) -> SymbolSet:
return TTuple(*self.statements).free_symbols
@property
def symboldict(self) -> SymbolMap:
return {s.name: s for s in self.symbols}
@property
def parameterdict(self) -> SymbolMap:
return {s.name: s for s in self.parameters}
def info(self) -> str:
r = []
if self.statements:
r.append("Equations:")
r.extend(" " + str(s) for s in self.statements)
if self.assumptions:
r.append("Assuming:")
rdict = {}
for sym in sortedsyms(self.assumptions):
a = " ".join(k for k, v in sym.assumptions0.items() if v == True)
if a in rdict:
rdict[a].append(sym)
else:
rdict[a] = [sym]
for a, vs in rdict.items():
vn = " ".join(v.name for v in vs)
r.append(f" {vn} :: {a}")
if self.parameters:
r.append("Using parameters:")
r.append(" " + " ".join(sorted(str(p) for p in self.parameters)))
if self.limits:
r.append("Using limits:")
for lim in self.limits:
r.append(f" {lim.args[1]} -> {lim.natural_dir}{lim.args[2]}")
return "\n".join(r)
def _parse_expr(self, ex: str) -> Optional[Expr]:
# parse expressions while carrying all already known symbols and their predefined assumptions
return sympy_parse(ex, self.symboldict)
def parse(self, descr: str) -> 'SymbolicSystem':
"""
Parse a string containing one or more definitions and add them to the current state
Syntax::
expr # anything         a '#' comments out the rest of the line
[lhs] == [rhs] equality statement
[lhs] equality statement with implied "== 0"
[syms] == const syms are constant and should not be removed from equations
[syms] :: predicate[, predicate] define syms with assumptions. must be done before any use of the symbols
see https://docs.sympy.org/latest/guides/assumptions.html#predicates for a list
of valid predicates and their implications
[syms] -> [+-][sym] after reasoning over statements, syms will be treated in the limit towards sym
:param descr: system definition according to the syntax above
:return: self for chaining
"""
for sl in descr.splitlines():
sl = sl.strip()
try:
sl = sl[:sl.index("#")].rstrip()
except ValueError:
pass
if not sl:
continue
args = []
def try_match(pat) -> bool:
nonlocal args
args.clear()
m = re.fullmatch("^" + pat + "$", sl, re.IGNORECASE | re.DOTALL)
if m is None:
return False
for g in m.groups():
if g is not None:
args.append(g.strip())
return True
if try_match(r"(.*)==\s*const\s*"):
syms = symbols(args[0], seq=True)
self.parameters.update(syms)
elif try_match(r"(.*)==(.*)"):
lhs = self._parse_expr(args[0])
rhs = self._parse_expr(args[1])
if lhs is None or rhs is None:
raise ValueError(f"Failed to parse statement line: {sl}")
eq = Eq(lhs, rhs)
self.statements.append(eq)
elif try_match(r"(.*)->\s*([+-]?)(oo|0)\s*"):
syms = symbols(args[0], seq=True)
ndir = args[1] or "+"
# SymPy: from where is sym approached, system syntax: to where does sym tend
dir = "-" if ndir == "+" else "-"
lim = self._parse_expr(args[2])
for s in syms:
li = Limit(self.limit_symbol, s, lim, dir)
li.natural_dir = ndir
self.limits.append(li)
elif try_match(r"(.*)::\s*(!?\w+)(?:\s*[, ]\s*(!?\w+))*\s*"):
assum = {}
for a in args[1:]:
val = not a.startswith("!")
if not val:
a = a[1:]
if a not in _assume_defined:
raise ValueError(f"Invalid assumption predicate: {a}")
assum[a] = val
syms = symbols(args[0], seq=True, **assum)
self.assumptions.update(syms)
else:
# parse expressions while carrying all already known symbols
lhs = self._parse_expr(sl)
if lhs is None:
raise ValueError(f"Failed to parse statement line: {sl}")
eq = Eq(lhs, 0)
self.statements.append(eq)
return self
def extend(self, other: 'SymbolicSystem'):
self.statements.extend(st for st in other.statements if st not in self.statements)
self.parameters.update(other.parameters)
for lim in other.limits:
if lim in self.limits:
continue
if any((l.args[1] == lim.args[1]) and not (l.args == lim.args) for l in self.limits):
raise ValueError("Trying to merge systems with conflicting limit definitions")
self.limits.append(lim)
for assu in other.assumptions:
if assu in self.assumptions:
continue
if any(s.name == assu.name for s in self.assumptions):
raise ValueError("Trying to merge systems with conflicting assumptions")
self.assumptions.add(assu)
def __add__(self, other: 'SymbolicSystem') -> 'SymbolicSystem':
res = SymbolicSystem()
res.extend(self)
res.extend(other)
return res
def focus(self, *goals, evaluate=True) -> Union[ReplacementRules, ExpressionMap]:
s = self.symboldict
goals = set(s[g] if isinstance(g, str) else g for g in goals) & self.symbols - self.parameters
replacements: ReplacementRules = []
if goals:
eqs = self.statements.copy()
syms = goals.copy()
# first run to find one expression for each goal
seek(eqs, syms, replacements, recursive=False, verbose=self.debug)
if len(syms) or len(replacements) < len(goals):
raise ValueError(f"Failed to locate all goals: {syms}")
# | |
"""
Class and function definitions for PieceWise Linear (PWL) representations
"""
import copy
import operator
from xml.dom.minidom import Document,Node
from probability import Distribution
from action import Action
CONSTANT = ''
class KeyedVector(dict):
"""
Class for a compact, string-indexable vector
@cvar epsilon: the margin used for equality of vectors (as well as for testing hyperplanes in L{KeyedPlane})
@type epsilon: float
@ivar _string: the C{str} representation of this vector
@type _string: bool
"""
epsilon = 1e-8
def __init__(self,arg={}):
if isinstance(arg,Node):
dict.__init__(self)
self.parse(arg)
else:
dict.__init__(self,arg)
self._string = None
def __eq__(self,other):
delta = 0.
tested = {}
for key,value in self.items():
try:
delta += abs(value-other[key])
except KeyError:
delta += abs(value)
tested[key] = True
for key,value in other.items():
if not tested.has_key(key):
delta += abs(value)
return delta < self.epsilon
def __ne__(self,other):
return not self == other
def __add__(self,other):
result = KeyedVector(self)
for key,value in other.items():
try:
result[key] += value
except KeyError:
result[key] = value
return result
def __neg__(self):
result = KeyedVector()
for key,value in self.items():
result[key] = -value
return result
def __sub__(self,other):
return self + (-other)
def __mul__(self,other):
if isinstance(other,KeyedVector):
# Dot product
total = 0.
for key,value in self.items():
if other.has_key(key):
total += value*other[key]
return total
elif isinstance(other,float):
# Scaling
result = KeyedVector()
for key,value in self.items():
result[key] = value*other
return result
elif isinstance(other,KeyedMatrix):
# Transform vector
result = KeyedVector()
for key in self.keys():
if other.has_key(key):
for col in other[key].keys():
try:
result[col] += self[key]*other[key][col]
except KeyError:
result[col] = self[key]*other[key][col]
return result
else:
raise TypeError,'Unable to multiply %s by %s' % \
(self.__class__.__name__,other.__class__.__name__)
def __setitem__(self,key,value):
self._string = None
dict.__setitem__(self,key,value)
def __delitem__(self,key):
self._string = None
dict.__delitem__(self,key)
def desymbolize(self,table,debug=False):
result = self.__class__()
for key,value in self.items():
if isinstance(value,str):
try:
result[key] = eval(value,globals(),table)
except NameError:
# Undefined reference: assume it'll get sorted out later
result[key] = value
else:
result[key] = value
return result
def filter(self,ignore):
"""
@return: a copy of me applying the given lambda expression to the keys (if a list is provided, then any keys in that list are dropped out)
@rtype: L{KeyedVector}
"""
if isinstance(ignore,list):
test = lambda k: not k in ignore
else:
test = ignore
result = self.__class__()
for key in filter(test,self.keys()):
result[key] = self[key]
return result
def nearestNeighbor(self,vectors):
"""
@return: the vector in the given set that is closest to me
@rtype: L{KeyedVector}
"""
bestVector = None
bestValue = None
for vector in vectors:
d = self.distance(vector)
if bestVector is None or d < bestValue:
bestValue = d
bestVector = vector
return bestVector
def distance(self,vector):
"""
@return: the distance between the given vector and myself
@rtype: float
"""
d = 0.
for key in self.keys():
d += pow(self[key]-vector[key],2)
return d
def __str__(self):
if self._string is None:
keys = self.keys()
keys.sort()
self._string = '\n'.join(map(lambda k: '%s: %s' % (k,self[k]),keys))
return self._string
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__,dict(self))
def __hash__(self):
return hash(str(self))
def __xml__(self):
doc = Document()
root = doc.createElement('vector')
for key,value in self.items():
node = doc.createElement('entry')
node.setAttribute('key',key)
node.setAttribute('value',str(value))
root.appendChild(node)
doc.appendChild(root)
return doc
def parse(self,element):
self._string = None
node = element.firstChild
while node:
if node.nodeType == node.ELEMENT_NODE:
assert node.tagName == 'entry'
key = str(node.getAttribute('key'))
try:
value = float(node.getAttribute('value'))
except ValueError:
value = str(node.getAttribute('value'))
dict.__setitem__(self,key,value)
node = node.nextSibling
class VectorDistribution(Distribution):
"""
A class representing a L{Distribution} over L{KeyedVector} instances
"""
def join(self,key,value):
"""
Modifies the distribution over vectors to have the given value for the given key
@param key: the key to the column to modify
@type key: str
@param value: either a single value to apply to all vectors, or else a L{Distribution} over possible values
"""
original = dict(self)
domain = self.domain()
self.clear()
for row in domain:
prob = original[str(row)]
if isinstance(value,Distribution):
for element in value.domain():
new = row.__class__(row)
new[key] = element
self.addProb(new,prob*value[element])
else:
row[key] = value
self[row] = prob
def merge(self,other):
"""
Merge two distributions (the passed-in distribution takes precedence over this one in case of conflict)
@type other: L{VectorDistribution}
@return: the merged distribution
@rtype: L{VectorDistribution}
"""
result = {}
for diff in other.domain():
for old in self.domain():
new = old.__class__(old)
new.update(diff)
result[new] = self[old]*other[diff]
return self.__class__(result)
def element2xml(self,value):
return value.__xml__().documentElement
def xml2element(self,key,node):
return KeyedVector(node)
def marginal(self,key):
result = {}
for row in self.domain():
try:
result[row[key]] += self[row]
except KeyError:
result[row[key]] = self[row]
return Distribution(result)
def select(self,incremental=False):
"""
@param incremental: if C{True}, then select each key value in series (rather than picking out a joint vector all at once, default is C{False})
"""
if incremental:
# Sample each key and keep track how likely each individual choice was
sample = KeyedVector()
keys = self.domain()[0].keys()
index = 0
while len(self) > 1:
key = keys[index]
dist = self.marginal(key)
if len(dist) > 1:
# Have to make a choice here
element,sample[key] = dist.sample(True)
# Figure out where the "spinner" ended up across entire pie chart
for other in dist.domain():
if other == element:
break
else:
sample[key] += dist[other]
for vector in self.domain():
if vector[key] != element:
del self[vector]
self.normalize()
index += 1
return sample
else:
Distribution.select(self)
def hasColumn(self,key):
"""
@return: C{True} iff the given key appears in all of the vectors of this distribution
@rtype: bool
"""
for vector in self.domain():
if not vector.has_key(key):
return False
return True
def __deepcopy__(self,memo):
result = self.__class__()
for vector in self.domain():
try:
new = memo[id(vector)]
except KeyError:
new = KeyedVector(vector)
memo[id(vector)] = new
result[new] = self[vector]
return result
class KeyedMatrix(dict):
def __init__(self,arg={}):
self._keysIn = None
self._keysOut = None
if isinstance(arg,Node):
dict.__init__(self)
self.parse(arg)
else:
dict.__init__(self,arg)
self._string = None
def __eq__(self,other):
return str(self) == str(other)
# for key,vector in self.items():
# try:
# if vector != other[key]:
# return False
# except KeyError:
# if vector != {}:
# return False
# else:
# return True
def __ne__(self,other):
return not self == other
def __neg__(self):
result = KeyedMatrix()
for key,vector in self.items():
result[key] = -vector
return result
def __add__(self,other):
result = KeyedMatrix()
for key,vector in self.items():
try:
result[key] = vector + other[key]
except KeyError:
result[key] = KeyedVector(vector)
for key,vector in other.items():
if not result.has_key(key):
result[key] = KeyedVector(vector)
return result
def __sub__(self,other):
return self + (-other)
def __mul__(self,other):
if isinstance(other,KeyedMatrix):
result = KeyedMatrix()
for r1,v1 in self.items():
result[r1] = KeyedVector()
for c1,value1 in v1.items():
if other.has_key(c1):
for c2,value2 in other[c1].items():
try:
result[r1][c2] += value1*value2
except KeyError:
result[r1][c2] = value1*value2
elif isinstance(other,KeyedVector):
result = KeyedVector()
for r1,v1 in self.items():
for c1,value1 in v1.items():
if other.has_key(c1):
try:
result[r1] += value1*other[c1]
except KeyError:
result[r1] = value1*other[c1]
elif isinstance(other,VectorDistribution):
result = VectorDistribution()
for vector in other.domain():
product = self*vector
try:
result[product] += other[vector]
except KeyError:
result[product] = other[vector]
else:
raise TypeError,'Unable to multiply %s by %s' % \
(self.__class__.__name__,other.__class__.__name__)
return result
def getKeysIn(self):
"""
@return: a set of keys which affect the result of multiplying by this matrix
"""
if self._keysIn is None:
self._keysIn = set()
self._keysOut = set()
for col,row in self.items():
self._keysIn |= set(row.keys())
self._keysOut.add(col)
return self._keysIn
def getKeysOut(self):
"""
@return: a set of keys which are changed as a result of multiplying by this matrix
"""
if self._keysOut is None:
self.getKeysIn()
return self._keysOut
# def getKeys(self):
# result = set()
# for row in self.values():
# result |= set(row.keys())
# return result
def desymbolize(self,table,debug=False):
result = self.__class__()
for key,row in self.items():
result[key] = row.desymbolize(table)
return result
def scale(self,table):
result = self.__class__()
for row,vector in self.items():
if table.has_key(row):
result[row] = KeyedVector()
lo,hi = table[row]
constant = 0.
for col,value in vector.items():
if col == row:
# Same value
result[row][col] = value
constant += value*lo
elif col != CONSTANT:
# Scale weight for another feature
if abs(value) > vector.epsilon:
assert table.has_key(col),'Unable to mix symbolic and numeric values in single vector'
colLo,colHi = table[col]
result[row][col] = value*(colHi-colLo)*(hi-lo)
constant += value*colLo
result[row][CONSTANT] = constant - lo
if vector.has_key(CONSTANT):
result[row][CONSTANT] += vector[CONSTANT]
result[row][CONSTANT] /= (hi-lo)
else:
result[row] = KeyedVector(vector)
return result
def __setitem__(self,key,value):
assert isinstance(value,KeyedVector),'Illegal row type: %s' % \
(value.__class__.__name__)
self._string = None
dict.__setitem__(self,key,value)
def update(self,other):
self._string = None
dict.update(self,other)
def __str__(self):
if self._string is None:
joiner = lambda item: '%s*%s' % (item[1],item[0])
| |
<reponame>unitedstates/inspectors-general
#!/usr/bin/env python
import datetime
import itertools
import logging
import os
import re
from urllib.parse import urljoin, urlparse, urlunparse, urldefrag
from utils import utils, inspector, admin
# http://oig.hhs.gov/reports-and-publications/index.asp
archive = 1985
# options:
# standard since/year options for a year range to fetch from.
#
# topics - limit reports fetched to one or more topics, comma-separated, which
# correspond to the topics defined on the site. For example:
# 'OAS,OE'
# Defaults to all topics.
#
# OAS - Office of Audit Services
# OE - Office of Evaluation and Inspections
# HCF - Health Care Fraud and Abuse Control Program Report
# SAR - Semiannual Reports to Congress
# MIR - Medicaid Integrity Reports
# TMPC - Top Management & Performance Challenges
# CPR - Compendium of Priority Recommendations
# SP - Strategic Plan
# WP - Work Plan
# POR - Portfolio and Other Reports
# FOIA - Freedom of Information Act (FOIA)
# FRN - Federal Register Notices
# RA - Regulatory Authorities
# B - OIG Budget
# RAOR - Recovery Act Oversight Reports
# RAA - Recovery Act-related Audit and Inspection Reports
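# Example invocation (hypothetical; the exact runner flags depend on the project's
# CLI as documented in the repository README):
#
#   ./igs --only=hhs --topics=OAS,OE --since=2010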
# Notes for IG's web team:
# - A large number of reports don't list a date when they were published.
# See "Adverse Events in Hospitals: Medicare's Responses to Alleged Serious
# Events"(OEI-01-08-00590) referenced at
# http://oig.hhs.gov/reports-and-publications/oei/a.asp
# As a fallback, this scraper uses the HTTP Last-Modified header for reports
# published after 2002. For reports published before 2002, we use the report id
# month-year as an approximation of the published date.
# - Fix published date for http://oig.hhs.gov/oas/reports/region3/31200010.asp
# on http://oig.hhs.gov/reports-and-publications/oas/cms.asp. It currently
# says 08-03-2102
# - Fix published date for http://oig.hhs.gov/oei/reports/oei-06-98-00321.pdf
# on http://oig.hhs.gov/reports-and-publications/oei/s.asp. It currently
# says Dec 2028.
# - Fix published date for http://oig.hhs.gov/oas/reports/region7/70903133.asp
# It currently says 14-21-2010.
# - Fix published date for http://oig.hhs.gov/oas/reports/region3/31300031.asp
# It currently says 03-05-2015.
# - Fix published date for http://oig.hhs.gov/oas/reports/region9/91102005.asp
# It currently says 04-23-3012.
# - Add missing report for 'Use of Discounted Airfares by the Office of the Secretary' (A-03-07-00500)
# linked to from http://oig.hhs.gov/reports-and-publications/oas/dept.asp
# - The report http://oig.hhs.gov/oas/reports/region5/50800067.asp returns a 500.
# - The link to the report for "Personnel Suitability and Security (OAI-02-86-00079; 11/87)"
# points to a copy of the report OAI-07-86-00079.
# - The link to the report for "Errors Resulting in Overpayment In the AFDC Program (OAI-04-86-0024; 06/87)"
# points to a copy of the report OEI-05-90-00720.
# - The date for OEI-07-91-01470 is incorrectly listed as 4/94 on the S page,
# and correctly listed as 4/92 on the O page.
# - There is a typo in one of the links on http://oig.hhs.gov/reports-and-publications/oei/h.asp,
# it should point to #hospitals, not #hospiatls.
TOPIC_TO_URL = {
"OAS": 'http://oig.hhs.gov/reports-and-publications/oas/index.asp',
"OE": 'http://oig.hhs.gov/reports-and-publications/oei/subject_index.asp',
"HCF": 'http://oig.hhs.gov/reports-and-publications/hcfac/index.asp',
"SAR": 'http://oig.hhs.gov/reports-and-publications/semiannual/index.asp',
"MIR": 'http://oig.hhs.gov/reports-and-publications/medicaid-integrity/index.asp',
"TMPC": 'http://oig.hhs.gov/reports-and-publications/top-challenges/2015/',
"CPR": 'http://oig.hhs.gov/reports-and-publications/compendium/index.asp',
"SP": 'http://oig.hhs.gov/reports-and-publications/strategic-plan/index.asp',
"WP": 'http://oig.hhs.gov/reports-and-publications/workplan/index.asp',
"POR": 'http://oig.hhs.gov/reports-and-publications/portfolio/index.asp',
"FOIA": 'http://oig.hhs.gov/reports-and-publications/foia/index.asp',
"FRN": 'http://oig.hhs.gov/reports-and-publications/federal-register-notices/index.asp',
"RA": 'http://oig.hhs.gov/reports-and-publications/regulatory-authorities/index.asp',
"B": 'http://oig.hhs.gov/reports-and-publications/budget/index.asp',
# "RAOR": 'http://oig.hhs.gov/reports-and-publications/recovery/index.asp',
"RAA": 'http://oig.hhs.gov/reports-and-publications/recovery/recovery_reports.asp',
}
TOPIC_TO_ARCHIVE_URL = {
"OAS": 'http://oig.hhs.gov/reports-and-publications/archives/oas/index.asp',
# Some reports missing published dates
# "SAR": 'http://oig.hhs.gov/reports-and-publications/archives/semiannual/index.asp',
# Some reports missing published dates
# "TMPC": 'http://oig.hhs.gov/reports-and-publications/archives/top-challenges/index.asp',
# No published dates
# "CPR": 'http://oig.hhs.gov/reports-and-publications/archives/compendium/redbook.asp',
# Some reports missing published dates
# "WP": 'http://oig.hhs.gov/reports-and-publications/archives/workplan/index.asp',
"FRN": 'http://oig.hhs.gov/reports-and-publications/archives/federal-register-notices/index.asp',
"B": 'http://oig.hhs.gov/reports-and-publications/archives/budget/index.asp',
}
TOPIC_NAMES = {
"OAS": 'Office of Audit Services',
"OE": 'Office of Evaluation and Inspections',
"HCF": 'Health Care Fraud and Abuse Control Program Report ',
"SAR": 'Semiannual Reports to Congress',
"MIR": 'Medicaid Integrity Reports',
"TMPC": 'Top Management & Performance Challenges',
"CPR": 'Compendium of Priority Recommendations',
"SP": 'Strategic Plan',
"WP": 'Work Plan',
"POR": 'Portfolio and Other Reports',
"FOIA": 'Freedom of Information Act (FOIA)',
"FRN": 'Federal Register Notices',
"RA": 'Regulatory Authorities',
"B": 'OIG Budget',
"RAOR": 'Recovery Act Oversight Reports',
"RAA": 'Recovery Act-related Audit and Inspection Reports',
}
TOPIC_WITH_SUBTOPICS = ['OAS', 'OE']
REPORT_URL_MAPPING = {
"http://oig.hhs.gov/reports-and-publications/medicaid-integrity/2011/": "http://oig.hhs.gov/reports-and-publications/medicaid-integrity/2011/medicaid_integrity_reportFY11.pdf",
"http://oig.hhs.gov/reports-and-publications/compendium/2011.asp": "http://oig.hhs.gov/publications/docs/compendium/2011/CMP-March2011-Final.pdf",
"http://oig.hhs.gov/reports-and-publications/oas/reports/region3/30700500.htm": "http://oig.hhs.gov/oas/reports/region3/30700500.htm",
}
REPORT_PUBLISHED_MAPPING = {
"31200010": datetime.datetime(2012, 8, 3),
"OIG-Strategic-Plan-2014-2018": datetime.datetime(2014, 1, 1),
"CMP-March2011-Final": datetime.datetime(2011, 3, 1),
"hcfacreport2004": datetime.datetime(2005, 9, 1),
# This is a published draft for next year, date taken from PDF metadata
'FY2015_HHSOIG_Congressional_Justification': datetime.datetime(2014, 3, 4),
# This has an incorrect datetime (2028)
'oei-06-98-00321': datetime.datetime(2000, 12, 1),
# This has an incorrect datetime (14-21-2010)
'70903133': datetime.datetime(2010, 4, 21),
# This has an incorrect datetime (03-05-2015)
'31300031': datetime.datetime(2014, 3, 1),
# This has an incorrect datetime (04-23-3012)
'91102005': datetime.datetime(2012, 4, 23),
# See OEI_COMBINED_LANDING_PAGES below, we are skipping parsing the landing
# pages for these reports for now
'oei-09-08-00580': datetime.datetime(2011, 9, 30),
'oei-09-08-00581': datetime.datetime(2011, 9, 30),
'oei-05-09-00560': datetime.datetime(2011, 8, 29),
'oei-05-09-00561': datetime.datetime(2011, 8, 29),
# This has the right date in one place and the wrong date in another
'oei-07-91-01470': datetime.datetime(1992, 4, 1),
"41206159": datetime.datetime(2013, 12, 4),
}
# This manually entered data is used to skip landing pages that hold more than
# one report. We use the correct PDF link for each, which makes deduplication
# easier.
OEI_COMBINED_LANDING_PAGES = {
"http://oig.hhs.gov/oei/reports/oei-09-08-00580-00581.asp": {
"Access to Mental Health Services at Indian Health Service and Tribal Facilities": "http://oig.hhs.gov/oei/reports/oei-09-08-00580.pdf",
"Access to Kidney Dialysis Services at Indian Health Service and Tribal Facilities": "http://oig.hhs.gov/oei/reports/oei-09-08-00581.pdf"
},
"http://oig.hhs.gov/oei/reports/oei-05-09-00560-00561.asp": {
"Miami Independent Diagnostic Testing Facilities' Compliance with Medicare Standards": "http://oig.hhs.gov/oei/reports/oei-05-09-00560.pdf",
"Los Angeles Independent Diagnostic Testing Facilities' Compliance with Medicare Standards": "http://oig.hhs.gov/oei/reports/oei-05-09-00561.pdf"
},
}
BLACKLIST_TITLES = [
'Return to Reports and Publications',
'Read the Summary',
'Back to Archives',
'Top',
]
# These are links that appear like reports, but are not.
BLACKLIST_REPORT_URLS = [
'http://get.adobe.com/reader/',
# See note to IG web team
'http://oig.hhs.gov/reports/region3/30700500.htm',
'http://oig.hhs.gov/oas/reports/region5/50800067.asp',
# press release, format is inconsistent with everything else
'http://oig.hhs.gov/newsroom/news-releases/2014/sar14fall.asp',
# Duplicate report, uploaded in two regions
'http://oig.hhs.gov/oas/reports/region1/100300001.htm',
# Summary maps for report series
'http://oig.hhs.gov/oas/jurisdiction-map/',
'http://oig.hhs.gov/oas/map/',
'http://oig.hhs.gov/oei/maps/ccdf',
'http://oig.hhs.gov/oei/maps/nursing-home/',
# Landing pages
'http://oig.hhs.gov/fraud/',
'http://oig.hhs.gov/compliance/',
'http://oig.hhs.gov/exclusions/index.asp',
]
TITLE_NORMALIZATION = {
"ReportingAbuses of Persons with Disabilities":
"Reporting Abuses of Persons with Disabilities",
"Officeof Inspector General's Partnership Plan - New York StateComptroller Report on Controlling Medicaid Paymentsfor School and Preschool Supportive Health Services":
"Office of Inspector General's Partnership Plan - New York State Comptroller Report on Controlling Medicaid Payments for School and Preschool Supportive Health Services",
"OIG Partnership Plan: Drug Delivery System for Montana's Medicaid Program":
"Office of Inspector General's Partnership Plan: Drug Delivery System for Montana's Medicaid Program",
"Partnership Audit of Medicaid Paymentsfor Oxygen Related Durable Medical Equipment and Supplies - January 1, 1998 through December 31, 2000 Kentucky Department for Medicaid Services, Frankfort, Kentucky":
"Partnership Audit of Medicaid Payments for Oxygen Related Durable Medical Equipment and Supplies - January 1, 1998 through December 31, 2000 Kentucky Department for Medicaid Services, Frankfort, Kentucky",
"Partnership Audit of Medicaid Paymentsfor Oxygen Related Durable Medical Equipment and Supplies - January 1, 1998 through December31, 2000 Kentucky Department for Medicaid Services, Frankfort, Kentucky":
"Partnership Audit of Medicaid Payments for Oxygen Related Durable Medical Equipment and Supplies - January 1, 1998 through December 31, 2000 Kentucky Department for Medicaid Services, Frankfort, Kentucky",
"OIG Partnership Plan: Medicaid Payments for Clinical Laboratory Tests in Eight States":
"Office of Inspector General's Partnership Plan: Medicaid Payments for Clinical Laboratory Tests in Eight States",
"OIG Partnership Plan: Montana Legislative Auditor's Office Report on Medicaid Expenditures for Durable Medical Equipment":
"Office of Inspector General's Partnership Plan: Montana Legislative Auditor's Office Report on Medicaid Expenditures for Durable Medical Equipment",
"OIG Partnership Plan: Transportation Services for Montana's Medicaid Program":
"Office of Inspector General's Partnership Plan: Transportation Services for Montana's Medicaid Program",
"Office of Inspector General's Partnership Plan-State of Montana's Medicaid Third Party Liability Program":
"Office of Inspector General's Partnership Plan - State of Montana's Medicaid Third Party Liability Program",
"Review of the Food and Drug Administration's Processing of a new Drug Application for Therafectin":
"Review of the Food and Drug Administration's Processing of a New Drug Application for Therafectin",
"OIG Partnership Plan: Medicaid Payments for Clinical Laboratory Tests in 14 States":
"Office of Inspector General's Partnership Plan: Medicaid Payments for Clinical Laboratory Tests in 14 States",
"OIG Partnership Plan: Review of the North Carolina Division of Medical Assistance's Reimbursement for Clinical Laboratory Services Under the Medicaid Program":
"Office of Inspector General's Partnership Plan - Review of the North Carolina Division of Medical Assistance's Reimbursement for Clinical Laboratory Services Under the Medicaid Program",
"Officeof Inspector General's Partnership Efforts - Texas StateAuditor's Office Report on the Department of Protectiveand Regulatory Services' Administration of Foster CareContracts":
"Office of Inspector General's Partnership Efforts - Texas State Auditor's | |
<gh_stars>1-10
""" Destination.
Do not edit this file by hand.
This is generated by parsing api.html service doc.
"""
from ambra_sdk.exceptions.service import CanNotTrack
from ambra_sdk.exceptions.service import DupAetitle
from ambra_sdk.exceptions.service import FilterNotFound
from ambra_sdk.exceptions.service import InsufficientCriteria
from ambra_sdk.exceptions.service import InvalidCdBurnInfo
from ambra_sdk.exceptions.service import InvalidCondition
from ambra_sdk.exceptions.service import InvalidDistributedDestination
from ambra_sdk.exceptions.service import InvalidField
from ambra_sdk.exceptions.service import InvalidFieldName
from ambra_sdk.exceptions.service import InvalidFlag
from ambra_sdk.exceptions.service import InvalidGatewayType
from ambra_sdk.exceptions.service import InvalidInteger
from ambra_sdk.exceptions.service import InvalidNodeType
from ambra_sdk.exceptions.service import InvalidRegexp
from ambra_sdk.exceptions.service import InvalidSchedule
from ambra_sdk.exceptions.service import InvalidSortField
from ambra_sdk.exceptions.service import InvalidSortOrder
from ambra_sdk.exceptions.service import InvalidType
from ambra_sdk.exceptions.service import InvalidValue
from ambra_sdk.exceptions.service import MissingFields
from ambra_sdk.exceptions.service import NodeNotFound
from ambra_sdk.exceptions.service import NotFound
from ambra_sdk.exceptions.service import NotPermitted
from ambra_sdk.exceptions.service import NotSupported
from ambra_sdk.exceptions.service import NotSysadmin
from ambra_sdk.service.query import QueryO
from ambra_sdk.service.query import AsyncQueryO
from ambra_sdk.service.query import QueryOPSF
from ambra_sdk.service.query import AsyncQueryOPSF
class Destination:
"""Destination."""
def __init__(self, api):
self._api = api
def list(
self,
account_id,
uuid,
node_id=None,
serial_no=None,
):
"""List.
:param account_id: uuid of the account
:param uuid: uuid of the destination
:param node_id: node_id
:param serial_no: serial_no
"""
request_data = {
'account_id': account_id,
'node_id': node_id,
'serial_no': serial_no,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('FILTER_NOT_FOUND', None)] = FilterNotFound('The filter can not be found. The error_subtype will hold the filter UUID')
errors_mapping[('INVALID_CONDITION', None)] = InvalidCondition('The condition is not support. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_FIELD', None)] = InvalidField('The field is not valid for this object. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_SORT_FIELD', None)] = InvalidSortField('The field is not valid for this object. The error_subtype will hold the field name this applies to')
errors_mapping[('INVALID_SORT_ORDER', None)] = InvalidSortOrder('The sort order for the field is invalid. The error_subtype will hold the field name this applies to')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The account can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view this list')
query_data = {
'api': self._api,
'url': '/destination/list',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
query_data['paginated_field'] = 'destinations'
return QueryOPSF(**query_data)
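# Illustrative sketch (not part of the generated file): consuming the paginated
# query object returned above. `api` is assumed to be an already-authenticated
# ambra_sdk Api instance and `account_id` a known account uuid.
#
#   query = api.Destination.list(account_id=account_id, uuid=None)
#   for destination in query.all():
#       print(destination.name)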
def add(
self,
account_id,
address,
aetitle,
distributed_destinations,
linked_destination,
linked_qr_activity_in_referred_account,
linked_qr_to_referred_account,
name,
node_id,
path,
port,
c_echo_interval=None,
c_echo_schedule=None,
can_mwl_search=None,
can_push_hl7=None,
can_query_retrieve=None,
can_retrieve_thin=None,
can_search=None,
cd_burn_info=None,
cd_burn_name=None,
cd_burn_priority=None,
default_query_retrieve_level=None,
fire_webhooks=None,
gateway_settings=None,
hl7_address=None,
hl7_fetch_filter=None,
hl7_port=None,
manual_push_roles=None,
push_related_studies=None,
sort_order=None,
sqlch_psh_if_img_unchg=None,
sqlch_psh_if_route_hl7=None,
type=None,
ui_json=None,
):
"""Add.
:param account_id: uuid of the account
:param address: Address of the destination (required if DICOM type)
:param aetitle: Aetitle of the destination (required if DICOM type)
:param distributed_destinations: A JSON array of destination ids. This list will be used to process requests in a round-robin manner. Meaningful for DISTRIBUTING destination type only (optional)
:param linked_destination: uuid of the destination for LINKED destinations
:param linked_qr_activity_in_referred_account: A flag to create DESTINATION_SEARCH activities in the linked destination's account. Meaningful for LINKED destinations only (optional)
:param linked_qr_to_referred_account: A flag to create resultant studies in the linked destination's account (not the account of LINKED destination where the search was initiated). Meaningful for LINKED destinations only (optional)
:param name: Name of the destination
:param node_id: uuid of the node that handles the destination
:param path: Path of the folder for a FOLDER type of destination (required if FOLDER type)
:param port: Port of the destination (required if DICOM type)
:param c_echo_interval: Interval in seconds to C echo the destination (optional)
:param c_echo_schedule: C echo schedule (optional)
:param can_mwl_search: Can this destination support searching a modality work list (optional)
:param can_push_hl7: Can this destination support pushing HL7 messages (optional)
:param can_query_retrieve: Can this destination support query retrieve from HL7 messages (optional)
:param can_retrieve_thin: Can this destination support retrieving thin studies (optional)
:param can_search: Can this destination support searching (optional)
:param cd_burn_info: A JSON hash with the CD burning information (optional)
:param cd_burn_name: Name for the CD burner software (optional)
:param cd_burn_priority: Integer value for the burner priority (optional)
:param default_query_retrieve_level: Default query retrieve level this can be either (study|series|image) and defaults to study if not specified (optional)
:param fire_webhooks: Fire webhooks for events associated with this destination (optional)
:param gateway_settings: Gateway settings (optional)
:param hl7_address: Address of an attached HL7 destination (optional except for VIRTUAL destinations)
:param hl7_fetch_filter: A transform condition expression (see /transform/add for format) to match against the HL7 message. Only fire a query retrieve if the message matches the condition (optional)
:param hl7_port: Port of an attached HL7 destination (optional except for VIRTUAL destinations)
:param manual_push_roles: A comma separated list of role uuids, a user is required to have one of them to manually push to this destination (optional)
:param push_related_studies: Push all the related studies (same MRN/patientid) in the namespace when a study is pushed (optional)
:param sort_order: Integer value for sorting (optional)
:param sqlch_psh_if_img_unchg: Squelch pushes to the destination if the image count has not changed and the push is by a routing rule (optional)
:param sqlch_psh_if_route_hl7: Squelch pushes to the destination if the push was generated by HL7 triggered routing (optional)
:param type: Type of the destination either DICOM, FOLDER, ACCELERATOR,VIRTUAL, BURNER, XDS, LINKED, DISTRIBUTING or UPLOADER. Defaults to DICOM (optional)
:param ui_json: JSON for UI settings (optional)
"""
request_data = {
'account_id': account_id,
'address': address,
'aetitle': aetitle,
'c_echo_interval': c_echo_interval,
'c_echo_schedule': c_echo_schedule,
'can_mwl_search': can_mwl_search,
'can_push_hl7': can_push_hl7,
'can_query_retrieve': can_query_retrieve,
'can_retrieve_thin': can_retrieve_thin,
'can_search': can_search,
'cd_burn_info': cd_burn_info,
'cd_burn_name': cd_burn_name,
'cd_burn_priority': cd_burn_priority,
'default_query_retrieve_level': default_query_retrieve_level,
'distributed_destinations': distributed_destinations,
'fire_webhooks': fire_webhooks,
'gateway_settings': gateway_settings,
'hl7_address': hl7_address,
'hl7_fetch_filter': hl7_fetch_filter,
'hl7_port': hl7_port,
'linked_destination': linked_destination,
'linked_qr_activity_in_referred_account': linked_qr_activity_in_referred_account,
'linked_qr_to_referred_account': linked_qr_to_referred_account,
'manual_push_roles': manual_push_roles,
'name': name,
'node_id': node_id,
'path': path,
'port': port,
'push_related_studies': push_related_studies,
'sort_order': sort_order,
'sqlch_psh_if_img_unchg': sqlch_psh_if_img_unchg,
'sqlch_psh_if_route_hl7': sqlch_psh_if_route_hl7,
'type': type,
'ui_json': ui_json,
}
errors_mapping = {}
errors_mapping[('DUP_AETITLE', None)] = DupAetitle('Duplicate aetitle. All destinations for the same node must have a unique aetitle')
errors_mapping[('INVALID_CD_BURN_INFO', None)] = InvalidCdBurnInfo('Invalid cd_burn_info. The error_subtype holds more detail')
errors_mapping[('INVALID_DISTRIBUTED_DESTINATION', None)] = InvalidDistributedDestination('distributed_destinations configuration is invalid')
errors_mapping[('INVALID_FLAG', None)] = InvalidFlag('An invalid flag was passed. The error_subtype holds the name of the invalid flag')
errors_mapping[('INVALID_GATEWAY_TYPE', None)] = InvalidGatewayType('The type is wrong for the gateway it is getting attached to')
errors_mapping[('INVALID_INTEGER', None)] = InvalidInteger('An invalid integer was passed. The error_subtype holds the name of the invalid integer')
errors_mapping[('INVALID_NODE_TYPE', None)] = InvalidNodeType('The node is not a harvester')
errors_mapping[('INVALID_NODE_TYPE', None)] = InvalidNodeType('The node type is invalid for this type of destination')
errors_mapping[('INVALID_SCHEDULE', None)] = InvalidSchedule('The schedule is invalid. The error_subtype holds the error detail')
errors_mapping[('INVALID_TYPE', None)] = InvalidType('An invalid type was passed')
errors_mapping[('INVALID_VALUE', None)] = InvalidValue('An invalid value was passed. The error_subtype holds the value')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NODE_NOT_FOUND', None)] = NodeNotFound('The node can not be found')
errors_mapping[('NOT_FOUND', None)] = NotFound('The account can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to add a destination to this account')
errors_mapping[('NOT_SYSADMIN', None)] = NotSysadmin('The user is not a sysadmin user')
query_data = {
'api': self._api,
'url': '/destination/add',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def set(
self,
distributed_destinations,
linked_qr_activity_in_referred_account,
linked_qr_to_referred_account,
uuid,
address=None,
aetitle=None,
c_echo_interval=None,
c_echo_schedule=None,
can_mwl_search=None,
can_push_hl7=None,
can_query_retrieve=None,
can_retrieve_thin=None,
can_search=None,
cd_burn_info=None,
cd_burn_name=None,
cd_burn_priority=None,
default_query_retrieve_level=None,
fire_webhooks=None,
gateway_settings=None,
hl7_address=None,
hl7_fetch_filter=None,
hl7_port=None,
manual_push_roles=None,
name=None,
node_id=None,
path=None,
port=None,
push_related_studies=None,
sort_order=None,
sqlch_psh_if_img_unchg=None,
sqlch_psh_if_route_hl7=None,
ui_json=None,
):
"""Set.
:param distributed_destinations: A JSON array of destination ids. This list will be used to process requests in a round-robin manner. Meaningful for DISTRIBUTING destination type only (optional)
:param linked_qr_activity_in_referred_account: A flag to create DESTINATION_SEARCH activities in the linked destination's account. Meaningful for LINKED destinations only (optional)
:param linked_qr_to_referred_account: A flag to create resultant studies in the linked destination's account (not the account of LINKED destination where the search was initiated). Meaningful for LINKED destinations only (optional)
:param uuid: uuid of the destination
:param address: Address of the destination (optional)
:param aetitle: Aetitle of the destination (optional)
:param c_echo_interval: Interval in seconds to C echo the destination (optional)
:param c_echo_schedule: C echo schedule (optional)
:param can_mwl_search: Can this destination support searching a modality work list (optional)
:param can_push_hl7: Can this destination support pushing HL7 | |
#!/usr/bin/env python3
import re
import os
import sys
import json
import yaml
import glob
import shutil
import random
import logging
from pathlib import Path
logger = logging.getLogger('apiLogger')
path_to_src = Path(__file__, '../..').resolve()
path_to_data = path_to_src.joinpath('../data').resolve()
def find_path_to_cfg(func):
def wrapper(*a, **kw):
config = kw.pop('config', None)
# If config was not specified in kwargs
if config is None:
# Then it must be declared in args
if len(a) == 0:
logger.critical('YOU NEED TO PASS THE NAME OF THE CONFIG FILE!!!')
raise Exception('You must pass the name of the config to a function with this decorator.')
else:
config = a[-1]
a = a[:-1]
path_to_cfg_file = path_to_src.joinpath('configs', config)
return func(*a, config=path_to_cfg_file, **kw)
return wrapper
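# Illustrative note (not in the original file): the decorator above lets callers
# pass only a config file name, which is then resolved against <src>/configs.
# The function and file names below are invented for demonstration.
#
#   @find_path_to_cfg
#   def load_settings(config: str) -> dict:
#       with open(config) as f:
#           return yaml.safe_load(f)
#
#   settings = load_settings(config='codex.yaml')  # opens <src>/configs/codex.yaml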
@find_path_to_cfg
def generate_code_prompt(config: str) -> tuple[list[str], list[str]]:
"""This will generate code generation prompts according to the config file passed in.
Args:
config (str): The yaml file holding the generation config.
Returns:
tuple[list[str], list[str]]: The first is the list of prompt strings to pass to the model.
The second is the output dir for each problem.
"""
with open(config) as f:
cfg = yaml.safe_load(f)
prompts, num_remaining = validate_prompts(cfg, cfg['promptFile'])
human_probs, model_probs = get_completed_problems()
available_prompts = human_probs if cfg['humanOnly'] else human_probs | model_probs
# These are the prompt we want to summarize
prompts = select_code_prompts(prompts, num_remaining, available_prompts, cfg)
# If we include original we return a new set of prompts
prompt_texts, prompts = generate_example_code(prompts, available_prompts, cfg)
return prompt_texts, prompts
@find_path_to_cfg
def generate_summary_prompt(api: str, config: str) -> tuple[str, str, str]:
"""This will generate a prompt for summarization based on the
configuration file passed in.
Args:
api (str): The api calling this function.
cfg (str): The name of the config file to use.
Returns:
tuple[str, str, str]: The full prompt, the remaining section to be
added to the output, and the path to the output directory.
"""
with open(config) as f:
cfg = yaml.safe_load(f)
human_probs, model_probs = get_completed_problems(api=api)
# Select a summary prompt that hasn't been summarized
ignore_intro = cfg.get('ignoreIntro', True)
ignore_train = cfg.get('ignoreTrain', True)
original_prompt_fname = select_summary_prompt(
human_probs | model_probs,
ignore_intro=ignore_intro,
ignore_train=ignore_train)
output_dir = save_config(original_prompt_fname, config, f'data/{api}_generated', cfg['promptFile'])
prompt_type = detect_type(original_prompt_fname)
prompt, remainder = split_prompt(original_prompt_fname, cfg['splitFile'])
priming_examples = generate_example_prompt(prompt_type, cfg, human_probs, cfg['promptFile'])
full_example = priming_examples + f'{cfg["fewShotSuffix"]}{cfg["originalPrefix"]}{prompt}\n{cfg["summaryPrefix"]}'
# Remove ending whitespace to increase accuracy
full_example = remove_ending_whitespace(full_example)
full_example = ensure_ascii(full_example)
return full_example, remainder, output_dir
def get_completed_problems(api: str = '*') -> tuple[set[str], set[str]]:
"""Get the problems that have already been summarized.
Args:
api (str, optional): Allows us to select specific model probs.
Defaults to '*' which gets all model generated problems.
Returns:
tuple(set[str], set[str]): The first set is the problems that
a human has summarized. The second set is the problems that a
model has summarized.
"""
human_probs = set(glob.glob(f'{path_to_data}/[ic]*/*'))
train_probs = set(glob.glob(f'{path_to_data}/{api}_generated/[ic]*/*'))
test_probs = set(glob.glob(f'{path_to_data}/{api}_generated/test/[ic]*/*'))
model_probs = train_probs | test_probs
logger.debug(f'Found {len(human_probs)} human generated summaries.')
logger.debug(f'Found {len(model_probs)} model generated summaries.')
return human_probs, model_probs
def select_summary_prompt(probs: set[str], ignore_intro: bool = True, ignore_train: bool = True) -> str:
"""Select a random problem to be summarized by the model.
Args:
probs (set[str]): A set of problems we have already done.
ignore_intro (bool, optional): If you want to avoid summarizing
introductory problems. Defaults to True.
ignore_train (bool, optional): If you want to avoid summarizing
problems from the training set. Defaults to True.
Returns:
str: The path to the question we are summarizing.
"""
# Introductory problems in the train set are 2361 - 4999.
# In the test set they are 4000 - 4999, which count here as 9000 - 9999.
available_probs = set(range(10000))
if ignore_intro:
available_probs -= set(range(9000, 10000))
available_probs -= set(range(2361, 5000))
logger.info('Ignoring introductory problems.')
if ignore_train:
available_probs -= set(range(5000))
logger.info('Ignoring training problems.')
# This is limiting the number of examples we will generate with GPT.
# There are 250 competitive and 250 interview available.
# Not counting the ones already generated w/ 3 few shot.
#available_probs -= set(range(8000, 8761))
#available_probs -= set(range(5000, 7785))
def get_num(x):
num = int(os.path.basename(x))
if 'test' in x:
num += 5000
return num
probs = set(map(get_num, probs))
available_probs -= probs
logger.debug(f'There are {len(available_probs)} remaining problems to summarize.')
if len(available_probs) == 0:
raise BaseException("There are no more problems!")
available_probs = list(available_probs)
prob_to_summarize = random.choice(available_probs)
path_to_apps = path_to_src.joinpath('../APPS').resolve()
if prob_to_summarize >= 5000:
prob_to_summarize -= 5000
fname = f'{path_to_apps}/test/*/{str(prob_to_summarize).zfill(4)}/question.txt'
else:
fname = f'{path_to_apps}/*/{str(prob_to_summarize).zfill(4)}/question.txt'
original_prompt = glob.glob(fname)[0]
return original_prompt
@find_path_to_cfg
def save_config(prompt_fname: str, cfg_fname: str, output_path: str, config: str) -> str:
"""We will copy the original files and config used for this generation.
Args:
prompt_fname (str): Name of the file we are summarizing.
cfg_fname (str): Name of the config file.
output_path (str): Prefix of the path to save to.
config (str): Name of the prompt categories config file.
Returns:
str: The path to where the example was saved.
"""
# Copy the original directory to the output directory
prompt_dir = os.path.split(prompt_fname)[0]
output_dir = prompt_dir.replace('APPS', output_path)
shutil.copytree(prompt_dir, output_dir)
logger.debug(f'Saved original prompt directory to {output_dir}')
# Save config files in output dir
shutil.copy(cfg_fname, output_dir)
shutil.copy(config, output_dir)
return output_dir
def detect_type(fname: str) -> str:
logger.info(f'Summarizing the file: {fname}')
return 'general'
@find_path_to_cfg
def split_prompt(fname: str, config: str) -> tuple[str, str]:
"""This will split the original prompt into the question and
the information section.
Args:
fname (str): The file we want to split.
config (str): The file that defines the different splits.
Returns:
tuple(str, str): The question and the information section.
"""
with open(fname) as f:
prompt = f.read().splitlines()
with open(config) as f:
re_str = f.read().splitlines()
re_str = '|'.join(re_str)
regex = re.compile(re_str)
# Only join non empty lines
prompt = [t for t in prompt if t]
prompt = ' '.join(prompt)
# Find the information section
prompt_idx = -1
match = regex.search(prompt)
if match:
prompt_idx = match.span()[0]
else:
logger.error(f'A split was not found for file {fname}')
return prompt[:prompt_idx], prompt[prompt_idx:]
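# Illustrative sketch: assuming cfg['splitFile'] lists patterns such as
# r'-----Input-----', the call below would return the narrative statement and the
# Input/Output specification of an APPS question as two strings. The path and the
# config name are invented for demonstration.
#
#   question, info = split_prompt('APPS/train/0001/question.txt', 'split_regexes.txt')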
@find_path_to_cfg
def generate_example_prompt(prompt_type: str, cfg: dict[str, any], probs: set[int], config: str) -> str:
"""Generating the examples that will be passed to the model before the new summary.
Args:
prompt_type (str): The determined type of the problem.
cfg (dict[str, any]): The configuration for this generation.
probs (set[int]): The human problems to choose from.
config (str): The prompt category config file.
Returns:
str: The formatted examples for this generation.
"""
# TODO: Implement the problem categories
with open(config) as f:
prompt_cfg = yaml.safe_load(f)
output = []
if cfg['header']:
output = [cfg['header']]
probs = list(probs)
# If it's a general problem choose any examples
if prompt_type == 'general':
example_prompts = random.sample(probs, cfg['numPrompts'])
for ex in example_prompts:
# Get the original prompt and the summarized
orig = os.path.join(ex, 'question.txt')
summary = os.path.join(ex, f'{cfg["summaryType"]}.txt')
logger.debug(f'Using summary {summary} for priming model')
orig, _ = split_prompt(orig, cfg['splitFile'])
summary, _ = split_prompt(summary, cfg['splitFile'])
output.append(f'{cfg["originalPrefix"]}{orig}\n{cfg["summaryPrefix"]}{summary}')
for text, num in zip(output, example_prompts):
if not check_ascii(text):
logger.warning(f'Problem {num} was not ascii.')
output = cfg["fewShotSuffix"].join(output)
return output
def remove_ending_whitespace(prompt: str) -> str:
"""Remove ending whitespace from string.
Args:
prompt (str): prompt to remove from
Returns:
str: The resulting string
"""
return prompt.rstrip()
def ensure_ascii(text: str) -> str:
"""Convert the text into ASCII.
Args:
text (str): The text to convert.
Returns:
str: The converted text.
"""
# TODO: Should we use python's ascii method?
# That method will escape any non ascii character and
# potentially confuse the model and increase token count.
text = text.encode('ascii', 'replace')
text = text.decode('ascii')
text = text.replace('?', ' ')
if not check_ascii(text):
logger.error(f'THE FINAL PROMPT IS NOT ASCII!!!')
return text
def check_ascii(text: str) -> bool:
"""Check is a string is ASCII.
Args:
text (str): The string to check.
Returns:
bool: False means the text was not ASCII.
"""
if any(ord(c) >= 128 for c in text):
return False
return True
@find_path_to_cfg
def validate_prompts(cfg: dict[str, any], config: str) -> tuple[set[str], int]:
"""Read the specified prompts and determine how many more we need to choose.
Args:
cfg (dict[str, any]): The configuration dict.
config (str): The path to the prompt config file, that is
specified in the codex config file.
Returns:
tuple[set[str], int]: The set of paths to the problems we want to generate for,
and the number of remaining prompts we have | |
# coding: utf-8
# Author: <NAME>
# Contact: <EMAIL>
# Python modules
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import pyqtSignal
import os
import time
import logging
import traceback
import copy
# Wizard modules
from wizard.core import launch
from wizard.core import assets
from wizard.core import user
from wizard.core import project
from wizard.core import tools
from wizard.core import path_utils
from wizard.core import subtasks_library
from wizard.vars import ressources
from wizard.vars import user_vars
from wizard.vars import assets_vars
# Wizard gui modules
from wizard.gui import gui_utils
from wizard.gui import gui_server
from wizard.gui import confirm_widget
from wizard.gui import drop_files_widget
from wizard.gui import comment_widget
from wizard.gui import batch_settings_widget
logger = logging.getLogger(__name__)
class versions_widget(QtWidgets.QWidget):
version_changed_signal = pyqtSignal(str)
def __init__(self, parent=None):
super(versions_widget, self).__init__(parent)
self.work_env_id = None
self.versions_rows = None
self.version_list_ids = dict()
self.version_icon_ids = dict()
self.check_existence_thread = check_existence_thread()
self.search_thread = search_thread()
self.icon_mode = 0
self.list_mode = 1
self.build_ui()
self.connect_functions()
self.show_info_mode("Select or create a stage\nin the project tree !", ressources._select_stage_info_image_)
def dragEnterEvent(self, event):
self.drop_widget.setVisible(1)
event.accept()
def dragLeaveEvent(self, event):
self.drop_widget.setVisible(0)
event.accept()
def dropEvent(self, event):
self.drop_widget.setVisible(0)
data = event.mimeData()
urls = data.urls()
files = []
for url in urls:
if url and url.scheme() == 'file':
path = str(url.path())[1:]
files.append(path)
if len(files) != 0:
self.merge_files(files)
def focus_work_version(self, work_version_id):
if self.icon_mode:
ids = self.version_icon_ids
view = self.icon_view
else:
ids = self.version_list_ids
view = self.list_view
if work_version_id in ids.keys():
item = ids[work_version_id]
view.scrollToItem(item)
view.setCurrentItem(item)
def open_files(self):
options = QtWidgets.QFileDialog.Options()
fileList, _ = QtWidgets.QFileDialog.getOpenFileNames(self, "Select files", "",
"All Files (*);", options=options)
if fileList:
self.merge_files(fileList)
def merge_files(self, files=[]):
for file in files:
assets.merge_file(file, self.work_env_id, "Manually merged file", 0)
gui_server.refresh_team_ui()
def change_work_env(self, work_env_id):
self.check_existence_thread.running = False
self.version_list_ids = dict()
self.version_icon_ids = dict()
self.list_view.clear()
self.icon_view.clear()
self.work_env_id = work_env_id
self.refresh_camera_button()
self.refresh()
def refresh_camera_button(self):
if self.work_env_id:
stage = assets.get_stage_data_from_work_env_id(self.work_env_id, 'name')
if stage in assets_vars._camera_export_stages_:
self.batch_camera_button.setVisible(True)
else:
self.batch_camera_button.setVisible(False)
else:
self.batch_camera_button.setVisible(False)
def show_info_mode(self, text, image):
self.views_widget.setVisible(0)
self.info_widget.setVisible(1)
self.info_widget.setText(text)
self.info_widget.setImage(image)
self.setAcceptDrops(False)
def hide_info_mode(self):
self.info_widget.setVisible(0)
self.views_widget.setVisible(1)
self.setAcceptDrops(True)
def refresh(self):
start_time = time.time()
if self.isVisible():
self.refresh_list_view()
self.refresh_icons_view()
self.update_search()
self.update_refresh_time(start_time)
def update_refresh_time(self, start_time):
refresh_time = str(round((time.time()-start_time), 3))
self.refresh_label.setText(f"- refresh : {refresh_time}s")
def refresh_list_view(self):
if self.list_mode:
if self.work_env_id is not None and self.work_env_id != 0:
software_name = project.get_work_env_data(self.work_env_id, 'name')
software_icon = QtGui.QIcon(ressources._sofwares_icons_dic_[software_name])
self.versions_rows = project.get_work_versions(self.work_env_id)
project_versions_id = []
if self.versions_rows is not None:
self.hide_info_mode()
for version_row in self.versions_rows:
project_versions_id.append(version_row['id'])
if version_row['id'] not in self.version_list_ids.keys():
version_item = custom_version_tree_item(version_row, software_icon, self.list_view.invisibleRootItem())
self.version_list_ids[version_row['id']] = version_item
else:
self.version_list_ids[version_row['id']].refresh(version_row)
version_list_ids = list(self.version_list_ids.keys())
for version_id in version_list_ids:
if version_id not in project_versions_id:
self.remove_tree_version(version_id)
self.check_existence_thread.update_versions_rows(self.versions_rows)
elif self.work_env_id is None:
self.show_info_mode("Init the work environment\nto create the first version !", ressources._init_work_env_info_image_)
else:
self.show_info_mode("Select or create a stage\nin the project tree !", ressources._select_stage_info_image_)
self.refresh_infos()
def refresh_icons_view(self):
if self.icon_mode:
if self.work_env_id is not None and self.work_env_id != 0:
self.versions_rows = project.get_work_versions(self.work_env_id)
project_versions_id = []
if self.versions_rows is not None:
self.hide_info_mode()
for version_row in self.versions_rows:
project_versions_id.append(version_row['id'])
if version_row['id'] not in self.version_icon_ids.keys():
version_item = custom_version_icon_item(version_row)
self.icon_view.addItem(version_item)
self.version_icon_ids[version_row['id']] = version_item
version_icon_ids = list(self.version_icon_ids.keys())
for version_id in version_icon_ids:
if version_id not in project_versions_id:
self.remove_icon_version(version_id)
self.check_existence_thread.update_versions_rows(self.versions_rows)
elif self.work_env_id is None:
self.show_info_mode("Init the work environment\nto create the first version !", ressources._init_work_env_info_image_)
else:
self.show_info_mode("Select or create a stage\nin the project tree !", ressources._select_stage_info_image_)
self.refresh_infos()
def missing_file(self, version_id):
if self.list_mode:
if version_id in self.version_list_ids.keys():
self.version_list_ids[version_id].set_missing()
elif self.icon_mode:
if version_id in self.version_icon_ids.keys():
self.version_icon_ids[version_id].set_missing()
def not_missing_file(self, version_id):
if self.list_mode:
if version_id in self.version_list_ids.keys():
self.version_list_ids[version_id].set_not_missing()
elif self.icon_mode:
if version_id in self.version_icon_ids.keys():
self.version_icon_ids[version_id].set_not_missing()
def hide_all(self):
if self.list_mode:
for version_id in self.version_list_ids.keys():
self.version_list_ids[version_id].setHidden(True)
elif self.icon_mode:
for version_id in self.version_icon_ids.keys():
self.version_icon_ids[version_id].setHidden(True)
def show_all(self):
if self.list_mode:
for version_id in self.version_list_ids.keys():
self.version_list_ids[version_id].setHidden(False)
elif self.icon_mode:
for version_id in self.version_icon_ids.keys():
self.version_icon_ids[version_id].setHidden(False)
def update_search(self):
search_data = self.search_bar.text()
if search_data != '':
self.search_thread.update_search(self.versions_rows, search_data)
else:
self.show_all()
def show_search_version(self, version_id):
if self.list_mode:
if version_id in self.version_list_ids.keys():
self.version_list_ids[version_id].setHidden(False)
elif self.icon_mode:
if version_id in self.version_icon_ids.keys():
self.version_icon_ids[version_id].setHidden(False)
def hide_search_version(self, version_id):
if self.list_mode:
if version_id in self.version_list_ids.keys():
self.version_list_ids[version_id].setHidden(True)
elif self.icon_mode:
if version_id in self.version_icon_ids.keys():
self.version_icon_ids[version_id].setHidden(True)
def connect_functions(self):
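        # The two rangeChanged lambdas below keep each view scrolled to its
        # newest entry whenever items are added to the list or icon view.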
self.list_view_scrollBar.rangeChanged.connect(lambda: self.list_view_scrollBar.setValue(self.list_view_scrollBar.maximum()))
self.icon_view_scrollBar.rangeChanged.connect(lambda: self.icon_view_scrollBar.setValue(self.icon_view_scrollBar.maximum()))
self.list_view.itemSelectionChanged.connect(self.version_changed)
self.list_view.itemDoubleClicked.connect(self.launch)
self.list_view.itemSelectionChanged.connect(self.refresh_infos)
self.list_view.customContextMenuRequested.connect(self.context_menu_requested)
self.icon_view.itemSelectionChanged.connect(self.version_changed)
self.icon_view.itemDoubleClicked.connect(self.launch)
self.icon_view.itemSelectionChanged.connect(self.refresh_infos)
self.icon_view.customContextMenuRequested.connect(self.context_menu_requested)
self.archive_button.clicked.connect(self.archive)
self.manual_merge_button.clicked.connect(self.open_files)
self.batch_button.clicked.connect(self.batch_export)
self.batch_camera_button.clicked.connect(self.batch_export_camera)
self.duplicate_button.clicked.connect(self.duplicate_version)
self.new_version_button.clicked.connect(self.add_empty_version)
self.folder_button.clicked.connect(self.open_folder)
self.toggle_view_button.clicked.connect(self.toggle_view)
self.launch_button.clicked.connect(self.launch)
self.comment_button.clicked.connect(self.modify_comment)
self.check_existence_thread.missing_file_signal.connect(self.missing_file)
self.check_existence_thread.not_missing_file_signal.connect(self.not_missing_file)
self.search_bar.textChanged.connect(self.update_search)
self.search_thread.show_id_signal.connect(self.show_search_version)
self.search_thread.hide_id_signal.connect(self.hide_search_version)
def batch_export(self):
selection = self.get_selection()
version_id = None
if len(selection) == 1:
version_id = selection[0].version_row['id']
elif len(selection) == 0:
last_version_id = project.get_last_work_version(self.work_env_id, 'id')
if last_version_id:
version_id = last_version_id[0]
if version_id:
domain = assets.get_domain_data_from_work_env_id(self.work_env_id, 'name')
stage = assets.get_stage_data_from_work_env_id(self.work_env_id, 'name')
self.batch_settings_widget = batch_settings_widget.batch_settings_widget(self.work_env_id, stage)
if self.batch_settings_widget.exec_() == QtWidgets.QDialog.Accepted:
settings_dic = dict()
settings_dic['frange'] = self.batch_settings_widget.frange
settings_dic['refresh_assets'] = self.batch_settings_widget.refresh_assets
settings_dic['nspace_list'] = self.batch_settings_widget.nspace_list
settings_dic['stage_to_export'] = stage
if self.batch_settings_widget.need_render_type:
settings_dic['render_type'] = self.batch_settings_widget.render_type
if project.get_work_env_data(self.work_env_id, 'name') == 'guerilla_render':
settings_dic['farm'] = self.batch_settings_widget.guerilla_deadline
else:
settings_dic['farm'] = False
if self.batch_settings_widget.deadline:
subtasks_library.deadline_batch_export(version_id, settings_dic)
else:
subtasks_library.batch_export(version_id, settings_dic)
def batch_export_camera(self):
selection = self.get_selection()
version_id = None
if len(selection) == 1:
version_id = selection[0].version_row['id']
elif len(selection) == 0:
last_version_id = project.get_last_work_version(self.work_env_id, 'id')
if last_version_id:
version_id = last_version_id[0]
if version_id:
domain = assets.get_domain_data_from_work_env_id(self.work_env_id, 'name')
stage = assets.get_stage_data_from_work_env_id(self.work_env_id, 'name')
self.batch_settings_widget = batch_settings_widget.batch_settings_widget(self.work_env_id, 'camera')
if self.batch_settings_widget.exec_() == QtWidgets.QDialog.Accepted:
settings_dic = dict()
settings_dic['frange'] = self.batch_settings_widget.frange
settings_dic['refresh_assets'] = self.batch_settings_widget.refresh_assets
settings_dic['nspace_list'] = self.batch_settings_widget.nspace_list
settings_dic['stage_to_export'] = 'camera'
if self.batch_settings_widget.deadline:
subtasks_library.deadline_batch_export(version_id, settings_dic)
else:
subtasks_library.batch_export(version_id, settings_dic)
def build_ui(self):
self.setObjectName('dark_widget')
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.setContentsMargins(0,0,0,0)
self.main_layout.setSpacing(0)
self.setLayout(self.main_layout)
self.info_widget = gui_utils.info_widget()
self.info_widget.setVisible(0)
self.main_layout.addWidget(self.info_widget)
self.views_widget = QtWidgets.QWidget()
self.views_layout = QtWidgets.QHBoxLayout()
self.views_layout.setContentsMargins(0,0,0,0)
self.views_layout.setSpacing(0)
self.views_widget.setLayout(self.views_layout)
self.main_layout.addWidget(self.views_widget)
self.list_view = QtWidgets.QTreeWidget()
self.list_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.list_view.setObjectName('tree_as_list_widget')
self.list_view.setColumnCount(7)
self.list_view.setIndentation(0)
self.list_view.setAlternatingRowColors(True)
self.list_view.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.list_view.setHeaderLabels(['Version', 'Software', 'User', 'Date', 'Comment', 'File', 'ID'])
self.list_view.header().resizeSection(3, 150)
self.list_view.header().resizeSection(4, 250)
self.list_view.header().resizeSection(5, 400)
self.list_view.header().resizeSection(6, 50)
self.list_view_scrollBar = self.list_view.verticalScrollBar()
self.views_layout.addWidget(self.list_view)
self.icon_view = QtWidgets.QListWidget()
self.icon_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.icon_view.setObjectName('icon_view')
self.icon_view.setSpacing(4)
self.icon_view.setIconSize(QtCore.QSize(200,200))
self.icon_view.setMovement(QtWidgets.QListView.Static)
self.icon_view.setResizeMode(QtWidgets.QListView.Adjust)
self.icon_view.setViewMode(QtWidgets.QListView.IconMode)
self.icon_view.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.icon_view_scrollBar = self.icon_view.verticalScrollBar()
self.views_layout.addWidget(self.icon_view)
self.icon_view.setVisible(0)
self.infos_widget = QtWidgets.QWidget()
self.infos_widget.setObjectName('dark_widget')
self.infos_layout = QtWidgets.QHBoxLayout()
self.infos_layout.setContentsMargins(8,8,8,0)
self.infos_layout.setSpacing(4)
self.infos_widget.setLayout(self.infos_layout)
self.main_layout.addWidget(self.infos_widget)
self.infos_layout.addSpacerItem(QtWidgets.QSpacerItem(0,0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed))
self.versions_count_label = QtWidgets.QLabel()
self.versions_count_label.setObjectName('gray_label')
self.infos_layout.addWidget(self.versions_count_label)
self.selection_count_label = QtWidgets.QLabel()
self.infos_layout.addWidget(self.selection_count_label)
self.refresh_label = QtWidgets.QLabel()
self.refresh_label.setObjectName('gray_label')
self.infos_layout.addWidget(self.refresh_label)
self.buttons_widget = QtWidgets.QWidget()
self.buttons_widget.setObjectName('dark_widget')
self.buttons_layout = QtWidgets.QHBoxLayout()
self.buttons_layout.setContentsMargins(8,8,8,8)
self.buttons_layout.setSpacing(4)
self.buttons_widget.setLayout(self.buttons_layout)
self.main_layout.addWidget(self.buttons_widget)
self.buttons_layout.addSpacerItem(QtWidgets.QSpacerItem(0,0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed))
self.search_bar = gui_utils.search_bar()
gui_utils.application_tooltip(self.search_bar, "Search for a specific version")
self.search_bar.setPlaceholderText('"0023", "j.smith&maya", "retake eye"')
self.buttons_layout.addWidget(self.search_bar)
self.toggle_view_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.toggle_view_button, "Switch to list view")
self.toggle_view_button.setFixedSize(35,35)
self.toggle_view_button.setIconSize(QtCore.QSize(25,25))
self.toggle_view_button.setIcon(QtGui.QIcon(ressources._tool_icon_view_))
self.buttons_layout.addWidget(self.toggle_view_button)
self.manual_merge_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.manual_merge_button, "Manually merge a file")
self.manual_merge_button.setFixedSize(35,35)
self.manual_merge_button.setIconSize(QtCore.QSize(25,25))
self.manual_merge_button.setIcon(QtGui.QIcon(ressources._tool_manually_publish_))
self.buttons_layout.addWidget(self.manual_merge_button)
self.batch_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.batch_button, "Batch export")
self.batch_button.setFixedSize(35,35)
self.batch_button.setIconSize(QtCore.QSize(25,25))
self.batch_button.setIcon(QtGui.QIcon(ressources._tool_batch_publish_))
self.buttons_layout.addWidget(self.batch_button)
self.batch_camera_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.batch_camera_button, "Batch export cameras")
self.batch_camera_button.setFixedSize(35,35)
self.batch_camera_button.setIconSize(QtCore.QSize(25,25))
self.batch_camera_button.setIcon(QtGui.QIcon(ressources._tool_batch_camera_))
self.buttons_layout.addWidget(self.batch_camera_button)
self.comment_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.comment_button, "Modify comment")
self.comment_button.setFixedSize(35,35)
self.comment_button.setIconSize(QtCore.QSize(25,25))
self.comment_button.setIcon(QtGui.QIcon(ressources._tool_comment_))
self.buttons_layout.addWidget(self.comment_button)
self.launch_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.launch_button, "Launch selection")
self.launch_button.setFixedSize(35,35)
self.launch_button.setIconSize(QtCore.QSize(25,25))
self.launch_button.setIcon(QtGui.QIcon(ressources._launch_icon_))
self.buttons_layout.addWidget(self.launch_button)
self.duplicate_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.duplicate_button, "Duplicate selection")
self.duplicate_button.setFixedSize(35,35)
self.duplicate_button.setIconSize(QtCore.QSize(25,25))
self.duplicate_button.setIcon(QtGui.QIcon(ressources._tool_duplicate_))
self.buttons_layout.addWidget(self.duplicate_button)
self.new_version_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.new_version_button, "Create empty version")
self.new_version_button.setFixedSize(35,35)
self.new_version_button.setIconSize(QtCore.QSize(25,25))
self.new_version_button.setIcon(QtGui.QIcon(ressources._tool_add_))
self.buttons_layout.addWidget(self.new_version_button)
self.folder_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.folder_button, "Open versions folder")
self.folder_button.setFixedSize(35,35)
self.folder_button.setIconSize(QtCore.QSize(25,25))
self.folder_button.setIcon(QtGui.QIcon(ressources._tool_folder_))
self.buttons_layout.addWidget(self.folder_button)
self.archive_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.archive_button, "Archive selection")
self.archive_button.setFixedSize(35,35)
self.archive_button.setIconSize(QtCore.QSize(25,25))
self.archive_button.setIcon(QtGui.QIcon(ressources._tool_archive_))
self.buttons_layout.addWidget(self.archive_button)
self.drop_widget = drop_files_widget.drop_widget(self)
self.drop_widget.setText('Merge file as new version')
self.drop_widget.setVisible(0)
def context_menu_requested(self):
selection = self.get_selection()
menu = gui_utils.QMenu(self)
folder_action = menu.addAction(QtGui.QIcon(ressources._tool_folder_), 'Open folder')
empty_version_action = menu.addAction(QtGui.QIcon(ressources._tool_add_), 'Create new empty version')
merge_action = menu.addAction(QtGui.QIcon(ressources._tool_manually_publish_), 'Manually merge a file')
duplicate_action = None
archive_action = None
comment_action = None
batch_action = None
if len(selection)>=1:
duplicate_action = menu.addAction(QtGui.QIcon(ressources._tool_duplicate_), 'Duplicate version(s)')
archive_action = menu.addAction(QtGui.QIcon(ressources._tool_archive_), 'Archive version(s)')
comment_action = menu.addAction(QtGui.QIcon(ressources._tool_comment_), 'Modify comment')
launch_action = None
if len(selection)==1:
launch_action = menu.addAction(QtGui.QIcon(ressources._launch_icon_), 'Launch version')
batch_action = menu.addAction(QtGui.QIcon(ressources._tool_batch_publish_), 'Batch export version')
action = menu.exec_(QtGui.QCursor().pos())
if action is not None:
if action == folder_action:
self.open_folder()
elif action == empty_version_action:
self.add_empty_version()
elif action == duplicate_action:
self.duplicate_version()
elif action == archive_action:
self.archive()
elif action == launch_action:
self.launch()
elif action == comment_action:
self.modify_comment()
elif action == merge_action:
self.open_files()
elif action == batch_action:
self.batch_export()
def modify_comment(self):
items = self.get_selection()
if items is not None:
if len(items) > 0:
self.comment_widget = comment_widget.comment_widget()
if self.comment_widget.exec_() == QtWidgets.QDialog.Accepted:
comment = self.comment_widget.comment
for item in items:
assets.modify_version_comment(item.version_row['id'], comment)
gui_server.refresh_team_ui()
def version_changed(self):
selection = self.get_selection()
if len(selection) == 1:
if selection[0] is not None:
self.version_changed_signal.emit(selection[0].version_row['name'])
def toggle_view(self):
selection = self.get_selection()
if self.icon_mode:
self.icon_view.setVisible(0)
self.list_view.setVisible(1)
self.list_mode = 1
self.icon_mode = 0
elif self.list_mode:
self.icon_view.setVisible(1)
self.list_view.setVisible(0)
self.list_mode = 0
self.icon_mode = 1
if self.icon_mode:
self.toggle_view_button.setIcon(QtGui.QIcon(ressources._tool_list_view_))
gui_utils.modify_application_tooltip(self.toggle_view_button, "Switch to list view")
elif self.list_mode:
self.toggle_view_button.setIcon(QtGui.QIcon(ressources._tool_icon_view_))
gui_utils.modify_application_tooltip(self.toggle_view_button, "Switch to icon view")
self.refresh()
self.set_selection(selection)
def set_view_as_icon_mode(self):
if self.icon_mode == 1:
pass
else:
self.toggle_view()
def set_view_as_list_mode(self):
if self.list_mode == 1:
pass
else:
self.toggle_view()
def set_context(self):
if self.icon_mode == 1:
current_mode = 'icon'
else:
current_mode = 'list'
context_dic = dict()
context_dic['view_mode'] = current_mode
user.user().add_context(user_vars._versions_context_, context_dic)
def get_context(self):
context_dic = user.user().get_context(user_vars._versions_context_)
if context_dic is not None and context_dic != dict():
if context_dic['view_mode'] == 'icon':
self.set_view_as_icon_mode()
elif context_dic['view_mode'] == 'list':
self.set_view_as_list_mode()
def | |
#########
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
from functools import reduce
from flask_restful_swagger import swagger
from sqlalchemy import (
asc,
bindparam,
desc,
func,
literal_column,
)
from toolz import dicttoolz
from manager_rest import manager_exceptions, utils
from manager_rest.rest.rest_decorators import (
exceptions_handled,
insecure_rest_method,
)
from manager_rest.rest.rest_utils import get_json_and_verify_params
from manager_rest.security import SecuredResource
from manager_rest.security.authorization import authorize
from manager_rest.storage.models_base import db
from manager_rest.storage.resource_models import (
Blueprint,
Deployment,
Execution,
Event,
Log,
Node,
NodeInstance,
)
class Events(SecuredResource):
"""Events resource.
Through the events endpoint a user can retrieve both events and logs as
stored in the SQL database.
"""
DEFAULT_SEARCH_SIZE = 10000
ALLOWED_FILTERS = {
'blueprint_id': (Blueprint.id, 'in'),
'execution_id': (Execution.id, 'in'),
'deployment_id': (Deployment.id, 'in'),
'event_type': (Event.event_type, 'in'),
'level': (Log.level, 'in'),
'message': ('message', 'ilike'),
}
# Map from old Elasticsearch field name to PostgreSQL one
ES_TO_PG_FILTER_FIELD = {
'message.text': 'message',
}
@staticmethod
def _apply_filters(query, model, filters):
"""Apply filters to the query.
:param query: Base query to update with filters
:type query: :class:`sqlalchemy.orm.query.Query`
:param model: Model used to filter by default
:type model:
:class:`manager_rest.storage.resource_models.Event`
:class:`manager_rest.storage.resource_models.Log`
:param filters:
Dictionary of filters where the key is the column to filter and the
value is a list of elements that can be matched using the `IN`
operator.
:type filters: dict(str, list(str))
"""
for filter_field, filter_ in filters.items():
filter_field = Events.ES_TO_PG_FILTER_FIELD.get(
filter_field, filter_field)
if filter_field == 'type':
# Filter by type is handled while building the query
continue
if filter_field not in Events.ALLOWED_FILTERS:
raise manager_exceptions.BadParametersError(
'Unknown field to filter by: {0}. '
'Allowed values: {1}'
.format(
filter_field,
', '.join(sorted(Events.ALLOWED_FILTERS.keys())),
))
model_field, filter_type = Events.ALLOWED_FILTERS[filter_field]
if isinstance(model_field, str):
model_field = getattr(model, model_field)
if filter_type == 'in':
query = query.filter(model_field.in_(filter_))
elif filter_type == 'ilike':
for filter_element in filter_:
query = query.filter(model_field.ilike(filter_element))
else:
raise ValueError(
'Unknown filter type: {0}. '
'Allowed values: ilike, in'
.format(filter_type)
)
return query
@staticmethod
def _apply_sort(query, sort):
"""Apply sorting criteria.
        Sorting will be rejected if the field doesn't match any of the column
        names that have been selected for the query. Note that since the query
        involves two models at the same time, it is not possible to just check
        a single model.
:param query: Query in which the sorting should be applied
:type query: :class:`sqlalchemy.orm.query.Query`
:param sort: Sorting criteria passed as a request argument
:type sort: dict(str, str)
:returns: Query with sorting criteria applied
:rtype: :class:`sqlalchemy.orm.query.Query`
"""
column_names = set(
column_description['name']
for column_description in query.column_descriptions
)
for field, order in sort.items():
# Drop `@` prefix for compatibility
# with old Elasticsearch based implementation
field = field.lstrip('@')
if field not in column_names:
raise manager_exceptions.BadParametersError(
'Unknown field to sort by: {}'.format(field))
order_func = asc if order == 'asc' else desc
query = query.order_by(order_func(field))
return query
@staticmethod
def _apply_range_filters(query, model, range_filters):
"""Apply range filters to query.
:param query: Query in which the filtering should be applied
:type query: :class:`sqlalchemy.orm.query.Query`
:param model: Model to use to apply the filtering
:type model:
:class:`manager_rest.storage.resource_models.Event`
:class:`manager_rest.storage.resource_models.Log`
:param range_filters: Range filters passed as a request argument
:type range_filters: dict(str, dict(str))
:returns: Query with filtering applied
:rtype: :class:`sqlalchemy.orm.query.Query`
"""
for field, range_filter in range_filters.items():
# Drop `@` prefix for compatibility
# with old Elasticsearch based implementation
field = field.lstrip('@')
if not hasattr(model, field):
raise manager_exceptions.BadParametersError(
'Unknown field to filter by range: {}'.format(field))
query = Events._apply_range_filter(
query, model, field, range_filter)
return query
@staticmethod
def _apply_range_filter(query, model, field, range_filter):
"""Apply a range filter to query.
:param query: Query in which the filtering should be applied
:type query: :class:`sqlalchemy.orm.query.Query`
:param model: Model to use to apply the filtering
:type model:
:class:`manager_rest.storage.resource_models.Event`
:class:`manager_rest.storage.resource_models.Log`
:param field: Field in the model that should be filtered
:type field: str
:param range_filter: Range filter passed as a request argument
:type range_filter: dict(str)
:returns: Query with filtering applied
:rtype: :class:`sqlalchemy.orm.query.Query`
"""
if 'from' in range_filter:
query = query.filter(getattr(model, field) >= range_filter['from'])
if 'to' in range_filter:
query = query.filter(getattr(model, field) <= range_filter['to'])
return query
@staticmethod
def _build_select_query(filters, sort, range_filters, tenant_id):
"""Build query used to list events for a given execution.
:param filters:
Filters selection.
Valid filtering criteria are:
- Type (return events or both events and logs):
{'type': ['cloudify_event', 'cloudify_log']}
- Execution:
{'execution_id': <some_id>}
- Deployment:
{'deployment_id': <some_id>}
            Results must match all the filtering criteria. In particular,
filtering by a deployment and an execution that doesn't belong to
that deployment won't return any result.
:type filters: dict(str, str)
:param sort:
Result sorting order.
The only field that is supported for now is @timestamp (note the
`@` inherited from the old Elasticsearch implementation):
{'timestamp': 'asc'}
:type sort: dict(str, str)
:param range_filters:
Filter out events that don't fall in a given range.
The only field that is supported for now is @timestamp (note the
`@` inherited from the old Elasticsearch implementation):
{'timestamp': {'from': <iso8601-date>, 'to': <iso8601-date>}}
:type range_filters: dict(str, str)
:returns:
A SQL query that returns the events found that match the conditions
passed as arguments.
:rtype: :class:`sqlalchemy.orm.query.Query`
"""
assert isinstance(filters, dict), \
'Filters is expected to be a dictionary'
subqueries = []
if (('type' not in filters or 'cloudify_event' in filters['type']) and
('level' not in filters)):
events_query = Events._build_select_subquery(
Event, filters, range_filters, tenant_id)
subqueries.append(events_query)
if (('type' not in filters or 'cloudify_log' in filters['type']) and
('event_type' not in filters)):
logs_query = Events._build_select_subquery(
Log, filters, range_filters, tenant_id)
subqueries.append(logs_query)
if subqueries:
query = reduce(
lambda left, right: left.union_all(right),
subqueries,
)
query = Events._apply_sort(query, sort)
query = (
query
.limit(bindparam('limit'))
.offset(bindparam('offset'))
)
else:
# Simple query that returns no results
# Used when filtering by a field that doesn't exist for a type
query = (
db.session.query(Event.timestamp)
                .filter(literal_column('1') == literal_column('0'))  # always false
)
return query
@staticmethod
def _build_select_subquery(model, filters, range_filters, tenant_id):
"""Build select subquery.
:param model: Model used to build the query (either Event or Log)
:type model:
:class:`manager_rest.storage.resource_models.Event`
:class:`manager_rest.storage.resource_models.Log`
:param filters: Filters passed as request argument
:type filters: dict(str, list(str))
        :param range_filters: Range filters passed as request argument
:type range_filters: dict(str, dict(str))
:returns: Select events query
:rtype: :class:`sqlalchemy.orm.query.Query`
"""
def select_column(column_name):
"""Select column from model by name.
If column is not present in the model, then select `NULL` value
instead.
:param column_name: Name of the column to select
:type column_name: str
            :return: Selected column
:rtype: :class:``
"""
if hasattr(model, column_name):
return getattr(model, column_name).label(column_name)
return literal_column('NULL').label(column_name)
query = (
db.session.query(
select_column('id'),
select_column('timestamp'),
select_column('reported_timestamp'),
Blueprint.id.label('blueprint_id'),
Deployment.id.label('deployment_id'),
Execution.id.label('execution_id'),
Execution.workflow_id.label('workflow_id'),
select_column('message'),
select_column('message_code'),
select_column('error_causes'),
select_column('event_type'),
select_column('operation'),
select_column('node_id'),
NodeInstance.id.label('node_instance_id'),
Node.id.label('node_name'),
select_column('logger'),
select_column('level'),
literal_column("'cloudify_{}'".format(model.__name__.lower()))
.label('type'),
)
.filter(model._tenant_id == tenant_id)
.outerjoin(NodeInstance, NodeInstance.id == model.node_id)
.outerjoin(Node, Node._storage_id == NodeInstance._node_fk)
.outerjoin(Execution, Execution._storage_id == model._execution_fk)
.outerjoin(Deployment,
Deployment._storage_id == Execution._deployment_fk)
.outerjoin(
Blueprint, Blueprint._storage_id == Deployment._blueprint_fk)
)
query = Events._apply_filters(query, model, filters)
query = Events._apply_range_filters(query, model, range_filters)
return query
@staticmethod
def _build_count_query(filters, range_filters, tenant_id):
"""Build query used to count events for a given execution.
:param filters:
Filters selection.
Valid filtering criteria are:
- Type (return events or both events and logs):
{'type': ['cloudify_event', 'cloudify_log']}
- Execution:
{'execution_id': <some_id>}
- Deployment:
{'deployment_id': <some_id>}
            Results must match all the filtering criteria. In particular,
filtering by a deployment and an execution that doesn't belong to
that deployment won't return any result.
:type filters: dict(str, str)
:param range_filters:
Filter out events that don't fall in a given range.
The only field that is supported for now is @timestamp (note the
`@` inherited from the old Elasticsearch implementation):
{'timestamp': {'from': <iso8601-date>, 'to': <iso8601-date>}}
:type range_filters: dict(str, str)
:returns:
A SQL query that returns the number of events found that match the
conditions passed as arguments.
:rtype: :class:`sqlalchemy.orm.query.Query`
"""
assert isinstance(filters, dict), \
'Filters is expected to be a dictionary'
subqueries = []
if (('type' not in filters or 'cloudify_event' | |
except Exception as ex:
print(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def check_index_type(self, queue=None):
check_index_type = self.input.param("check_index_type", None)
try:
for index in self.fts_obj.fts_indexes:
type = index.get_index_type()
if type == check_index_type:
self.log.info("SUCCESS: Index type check returns {0}".format(type))
else:
self.fail("FAIL: Index {0} of type {1} is expected to be of type {2}".
format(index.name,
type,
check_index_type))
except Exception as ex:
print(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
""" Use n1ql callable to create, query n1ql """
def create_n1ql_index_and_query(self, queue=None):
"""
Call this before upgrade
1. creates a gsi index, one per bucket
2. Loads fts json data
3. Runs queries and compares the results against ElasticSearch
"""
try:
n1ql_obj = N1QLCallable(self.servers)
for bucket in self.buckets:
n1ql_obj.create_gsi_index(keyspace=bucket.name, name="test_idx1",
fields="email", using="gsi", is_primary=False,
index_condition="")
result = n1ql_obj.run_n1ql_query("select * from system:indexes")
n1ql_obj.drop_gsi_index(keyspace=bucket.name, name="test_idx1",
is_primary=False)
# return self.n1ql_obj
except Exception as ex:
print(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def run_n1ql_query(self, queue=None):
try:
self.log.info("Run queries again")
n1ql_obj = N1QLCallable(self.servers)
result = n1ql_obj.run_n1ql_query("select * from system:indexes")
except Exception as ex:
print(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
""" for cbas test """
def load_sample_buckets(self, servers=None, bucketName=None, total_items=None,
rest=None):
""" Load the specified sample bucket in Couchbase """
self.assertTrue(rest.load_sample(bucketName),
"Failure while loading sample bucket: {0}".format(bucketName))
""" check for load data into travel-sample bucket """
if total_items:
import time
end_time = time.time() + 600
while time.time() < end_time:
self.sleep(20)
num_actual = 0
if not servers:
num_actual = self.get_item_count(self.master, bucketName)
else:
for server in servers:
num_actual += self.get_item_count(server, bucketName)
if int(num_actual) == total_items:
self.log.info("{0} items are loaded in the {1} bucket" \
.format(num_actual, bucketName))
break
self.log.info("{0} items are loaded in the {1} bucket" \
.format(num_actual, bucketName))
if int(num_actual) != total_items:
return False
else:
self.sleep(120)
return True
def execute_statement_on_cbas_via_rest(self, statement, mode=None, rest=None,
timeout=120, client_context_id=None,
                                           username=None, password=None):
"""
Executes a statement on CBAS using the REST API using REST Client
"""
pretty = "true"
if not rest:
rest = RestConnection(self.cbas_node)
try:
self.log.info("Running query on cbas: {0}".format(statement))
response = rest.execute_statement_on_cbas(statement, mode, pretty,
timeout, client_context_id,
username, password)
response = json.loads(response)
if "errors" in response:
errors = response["errors"]
else:
errors = None
if "results" in response:
results = response["results"]
else:
results = None
if "handle" in response:
handle = response["handle"]
else:
handle = None
return response["status"], response[
"metrics"], errors, results, handle
except Exception as e:
raise Exception(str(e))
def create_bucket_on_cbas(self, cbas_bucket_name, cb_bucket_name,
cb_server_ip=None,
validate_error_msg=False,
username=None, password=None):
"""
Creates a bucket on CBAS
"""
if cb_server_ip:
cmd_create_bucket = "create bucket " + cbas_bucket_name + \
" with {\"name\":\"" + cb_bucket_name + \
"\",\"nodes\":\"" + cb_server_ip + "\"};"
else:
'''DP3 doesn't need to specify cb server ip as cbas node is
part of the cluster.
'''
cmd_create_bucket = "create bucket " + cbas_bucket_name + \
" with {\"name\":\"" + cb_bucket_name + "\"};"
status, metrics, errors, results, _ = \
self.execute_statement_on_cbas_via_rest(cmd_create_bucket,
username=username,
password=password)
if validate_error_msg:
return self.validate_error_in_response(status, errors)
else:
if status != "success":
return False
else:
return True
def create_dataset_on_bucket(self, cbas_bucket_name, cbas_dataset_name,
where_field=None, where_value=None,
validate_error_msg=False, username=None,
                                 password=None):
"""
Creates a shadow dataset on a CBAS bucket
"""
cmd_create_dataset = "create shadow dataset {0} on {1};".format(
cbas_dataset_name, cbas_bucket_name)
if where_field and where_value:
cmd_create_dataset = "create shadow dataset {0} on {1} WHERE `{2}`=\"{3}\";" \
.format(cbas_dataset_name, cbas_bucket_name,
where_field, where_value)
status, metrics, errors, results, _ = \
self.execute_statement_on_cbas_via_rest(cmd_create_dataset,
username=username,
password=password)
if validate_error_msg:
return self.validate_error_in_response(status, errors)
else:
if status != "success":
return False
else:
return True
def test_create_dataset_on_bucket(self):
# Create bucket on CBAS
self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name,
cb_bucket_name=self.cb_bucket_name,
cb_server_ip=self.cb_server_ip)
# Create dataset on the CBAS bucket
result = self.create_dataset_on_bucket(
cbas_bucket_name=self.cbas_bucket_name_invalid,
cbas_dataset_name=self.cbas_dataset_name,
validate_error_msg=self.validate_error)
if not result:
self.fail("FAIL : Actual error msg does not match the expected")
def create_scope(self, num_scope=2, rest=None, cli=None):
bucket_name = self.buckets[0].name
if rest:
rest_client = rest
else:
rest_client = self.rest_col
if cli:
cli_client = cli
else:
cli_client = self.cli_col
for x in range(num_scope):
scope_name = "scope{0}".format(x)
if self.non_ascii_name:
scope_name = self.non_ascii_name + str(x)
if self.use_rest:
rest_client.create_scope(bucket=bucket_name, scope=scope_name,
params=None)
else:
cli_client.create_scope(bucket=bucket_name, scope=scope_name)
def delete_scope(self, num_scope=2):
bucket_name = self.buckets[0].name
for x in range(num_scope):
scope_name = "scope{0}".format(x)
if self.non_ascii_name:
scope_name = self.non_ascii_name + str(x)
if self.use_rest:
self.rest_col.delete_scope(bucket_name, scope_name)
else:
self.cli_col.delete_scope(scope_name, bucket=bucket_name)
def create_collection(self, num_collection=1, rest=None, cli=None):
bucket_name = self.buckets[0].name
if rest:
rest_client = rest
else:
rest_client = self.rest_col
if cli:
cli_client = cli
else:
cli_client = self.cli_col
scopes = self.get_bucket_scope(rest_client, cli_client)
if scopes:
for x in range(num_collection):
for scope in scopes:
if bucket_name in scope:
continue
if self.use_rest:
rest_client.create_collection(bucket=bucket_name, scope=scope,
collection="mycollection_{0}_{1}".format(scope, x))
else:
cli_client.create_collection(bucket=bucket_name, scope=scope,
collection="mycollection_{0}_{1}".format(scope, x))
self.sleep(10, "time needs for stats up completely")
def delete_collection(self, num_collection=1):
bucket_name = self.buckets[0].name
for x in range(num_collection):
if self.use_rest:
self.rest_col.delete_collection(bucket=bucket_name, scope="_{0}".format(bucket_name),
collection="_{0}".format(bucket_name))
else:
self.cli_col.delete_collection(bucket=bucket_name, scope="_{0}".format(bucket_name),
collection="_{0}".format(bucket_name))
def get_col_item_count(self, server=None, bucket=None, scope=None, collection=None, cluster_stats=None):
if not server:
raise("Need to pass which server to get item count")
if not cluster_stats:
raise("Need to pass cluster stats to get item count")
if not scope:
scope = "_default"
if not collection:
collection = "_default"
if not bucket:
bucket = "default"
return cluster_stats.get_collection_item_count(bucket, scope, collection, server, cluster_stats)
def _create_scope_collection(self, rest=None, cli=None, bucket=None):
if rest:
self.rest_col = rest
if cli:
self.cli_col = cli
if bucket:
bucket_name = bucket
else:
bucket_name = self.buckets[0].name
for x in range(self.num_scopes):
scope_name = "scope{0}".format(x)
if self.non_ascii_name:
scope_name = self.non_ascii_name + str(x)
if self.use_rest:
self.rest_col.create_scope_collection(bucket_name, scope_name,
"mycollection_{0}".format(scope_name))
else:
self.cli_col.create_scope_collection(bucket_name, scope_name,
"mycollection_{0}".format(scope_name))
def get_bucket_scope(self, rest=None, cli=None):
bucket_name = self.buckets[0].name
if rest:
rest_client = rest
else:
rest_client = self.rest_col
if cli:
cli_client = cli
else:
cli_client = self.cli_col
if self.use_rest:
scopes = rest_client.get_bucket_scopes(bucket_name)
else:
scopes = cli_client.get_bucket_scopes(bucket_name)[0]
return scopes
    def get_bucket_collection(self):
bucket_name = self.buckets[0].name
collections = None
if self.use_rest:
collections = self.rest_col.get_bucket_collections(bucket_name)
else:
collections = self.cli_col.get_bucket_collections(bucket_name)
return collections
def get_collection_stats(self, buckets=None):
""" return output, error """
if buckets:
bucket = buckets[0]
else:
bucket = self.buckets[0]
return self.stat_col.get_collection_stats(bucket)
def get_collection_names(self):
bucket = self.buckets[0]
output, error = self.shell.execute_cbstats(bucket, "collections",
cbadmin_user="Administrator",
options=" | grep ':name'")
collection_names = []
if output:
for x in output:
if "_default" in x:
continue
collection_names.append(x.split(":name:")[1].strip())
return collection_names, error
def get_scopes_id(self, scope, bucket=None, stat_col=None):
if bucket:
get_bucket = bucket
else:
get_bucket = self.buckets[0]
if stat_col:
self.stat_col = stat_col
return self.stat_col.get_scope_id(get_bucket, scope)
def get_collections_id(self, scope, collection, bucket=None, stat_col=None):
if bucket:
get_bucket = bucket
else:
get_bucket = self.buckets[0]
if stat_col:
self.stat_col = stat_col
return self.stat_col.get_collection_id(get_bucket, scope, collection)
def get_collection_load_id(self):
scopes = self.get_bucket_scope()
scopes_id = []
scopes_names_ids = {}
        for scope in list(scopes):  # iterate over a copy; "_default" is removed below
if scope == "_default":
scopes.remove(scope)
continue
self.log.info("get scope id of scope: {0}".format(scope))
scope_id = self.get_scopes_id(scope)
if scope_id is None:
self.sleep(5, "wait for stats is up completely")
scope_id = self.get_scopes_id(scope)
scopes_names_ids[scope] = scope_id
scopes_id.append(scope_id)
self.load_scope = scopes[0]
collections = self.get_bucket_collection()
collections_id = []
        for collection in list(collections):  # iterate over a copy; entries are removed below
if collection == "_default":
collections.remove(collection)
continue
if self.load_scope not in collection:
continue
collection_id = self.get_collections_id(self.load_scope,collection)
collections_id.append(self.get_collections_id(self.load_scope,collection))
collections_id = list(filter(None, collections_id))
if collections_id:
self.load_scope_id = scopes_names_ids[self.load_scope]
return collections_id[0]
else:
return "0x0"
def load_collection_all_buckets(self, cluster=None, item_size=125, ratio=0.9, command_options=""):
cluster = self.master
shell = RemoteMachineShellConnection(self.master)
for bucket in self.buckets:
self.sleep(7)
shell.execute_cbworkloadgen(cluster.rest_username,
cluster.rest_password,
self.num_items,
ratio,
bucket.name,
item_size,
command_options)
def load_to_collections_bucket(self):
self.load_collection_id = self.get_collection_load_id()
option = " -c {0} ".format(self.load_collection_id)
self.sleep(10)
self.load_collection_all_buckets(command_options=option )
def _verify_collection_data(self):
items_match = False
self.sleep(10)
upgrade_nodes = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True,
master=self.upgrade_master_node)
items = self.stat_col.get_scope_item_count(self.buckets[0], self.load_scope,
node=upgrade_nodes)
if int(self.num_items) == int(items):
items_match = True
self.log.info("data loaded to collecion")
if not items_match:
self.log.error("Failed to load to collection")
def pre_upgrade(self, servers):
if self.rest is None:
self._new_master(self.master)
self.ddocs_num = self.input.param("ddocs_num", 0)
if int(self.ddocs_num) > 0:
self.create_ddocs_and_views()
verify_data = False
if self.scan_consistency != "request_plus":
verify_data = True
self.gens_load = self.generate_docs(self.docs_per_day)
self.load(self.gens_load, flag=self.item_flag,
verify_data=verify_data, batch_size=self.batch_size)
rest = RestConnection(servers[0])
output, rq_content, header = rest.set_auto_compaction(dbFragmentThresholdPercentage=20, viewFragmntThresholdPercentage=20)
self.assertTrue(output, "Error in set_auto_compaction... {0}".format(rq_content))
status, content, header = rest.set_indexer_compaction(mode="full", fragmentation=20)
self.assertTrue(status, "Error in setting Append Only Compaction... {0}".format(content))
operation_type = self.input.param("pre_upgrade", "")
if operation_type:
self.n1ql.run_async_index_operations(operation_type)
def during_upgrade(self, servers):
self.ddocs_num = self.input.param("ddocs_num", 0)
if int(self.ddocs_num) > 0:
self.create_ddocs_and_views()
kv_tasks = self.async_run_doc_ops()
operation_type = self.input.param("during_upgrade", "")
self.n1ql.run_async_index_operations(operation_type)
for task in kv_tasks:
task.result()
def post_upgrade(self, servers):
self.log.info(" Doing post upgrade")
self.ddocs_num = self.input.param("ddocs_num", | |
# -*- coding: utf-8 -*-
import base64
import uuid
#from boto.s3.connection import S3Connection
#from boto.s3.key import Key
#from boto.exception import S3ResponseError
from mongoengine import (connect, Document, DictField, MapField,
ListField, StringField, IntField, URLField,
DynamicField)
from SuperGLU.Services.StorageService.Storage_Service_Interface import (SerializedStorage,
DATA_TYPE_DB, DATA_TYPE_MEDIA, SERIALIZABLE_DATA_TYPE)
from SuperGLU.Util.ErrorHandling import logError, logWarning
from SuperGLU.Util.Serialization import nativizeObject, serializeObject, NamedSerializable, JSON_FORMAT
VALID_STORAGE_TYPES = (DATA_TYPE_DB, DATA_TYPE_MEDIA)
CONTENT_TYPES = {'image/jpeg': 'jpg',
'application/json': 'json'}
# WARNING: In dealing with MongoEngine, all objects are cached views
# If you have multiple such views, they will NOT be informed
# when you change the other views. Be advised.
class StorageObject(Document):
"""
Data in the storage service
@param key: A unique id (GUID) for the object
@param name: A name for the object. Should be unique within bucket.
@param value: Stored value
@param description: Description of this value
@param content_type: A content type (e.g., Serializable)
"""
key = StringField(unique=True, primary_key=True)
name = StringField(unique=True)
value = DynamicField()
data_type = StringField()
description = StringField()
class S3StorageObject(Document):
"""
Stub for data in the S3 Storage service
"""
key = StringField(unique=True, primary_key=True)
name = StringField(unique=True)
value = DynamicField()
data_type = StringField()
description = StringField()
link = URLField()
class MongoStorageService(SerializedStorage):
""" Service that wraps Mongo-backed Buckets """
def __init__(self, conn=None, *args, **kwds):
super(MongoStorageService, self).__init__(*args, **kwds)
if isinstance(conn, str):
conn = connect(conn)
self._conn = conn
def hasBucket(self, name):
return (name in self.getBucketNames())
def getBucket(self, name):
return Bucket.objects(bucket_name=name).first()
def getBucketNames(self):
return [bucket.bucket_name for bucket in Bucket.objects]
def addBucket(self, name):
if not self.hasBucket(name):
bucket = Bucket.make(name)
bucket.save()
def _renameBucket(self, oldName, newName):
bucket = self.getBucket(oldName)
if bucket:
bucket.setBucketName(newName)
else:
logWarning("Could not rename missing bucket: %s"(oldName,))
def delBucket(self, name):
if name is not None:
buckets = Bucket.objects(bucket_name=name)
buckets.delete()
else:
logWarning("Could not remove missing bucket: %s"(name,))
class Bucket(Document):
"""
A bucket (e.g., project) where data is stored
"""
STORAGE_ADAPTORS = (StorageObject, S3StorageObject)
bucket_name = StringField(max_length=120, unique=True,
required=True, primary_key=True)
name_map = MapField(StringField()) # {name: key} (Can just search for this, if needed)
adaptor_map = MapField(IntField()) # {key: STORAGE_ADAPTORS index}
tag_maps = MapField(ListField(StringField())) # {tag_name: keys[]}
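    # For illustration (hypothetical values): a bucket holding one stored
    # object named "lesson1" tagged "demo" might carry
    #     name_map    = {'lesson1': '9f1c...'}
    #     adaptor_map = {'9f1c...': 0}        # index into STORAGE_ADAPTORS
    #     tag_maps    = {'demo': ['9f1c...']}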
@classmethod
def make(cls, bucketName, tagMaps=None, adaptorMap=None, nameMap=None,
storageAdaptors=None):
if tagMaps is None: tagMaps = {}
if adaptorMap is None: adaptorMap = {}
if nameMap is None: nameMap = {}
return cls(**{'bucket_name' : bucketName, 'tag_maps' : tagMaps,
'name_map' : nameMap, 'adaptor_map' : adaptorMap})
# Bucket Naming
def getBucketName(self):
return self.bucket_name
def setBucketName(self, name):
self.reload()
self.bucket_name = name
self.save()
self.reload()
# Resolving Reference ID's and Names
def hasKey(self, key):
return isinstance(key, basestring) and key in self.adaptor_map
def hasName(self, name):
return isinstance(name, basestring) and name in self.name_map
def hasRef(self, key=None, name=None):
key = self.resolveRef(key, name)
return self.hasKey(key)
def resolveRef(self, key=None, name=None):
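        # Resolution order: a supplied key wins when it exists and either no
        # name was given or the given name matches that key's stored name;
        # otherwise a known name alone is looked up in name_map; else None.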
ref = None
if name == '':
name = None
if (key is not None or name is not None):
if ((name is None and self.hasKey(key)) or
((name is not None) and (self.getName(key) == name))):
ref = key
elif (key is None and self.hasName(name)):
ref = self.name_map[name]
return ref
def getMatchingKeys(self, tags=None, storageType=None):
if tags is not None:
keys = set()
for tag in tags:
if isinstance(tag, basestring) and tag in self.tag_maps:
keys.update([k for k in self.tag_maps[tag] if
((storageType is None) or
(storageType == self.adaptor_map.get(k, None)))])
keys = list(keys)
elif (storageType is not None):
keys = [k for k in self.adaptor_map.keys() if
(storageType == self.adaptor_map.get(k, None))]
else:
keys = self.adaptor_map.keys()
return keys
# Managing Tags
def hasTag(self, tag):
return isinstance(tag, basestring) and tag in self.tag_maps
def hasTagKey(self, tag, key=None, name=None):
key = self.resolveRef(key, name)
if self.hasTag(tag) and self.hasKey(key):
return key in self.tag_maps[tag]
return False
def getObjectTags(self, key=None, name=None):
key = self.resolveRef(key, name)
if self.hasKey(key):
tags = [tag for tag in self.tag_maps
if key in self.tag_maps[tag]]
return tags
else:
return []
def addTagKey(self, tag, key=None, name=None, delaySave=False):
if not delaySave:
self.reload()
key = self.resolveRef(key, name)
try:
tag = str(tag)
except Exception:
logWarning("ERROR: Invalid tag couldn't convert from unicode")
tag = None
if key is not None and isinstance(tag, str) and not self.hasTagKey(tag, key):
if self.hasTag(tag):
self.tag_maps[tag].append(key)
else:
self.tag_maps[tag] = [key]
if not delaySave:
self.save()
self.reload()
def delTagKey(self, tag, key=None, name=None, delaySave=False):
if not delaySave:
self.reload()
key = self.resolveRef(key, name)
if self.hasTagKey(tag, key):
self.tag_maps[tag].remove(key)
if not delaySave:
self.save()
self.reload()
def changeTags(self, tags, key=None, name=None, delaySave=False):
if not delaySave:
self.reload()
key = self.resolveRef(key, name)
oldTags = self.getObjectTags(key)
obseleteTags = set(oldTags)
# Add New Tags
for tag in tags:
self.addTagKey(tag, key, delaySave=delaySave)
if tag in obseleteTags:
obseleteTags.remove(tag)
# Remove ones not in the new set
for tag in obseleteTags:
self.delTagKey(tag, key, delaySave=delaySave)
if not delaySave:
self.save()
self.reload()
def getStorageAdaptor(self, key=None, name=None):
if name is not None:
key = self.resolveRef(key, name)
if self.hasKey(key):
adaptorId = self.adaptor_map[key]
return self.STORAGE_ADAPTORS[adaptorId]
return None
def _getData(self, key):
if key is None:
return None
storage = self.getStorageAdaptor(key)
if storage is not None:
data = storage.objects(key=key).first()
if data is None: logWarning("No data for: ", key)
return data
logWarning("No data for: ", key)
return None
def getValue(self, key=None, name=None):
key = self.resolveRef(key, name)
data = self._getData(key)
if data is not None:
return data.value
else:
return None
def getLink(self, key=None, name=None):
key = self.resolveRef(key, name)
data = self._getData(key)
if data is not None and isinstance(data, S3StorageObject):
# Replace this with the S3 link in the future
return "prod.x-in-y.com"
else:
return None
def getName(self, key):
data = self._getData(key)
if data is not None:
if data.name != '':
return data.name
else:
return None
else:
return None
def changeName(self, newName, key=None, name=None):
self.reload()
key = self.resolveRef(key, name)
data = self._getData(key)
isChanged, data = self._changeName(newName, data)
if isChanged:
data.save()
self.save()
self.reload()
return True
else:
return False
def _changeName(self, newName, data):
if newName is None:
newName = ''
if ((data is not None) and
(newName == '' or not self.hasName(newName))):
key = data.key
name = self.getName(key)
# Update any internal naming data
if data.data_type == SERIALIZABLE_DATA_TYPE:
value = data.value
value = nativizeObject(value, None, JSON_FORMAT)
if isinstance(value, NamedSerializable):
if newName == '':
value.setName(None)
else:
value.setName(newName)
value = serializeObject(value, JSON_FORMAT)
data.value = value
# Update the storage service data
data.name = newName
if (name is not None):
del self.name_map[name]
if newName != '':
self.name_map[newName] = key
isChanged = True
else:
isChanged = False
return isChanged, data
def getDescription(self, key=None, name=None):
key = self.resolveRef(key, name)
data = self._getData(key)
if data is not None:
return data.description
else:
return None
def getDataType(self, key=None, name=None):
key = self.resolveRef(key, name)
data = self._getData(key)
if data is not None:
return data.data_type
else:
return None
def setValue(self, key=None, value=None, name=None, description=None, tags=None,
storageType=None, dataType=None, allowOverwrite=False, allowCreate=True):
self.reload()
logWarning("SETTING VALUE")
hasKey = self.hasKey(key)
hasName = self.hasName(name)
ref = self.resolveRef(key, name)
# Make sure reference is valid, if any given
if (ref is None) and ((hasKey and hasName) or
(hasName and key is not None)):
logWarning("INVALID: Mismatched unique keys in set value: (key=%s, name=%s)"%(key, name))
return False
# Overwrite existing data
# This is aborted if another entry uses has the new 'name'
elif (ref is not None) and allowOverwrite:
return self._updateValue(key, value, name, description,
tags, storageType, dataType)
# Create a new entry
# The key must not already exist and a non-None value must be given
elif (ref is None) and allowCreate:
return self._createValue(key, value, name, description,
tags, storageType, dataType)
else:
logWarning('INVALID CONDITION')
return False
def _updateValue(self, key, value=None, name=None, description=None, tags=None,
storageType=None, dataType=None):
key = self.resolveRef(key, name)
currentName = self.getName(key)
data = self._getData(key)
if key is not None and data is not None:
if name is not None and currentName != name:
isChanged, data = self._changeName(name, data)
if not isChanged:
# Failed on change name attempt
logWarning("Failed to update, rename failed: ", name)
return False
if value is not None:
data.value = value
if dataType is not None:
data.data_type = dataType
if description is not None:
data.description = description
if storageType is not None:
# @TODO: Fix this so it works appropriately
# (e.g., changes the stored object type)
# For now, no-op
# self.adaptor_map[key] = storageType
pass
if tags is not None:
self.changeTags(tags, key, delaySave=True)
data.save()
self.save()
self.reload()
return True
else:
| |
of the variables
sage: TateAlgebra.create_key(ZZ)
Traceback (most recent call last):
...
TypeError: the base ring must be a p-adic ring or a p-adic field
sage: TateAlgebra.create_key(Zp(2), names=['x','y'], log_radii=[1])
Traceback (most recent call last):
...
ValueError: the number of radii does not match the number of variables
sage: TateAlgebra.create_key(Zp(2), names=['x','y'], log_radii=[0, 1/2])
Traceback (most recent call last):
...
NotImplementedError: only integral log_radii are implemented
sage: TateAlgebra.create_key(Zp(2), names=['x','y'], order='myorder')
Traceback (most recent call last):
...
ValueError: unknown term order 'myorder'
"""
if not isinstance(base, pAdicGeneric):
raise TypeError("the base ring must be a p-adic ring or a p-adic field")
# TODO: allow for arbitrary CDVF
base = base.fraction_field()
if names is None:
raise ValueError("you must specify the names of the variables")
names = normalize_names(-1, names)
ngens = len(names)
if not isinstance(log_radii, (list, tuple)):
try:
log_radii = [ZZ(log_radii)] * ngens
except TypeError:
raise NotImplementedError("only integral log_radii are implemented")
elif len(log_radii) != ngens:
raise ValueError("the number of radii does not match the number of variables")
else:
try:
log_radii = [ ZZ(r) for r in log_radii ]
except TypeError:
raise NotImplementedError("only integral log_radii are implemented")
order = TermOrder(order, ngens)
if prec is None:
prec = base.precision_cap()
key = (base, prec, tuple(log_radii), names, order)
return key
def create_object(self, version, key):
"""
Create an object using the given key.
TESTS::
sage: key = TateAlgebra.create_key(Zp(2), names=('x','y'))
sage: TateAlgebra.create_object((8,4,6), key)
Tate Algebra in x (val >= 0), y (val >= 0) over 2-adic Field with capped relative precision 20
"""
(base, prec, log_radii, names, order) = key
return TateAlgebra_generic(base, prec, log_radii, names, order)
TateAlgebra = TateAlgebraFactory("TateAlgebra")
# Parent for terms
##################
class TateTermMonoid(Monoid_class, UniqueRepresentation):
r"""
A base class for Tate algebra terms
A term in a Tate algebra `K\{X_1,\dots,X_n\}` (resp. in its ring of
integers) is a monomial in this ring.
Those terms form a pre-ordered monoid, with term multiplication and the
term order of the parent Tate algebra.
"""
Element = TateAlgebraTerm
def __init__(self, A):
r"""
Initialize the Tate term monoid
INPUT:
- ``A`` -- a Tate algebra
EXAMPLES::
sage: R = pAdicRing(2, 10)
sage: A.<x,y> = TateAlgebra(R, log_radii=1)
sage: T = A.monoid_of_terms(); T
Monoid of terms in x (val >= -1), y (val >= -1) over 2-adic Field with capped relative precision 10
TESTS::
sage: A.<x,y> = TateAlgebra(Zp(2), log_radii=1)
sage: T = A.monoid_of_terms()
sage: TestSuite(T).run()
"""
# This function is not exposed to the user
# so we do not check the inputs
names = A.variable_names()
Monoid_class.__init__(self, names)
self._base = A.base_ring()
self._field = A._field
self._names = names
self._latex_names = A._latex_names
self._ngens = len(names)
self._log_radii = ETuple(A.log_radii())
self._order = A.term_order()
self._sortkey = self._order.sortkey
self._integral = A._integral
self._parent_algebra = A
def _repr_(self):
r"""
Return a string representation of this Tate term monoid
EXAMPLES::
sage: R = pAdicRing(2, 10)
sage: A.<x,y> = TateAlgebra(R, log_radii=[1,1], order="lex")
sage: A.monoid_of_terms() # indirect doctest
Monoid of terms in x (val >= -1), y (val >= -1) over 2-adic Field with capped relative precision 10
"""
if self._ngens == 0:
return "Monoid of terms over %s" % self._base
vars = ", ".join("%s (val >= %s)" % (var, -r)
for var, r in zip(self._names, self._log_radii))
return "Monoid of terms in %s over %s" % (vars, self._base)
def _latex_(self):
r"""
Return a LaTeX representation of this Tate term monoid
EXAMPLES::
sage: R = pAdicRing(2, 10)
sage: A.<x,y> = TateAlgebra(R, log_radii=[1,1], order="lex")
sage: M = A.monoid_of_terms()
sage: M._latex_()
'\\verb"Terms"(\\Bold{Q}_{2}\\{x,y\\}_{(1,1)})'
"""
return '\\verb"Terms"(%s)' % self._parent_algebra._latex_()
def _coerce_map_from_(self, R):
r"""
Return ``True`` if ``R`` coerces to this monoid.
EXAMPLES::
sage: R = Zp(2, 10, print_mode='digits')
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
A ring coerces into a monoid of terms if and only if
it coerces into its base ring::
sage: T.has_coerce_map_from(ZZ) # indirect doctest
True
sage: T.has_coerce_map_from(GF(2)) # indirect doctest
False
::
sage: S.<a> = Zq(4)
sage: B.<x,y> = TateAlgebra(S)
sage: U = B.monoid_of_terms()
sage: U.has_coerce_map_from(T) # indirect doctest
True
sage: T.has_coerce_map_from(U) # indirect doctest
False
Note that a Tate algebra does not coerce into a monoid of terms::
sage: U.has_coerce_map_from(A) # indirect doctest
False
sage: T.has_coerce_map_from(B) # indirect doctest
False
Variable names must match exactly::
sage: B.<x,z> = TateAlgebra(R)
sage: U = B.monoid_of_terms()
sage: T.has_coerce_map_from(U) # indirect doctest
False
sage: U.has_coerce_map_from(T) # indirect doctest
False
and appear in the same order::
sage: B.<y,x> = TateAlgebra(R); B
Tate Algebra in y (val >= 0), x (val >= 0) over 2-adic Field with capped relative precision 10
sage: U = B.monoid_of_terms()
sage: T.has_coerce_map_from(U) # indirect doctest
False
sage: U.has_coerce_map_from(T) # indirect doctest
False
Term orders must also match::
sage: B.<x,y> = TateAlgebra(R, order="lex")
sage: U = B.monoid_of_terms()
sage: T.has_coerce_map_from(U) # indirect doctest
False
sage: U.has_coerce_map_from(T) # indirect doctest
False
"""
base = self._base
if base.has_coerce_map_from(R):
return True
if isinstance(R, TateTermMonoid):
return self._parent_algebra.has_coerce_map_from(R.algebra_of_series())
def prime(self):
"""
Return the prime, that is the characteristic of the residue field.
EXAMPLES::
sage: R = Zp(3)
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
sage: T.prime()
3
"""
return self._base.prime()
def algebra_of_series(self):
r"""
Return the Tate algebra corresponding to this Tate term monoid.
EXAMPLES::
sage: R = Zp(2, 10)
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
sage: T.algebra_of_series()
Tate Algebra in x (val >= 0), y (val >= 0) over 2-adic Field with capped relative precision 10
sage: T.algebra_of_series() is A
True
"""
return self._parent_algebra
def base_ring(self):
r"""
Return the base ring of this Tate term monoid.
EXAMPLES::
sage: R = Zp(2, 10)
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
sage: T.base_ring()
2-adic Field with capped relative precision 10
We observe that the base field is not ``R`` but its
fraction field::
sage: T.base_ring() is R
False
sage: T.base_ring() is R.fraction_field()
True
If we really want to create an integral Tate algebra,
we have to invoke the method :meth:`integer_ring`::
sage: Ao = A.integer_ring(); Ao
Integer ring of the Tate Algebra in x (val >= 0), y (val >= 0) over 2-adic Field with capped relative precision 10
sage: Ao.base_ring()
2-adic Ring with capped relative precision 10
sage: Ao.base_ring() is R
True
"""
return self._base
def variable_names(self):
r"""
Return the names of the variables of this Tate term monoid.
EXAMPLES::
sage: R = Zp(2, 10)
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
sage: T.variable_names()
('x', 'y')
"""
return self._names
def log_radii(self):
r"""
Return the log radii of convergence of this Tate term monoid.
EXAMPLES::
sage: R = Zp(2, 10)
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
sage: T.log_radii()
(0, 0)
sage: B.<x,y> = TateAlgebra(R, log_radii=[1,2])
sage: B.monoid_of_terms().log_radii()
(1, 2)
"""
return tuple(self._log_radii)
def term_order(self):
r"""
Return the term order on this Tate term monoid.
EXAMPLES::
sage: R = Zp(2, 10)
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
sage: T.term_order() # default term order is grevlex
Degree reverse lexicographic term order
sage: A.<x,y> = TateAlgebra(R, order='lex')
sage: T = A.monoid_of_terms()
sage: T.term_order()
Lexicographic term order
"""
return self._order
def ngens(self):
r"""
Return the number of variables in the Tate term monoid
EXAMPLES::
sage: R = Zp(2, 10)
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
sage: T.ngens()
2
"""
return self._ngens
def gens(self):
r"""
Return the list of generators of this monoid of terms.
EXAMPLES::
sage: R = Zp(2, 10, print_mode='digits')
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
sage: T.gens()
(...0000000001*x, ...0000000001*y)
"""
return tuple([self(g) for g in self._parent_algebra.gens()])
def gen(self, n=0):
r"""
Return the ``n``-th generator of this monoid of terms.
INPUT:
- ``n`` -- an integer (default: ``0``), the index of
the requested generator
EXAMPLES::
sage: R = Zp(2, 10, print_mode='digits')
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
sage: T.gen()
...0000000001*x
sage: T.gen(0)
...0000000001*x
sage: T.gen(1)
...0000000001*y
sage: T.gen(2)
Traceback (most recent call last):
...
ValueError: generator not defined
"""
return self(self._parent_algebra.gen(n))
def some_elements(self):
"""
Return a list of elements in this monoid of terms.
EXAMPLES::
sage: R = Zp(2, 10, print_mode='digits')
sage: A.<x,y> = TateAlgebra(R)
sage: T = A.monoid_of_terms()
sage: T.some_elements()
| |
96: I11i + iIii1I11I1II1 % II111iiii
O00 = True
for Oo0o0ooOoO in oOoO0oOO . rloc_set :
if ( O00 ) :
oO0Ooo0OO = oOoO0oOO . print_eid_tuple ( )
oO0Ooo0OO = oOoO0oOO . star_secondary_iid ( oO0Ooo0OO )
oO0Ooo0OO += OOOOoO0O
Oo000o = i1
O00 = False
else :
oO0Ooo0OO , Oo000o , IIiiI1iii1 = ( "" , "" , "" )
if 45 - 45: I1Ii111 * I11i / iIii1I11I1II1 / I1IiiI % II111iiii
if 49 - 49: Ii1I / iII111i . iII111i . iII111i + i11iIiiIii % I11i
O0oo0ooo0 = "" if Oo0o0ooOoO . rloc . is_null ( ) else Oo0o0ooOoO . rloc . print_address_no_iid ( )
if 7 - 7: IiII * ooOoO0o + OoOoOO00
if 22 - 22: iII111i
if ( Oo0o0ooOoO . interface != None ) :
O0oo0ooo0 += " ({})" . format ( Oo0o0ooOoO . interface )
if 48 - 48: I1ii11iIi11i . I1IiiI
if ( O0oo0ooo0 != "" ) : O0oo0ooo0 += "<br>"
if 73 - 73: O0 . I1Ii111 - OoooooooOO % I11i % i1IIi
if ( Oo0o0ooOoO . translated_rloc . not_set ( ) == False ) :
O0oo0ooo0 += "translated RLOC: {}<br>" . format ( Oo0o0ooOoO . translated_rloc . print_address_no_iid ( ) )
if 14 - 14: I1Ii111 + Ii1I * Oo0Ooo
if 49 - 49: Oo0Ooo
if 57 - 57: O0 * ooOoO0o - iII111i - iIii1I11I1II1 * iII111i
i1o0oOoooOoo0 = Oo0o0ooOoO . print_rloc_name ( True )
if ( i1o0oOoooOoo0 != "" ) : O0oo0ooo0 += i1o0oOoooOoo0 + "<br>"
if 26 - 26: O0 * Ii1I - I1IiiI - iII111i / iIii1I11I1II1
if ( Oo0o0ooOoO . geo_name != None ) :
O0oo0ooo0 += "geo: " + Oo0o0ooOoO . geo_name + "<br>"
if 57 - 57: I1ii11iIi11i - OoO0O00 * iIii1I11I1II1
if ( Oo0o0ooOoO . elp_name != None ) :
O0oo0ooo0 += "elp: " + Oo0o0ooOoO . elp_name + "<br>"
if 26 - 26: OoO0O00 % ooOoO0o % o0oOOo0O0Ooo % OoOoOO00 . iII111i % O0
if ( Oo0o0ooOoO . rle_name != None ) :
O0oo0ooo0 += "rle: " + Oo0o0ooOoO . rle_name + "<br>"
if 91 - 91: II111iiii . Oo0Ooo . oO0o - OoooooooOO / OoOoOO00
if ( Oo0o0ooOoO . json_name != None ) :
O0oo0ooo0 += "json: " + Oo0o0ooOoO . json_name + "<br>"
if 30 - 30: I11i % o0oOOo0O0Ooo + i1IIi * OoooooooOO * OoO0O00 - II111iiii
if 55 - 55: OoO0O00
if ( itr_or_etr == "ITR" ) :
output += lisp_table_row ( oO0Ooo0OO , Oo000o , O0oo0ooo0 ,
str ( Oo0o0ooOoO . priority ) + "/" + str ( Oo0o0ooOoO . weight ) ,
str ( Oo0o0ooOoO . mpriority ) + "/" + str ( Oo0o0ooOoO . mweight ) ,
oOoO0oOO . use_mr_name )
else :
ii1111I = Oo0o0ooOoO . stats . get_stats ( True , True )
output += lisp_table_row ( oO0Ooo0OO , Oo000o , O0oo0ooo0 ,
str ( Oo0o0ooOoO . priority ) + "/" + str ( Oo0o0ooOoO . weight ) ,
str ( Oo0o0ooOoO . mpriority ) + "/" + str ( Oo0o0ooOoO . mweight ) , ii1111I ,
IIiiI1iii1 , oOoO0oOO . use_ms_name )
if 20 - 20: ooOoO0o * I1Ii111 * o0oOOo0O0Ooo - ooOoO0o
if 32 - 32: Ii1I * oO0o
if 85 - 85: i11iIiiIii . OoO0O00 + OoO0O00
output += lisp_table_footer ( )
if 28 - 28: Oo0Ooo
if 62 - 62: Oo0Ooo + OoooooooOO / iII111i
if 60 - 60: Ii1I / OoOoOO00 . I11i % OOooOOo
if 61 - 61: O0 . Ii1I . O0 * i11iIiiIii * II111iiii / I1Ii111
if ( len ( lisp . lisp_geo_list ) != 0 ) :
IIIiiiiiI1I = "Configured Geo-Coordinates:"
output += lisp_table_header ( IIIiiiiiI1I , "Geo Name" ,
"Geo-Prefix or Geo-Point" )
ooo0Oo000o = sorted ( lisp . lisp_geo_list )
for OO in ooo0Oo000o :
oo0OoO = lisp . lisp_geo_list [ OO ]
output += lisp_table_row ( OO , oo0OoO . print_geo_url ( ) )
if 18 - 18: O0
output += lisp_table_footer ( )
if 14 - 14: Ii1I / IiII - O0
return ( output )
if 16 - 16: I1Ii111 % iIii1I11I1II1 . i1IIi
if 72 - 72: ooOoO0o * OOooOOo
if 69 - 69: oO0o - i11iIiiIii
if 29 - 29: Ii1I + iII111i % I1ii11iIi11i + I11i * Oo0Ooo - i11iIiiIii
if 24 - 24: i11iIiiIii . ooOoO0o + ooOoO0o - i11iIiiIii % OOooOOo
if 58 - 58: I1IiiI
if 94 - 94: o0oOOo0O0Ooo + Ii1I % o0oOOo0O0Ooo . I1Ii111 - ooOoO0o * I1IiiI
def lisp_interface_command ( kv_pair ) :
OO0oOOoOoo0OO = None
o00oO0O = None
IIi1OOoO0OooO = None
Iii = None
OoiiiIiii11i1i = None
ooo0O0Oo0O = None
Oo0o = None
i1i = None
if 68 - 68: OoOoOO00 * ooOoO0o % ooOoO0o - IiII + O0 * I1ii11iIi11i
for ii1 in kv_pair . keys ( ) :
oo00oO0O0 = kv_pair [ ii1 ]
if ( ii1 == "interface-name" ) : OO0oOOoOoo0OO = oo00oO0O0
if ( ii1 == "device" ) : o00oO0O = oo00oO0O0
if ( ii1 == "instance-id" ) : IIi1OOoO0OooO = oo00oO0O0
if ( ii1 == "dynamic-eid" ) : Iii = oo00oO0O0
if ( ii1 == "multi-tenant-eid" ) : Oo0o = oo00oO0O0
if ( ii1 == "dynamic-eid-device" ) : OoiiiIiii11i1i = oo00oO0O0
if ( ii1 == "dynamic-eid-timeout" ) : ooo0O0Oo0O = oo00oO0O0
if ( ii1 == "lisp-nat" ) : i1i = ( oo00oO0O0 == "yes" )
if 60 - 60: i11iIiiIii / i1IIi * OOooOOo
if 89 - 89: iIii1I11I1II1 * o0oOOo0O0Ooo + OoOoOO00 . i11iIiiIii + I1ii11iIi11i
if ( o00oO0O == None ) : return
if 1 - 1: I1IiiI . I11i . I1ii11iIi11i
if 19 - 19: O0 * I11i % OoooooooOO
if 36 - 36: o0oOOo0O0Ooo % I11i * I1ii11iIi11i % Ii1I + i1IIi - Oo0Ooo
if 56 - 56: I1ii11iIi11i
if 32 - 32: OoOoOO00 % O0 % i11iIiiIii - ooOoO0o . I1IiiI
if 24 - 24: oO0o % o0oOOo0O0Ooo / I1Ii111 + o0oOOo0O0Ooo
if 59 - 59: II111iiii % I1IiiI * O0 . OoooooooOO - OoooooooOO % O0
if 56 - 56: oO0o - i1IIi * OoooooooOO - II111iiii
if ( lisp . lisp_myinterfaces . has_key ( o00oO0O ) and Oo0o == None ) :
iii1I = lisp . lisp_myinterfaces [ o00oO0O ]
else :
iii1I = lisp . lisp_interface ( o00oO0O )
lisp . lisp_myinterfaces [ o00oO0O ] = iii1I
if 11 - 11: Oo0Ooo * OoooooooOO - i11iIiiIii
if 13 - 13: i11iIiiIii . O0 / OOooOOo * i1IIi
if 14 - 14: IiII + IiII . I11i / Ii1I . iIii1I11I1II1
if 10 - 10: II111iiii . OOooOOo / iII111i
if 35 - 35: iII111i / Oo0Ooo + O0 * iIii1I11I1II1 - O0
iii1I . interface_name = OO0oOOoOoo0OO
if ( IIi1OOoO0OooO != None ) :
if ( IIi1OOoO0OooO . isdigit ( ) == False ) : IIi1OOoO0OooO = "0"
iii1I . instance_id = int ( IIi1OOoO0OooO )
if 3 - 3: I1ii11iIi11i
if ( Iii != None ) :
iii1I . dynamic_eid . store_prefix ( Iii )
iii1I . dynamic_eid . instance_id = iii1I . instance_id
if 42 - 42: I11i % Oo0Ooo + IiII - I11i . iIii1I11I1II1 - Ii1I
if ( OoiiiIiii11i1i != None ) :
iii1I . dynamic_eid_device = OoiiiIiii11i1i
if 27 - 27: iII111i % Oo0Ooo . I1ii11iIi11i . i1IIi % OoOoOO00 . o0oOOo0O0Ooo
if ( ooo0O0Oo0O != None ) :
iii1I . dynamic_eid_timeout = int ( ooo0O0Oo0O )
if 37 - 37: iII111i + I1Ii111 * Ii1I + IiII
if ( Oo0o != None ) :
iii1I . multi_tenant_eid . store_prefix ( Oo0o )
iii1I . multi_tenant_eid . instance_id = int ( iii1I . instance_id )
lisp . lisp_multi_tenant_interfaces . append ( iii1I )
if 39 - 39: O0 * Oo0Ooo - I1IiiI + Ii1I / II111iiii
if ( i1i ) :
| |
# Copyright © 2019. <NAME>. All rights reserved.
import numpy as np
import pandas as pd
from collections import OrderedDict
import math
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist
from scipy.stats import chi2
from scipy.ndimage.filters import gaussian_filter1d
from .utils import Epoch
from .utils import printProgressBar, get_spike_depths
def calculate_metrics(spike_times, spike_clusters, amplitudes, pc_features, pc_feature_ind, params, \
cluster_ids=None, epochs = None, seed=0):
""" Calculate metrics for all units on one probe
Inputs:
------
spike_times : numpy.ndarray (num_spikes x 0)
Spike times in seconds (same timebase as epochs)
spike_clusters : numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
amplitudes : numpy.ndarray (num_spikes x 0)
Amplitude value for each spike time
pc_features : numpy.ndarray (num_spikes x num_pcs x num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind : numpy.ndarray (num_units x num_channels)
Channel indices of PCs for each unit
epochs : list of Epoch objects
contains information on Epoch start and stop times
params : dict of parameters
'isi_threshold' : minimum time for isi violations
'min_isi' : threshold for duplicate spikes
'num_channels_to_compare'
'max_spikes_for_unit'
'max_spikes_for_nn'
'n_neighbors'
'n_silhouette'
'drift_metrics_interval_s'
'drift_metrics_min_spikes_per_interval'
Outputs:
--------
metrics : pandas.DataFrame
one column for each metric
one row per unit per epoch
"""
metrics = pd.DataFrame()
if epochs is None:
epochs = [Epoch('complete_session', 0, np.inf)]
total_units = np.max(spike_clusters) + 1
total_epochs = len(epochs)
for epoch in epochs:
in_epoch = np.logical_and(spike_times > epoch.start_time, spike_times < epoch.end_time)
spikes_in_epoch = np.sum(in_epoch)
spikes_for_nn = min(spikes_in_epoch, params['max_spikes_for_nn'])
spikes_for_silhouette = min(spikes_in_epoch, params['n_silhouette'])
print("Calculating isi violations")
isi_viol = calculate_isi_violations(spike_times[in_epoch], spike_clusters[in_epoch], total_units, params['isi_threshold'], params['min_isi'])
print("Calculating presence ratio")
presence_ratio = calculate_presence_ratio(spike_times[in_epoch], spike_clusters[in_epoch], total_units)
print("Calculating firing rate")
firing_rate, num_spikes = calculate_firing_rate_and_spikes(spike_times[in_epoch], spike_clusters[in_epoch], total_units)
print("Calculating amplitude cutoff")
amplitude_cutoff = calculate_amplitude_cutoff(spike_clusters[in_epoch], amplitudes[in_epoch], total_units)
print("Calculating PC-based metrics")
isolation_distance, l_ratio, d_prime, nn_hit_rate, nn_miss_rate = calculate_pc_metrics(spike_clusters[in_epoch],
total_units,
pc_features[in_epoch,:,:],
pc_feature_ind,
params['num_channels_to_compare'],
params['max_spikes_for_unit'],
spikes_for_nn,
params['n_neighbors'],
seed=seed)
print("Calculating silhouette score")
silhouette_score = calculate_silhouette_score(spike_clusters[in_epoch],
total_units,
pc_features[in_epoch,:,:],
pc_feature_ind,
spikes_for_silhouette,
seed=seed)
print("Calculating drift metrics")
max_drift, cumulative_drift = calculate_drift_metrics(spike_times[in_epoch],
spike_clusters[in_epoch],
total_units,
pc_features[in_epoch,:,:],
pc_feature_ind,
params['drift_metrics_interval_s'],
params['drift_metrics_min_spikes_per_interval'])
if(cluster_ids is None):
cluster_ids_out = np.arange(total_units)
else:
cluster_ids_out = cluster_ids
epoch_name = [epoch.name] * len(cluster_ids_out)
metrics = pd.concat((metrics, pd.DataFrame(data= OrderedDict((('cluster_id', cluster_ids_out),
('num_spikes' , num_spikes),
('firing_rate' , firing_rate),
('presence_ratio' , presence_ratio),
('isi_viol' , isi_viol),
('amplitude_cutoff' , amplitude_cutoff),
('isolation_distance' , isolation_distance),
('l_ratio' , l_ratio),
('d_prime' , d_prime),
('nn_hit_rate' , nn_hit_rate),
('nn_miss_rate' , nn_miss_rate),
('silhouette_score', silhouette_score),
('max_drift', max_drift),
('cumulative_drift', cumulative_drift),
('epoch_name' , epoch_name),
)))))
return metrics
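# Illustrative sketch of the ``params`` dictionary consumed above.  Every key
# listed here is read somewhere in ``calculate_metrics``; the numeric values
# are assumptions chosen for the example, not the project's defaults, and the
# input arrays are presumed to already exist.
#
#     example_params = {
#         'isi_threshold': 0.0015,                      # s
#         'min_isi': 0.00017,                           # s
#         'num_channels_to_compare': 13,                # must be odd
#         'max_spikes_for_unit': 500,
#         'max_spikes_for_nn': 10000,
#         'n_neighbors': 4,
#         'n_silhouette': 10000,
#         'drift_metrics_interval_s': 60,
#         'drift_metrics_min_spikes_per_interval': 10,
#     }
#     metrics = calculate_metrics(spike_times, spike_clusters, amplitudes,
#                                 pc_features, pc_feature_ind, example_params)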
# ===============================================================
# HELPER FUNCTIONS TO LOOP THROUGH CLUSTERS:
# ===============================================================
def calculate_isi_violations(spike_times, spike_clusters, total_units, isi_threshold, min_isi):
cluster_ids = np.unique(spike_clusters)
viol_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
viol_rates[cluster_id], num_violations = isi_violations(spike_times[for_this_cluster],
min_time = np.min(spike_times),
max_time = np.max(spike_times),
isi_threshold=isi_threshold,
min_isi = min_isi)
return viol_rates
def calculate_presence_ratio(spike_times, spike_clusters, total_units):
cluster_ids = np.unique(spike_clusters)
ratios = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
ratios[cluster_id] = presence_ratio(spike_times[for_this_cluster],
min_time = np.min(spike_times),
max_time = np.max(spike_times))
return ratios
def calculate_firing_rate_and_spikes(spike_times, spike_clusters, total_units):
cluster_ids = np.unique(spike_clusters)
firing_rates = np.zeros((total_units,))
num_spikes = np.zeros((total_units,))
min_time = np.min(spike_times)
max_time = np.max(spike_times)
for idx, cluster_id in enumerate(cluster_ids):
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
firing_rates[cluster_id] = firing_rate(spike_times[for_this_cluster],
min_time = min_time,
max_time = max_time)
num_spikes[cluster_id] = len(spike_times[for_this_cluster])
return firing_rates, num_spikes
def calculate_amplitude_cutoff(spike_clusters, amplitudes, total_units):
cluster_ids = np.unique(spike_clusters)
amplitude_cutoffs = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
amplitude_cutoffs[cluster_id] = amplitude_cutoff(amplitudes[for_this_cluster])
return amplitude_cutoffs
def calculate_pc_metrics(spike_clusters,
total_units,
pc_features,
pc_feature_ind,
num_channels_to_compare,
max_spikes_for_cluster,
spikes_for_nn,
n_neighbors,
metric_names=['isolation_distance', 'l_ratio', 'd_prime', 'nearest_neighbor'],
seed=0):
assert(num_channels_to_compare % 2 == 1)
half_spread = int((num_channels_to_compare - 1) / 2)
cluster_ids = np.unique(spike_clusters)
peak_channels = np.zeros((total_units,), dtype='uint16')
isolation_distances = np.zeros((total_units,))
l_ratios = np.zeros((total_units,))
d_primes = np.zeros((total_units,))
nn_hit_rates = np.zeros((total_units,))
nn_miss_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
for_unit = np.squeeze(spike_clusters == cluster_id)
pc_max = np.argmax(np.mean(pc_features[for_unit, 0, :],0))
peak_channels[cluster_id] = pc_feature_ind[cluster_id, pc_max]
for idx, cluster_id in enumerate(cluster_ids):
printProgressBar(cluster_id + 1, total_units)
peak_channel = peak_channels[cluster_id]
half_spread_down = peak_channel \
if peak_channel < half_spread \
else half_spread
half_spread_up = np.max(pc_feature_ind) - peak_channel \
if peak_channel + half_spread > np.max(pc_feature_ind) \
else half_spread
units_for_channel, channel_index = np.unravel_index(np.where(pc_feature_ind.flatten() == peak_channel)[0], pc_feature_ind.shape)
units_in_range = (peak_channels[units_for_channel] >= peak_channel - half_spread_down) * \
(peak_channels[units_for_channel] <= peak_channel + half_spread_up)
units_for_channel = units_for_channel[units_in_range]
channel_index = channel_index[units_in_range]
channels_to_use = np.arange(peak_channel - half_spread_down, peak_channel + half_spread_up + 1)
spike_counts = np.zeros(units_for_channel.shape)
for idx2, cluster_id2 in enumerate(units_for_channel):
spike_counts[idx2] = np.sum(spike_clusters == cluster_id2)
this_unit_idx = np.where(units_for_channel == cluster_id)[0]
if spike_counts[this_unit_idx] > max_spikes_for_cluster:
relative_counts = spike_counts / spike_counts[this_unit_idx] * max_spikes_for_cluster
else:
relative_counts = spike_counts
all_pcs = np.zeros((0, pc_features.shape[1], channels_to_use.size))
all_labels = np.zeros((0,))
for idx2, cluster_id2 in enumerate(units_for_channel):
try:
channel_mask = make_channel_mask(cluster_id2, pc_feature_ind, channels_to_use)
except IndexError:
# Occurs when pc_feature_ind does not contain all channels of interest
# In that case, we will exclude this unit for the calculation
pass
else:
subsample = int(relative_counts[idx2])
index_mask = make_index_mask(spike_clusters, cluster_id2, min_num = 0, max_num = subsample, seed=seed)
pcs = get_unit_pcs(pc_features, index_mask, channel_mask)
labels = np.ones((pcs.shape[0],)) * cluster_id2
all_pcs = np.concatenate((all_pcs, pcs),0)
all_labels = np.concatenate((all_labels, labels),0)
all_pcs = np.reshape(all_pcs, (all_pcs.shape[0], pc_features.shape[1]*channels_to_use.size))
if all_pcs.shape[0] > 10:
if 'isolation_distance' in metric_names or 'l_ratio' in metric_names:
isolation_distances[cluster_id], l_ratios[cluster_id] = mahalanobis_metrics(all_pcs, all_labels, cluster_id)
else:
isolation_distances[cluster_id] = np.nan
l_ratios[cluster_id] = np.nan
if 'd_prime' in metric_names:
d_primes[cluster_id] = lda_metrics(all_pcs, all_labels, cluster_id)
else:
d_primes[cluster_id] = np.nan
if 'nearest_neighbor' in metric_names:
nn_hit_rates[cluster_id], nn_miss_rates[cluster_id] = nearest_neighbors_metrics(all_pcs, all_labels, cluster_id, spikes_for_nn, n_neighbors)
else:
nn_hit_rates[cluster_id] = np.nan
nn_miss_rates[cluster_id] = np.nan
else:
isolation_distances[cluster_id] = np.nan
l_ratios[cluster_id] = np.nan
d_primes[cluster_id] = np.nan
nn_hit_rates[cluster_id] = np.nan
nn_miss_rates[cluster_id] = np.nan
return isolation_distances, l_ratios, d_primes, nn_hit_rates, nn_miss_rates
def calculate_silhouette_score(spike_clusters,
total_units,
pc_features,
pc_feature_ind,
spikes_for_silhouette,
seed=0):
random_spike_inds = np.random.RandomState(seed=seed).permutation(spike_clusters.size)
random_spike_inds = random_spike_inds[:spikes_for_silhouette]
num_pc_features = pc_features.shape[1]
all_pcs = np.zeros((spikes_for_silhouette, np.max(pc_feature_ind) * num_pc_features + 1))
for idx, i in enumerate(random_spike_inds):
unit_id = spike_clusters[i]
channels = pc_feature_ind[unit_id,:]
for j in range(0,num_pc_features):
all_pcs[idx, channels + np.max(pc_feature_ind) * j] = pc_features[i,j,:]
cluster_labels = spike_clusters[random_spike_inds]
cluster_ids = np.unique(cluster_labels)
SS = np.empty((total_units, total_units))
SS[:] = np.nan
for idx1, i in enumerate(cluster_ids):
printProgressBar(idx1+1, len(cluster_ids))
for idx2, j in enumerate(cluster_ids):
if j > i:
inds = np.in1d(cluster_labels, np.array([i,j]))
X = all_pcs[inds,:]
labels = cluster_labels[inds]
if len(labels) > 2:
SS[i,j] = silhouette_score(X, labels)
return np.nanmin(SS,0)
def calculate_drift_metrics(spike_times,
spike_clusters,
total_units,
pc_features,
pc_feature_ind,
interval_length,
min_spikes_per_interval):
max_drift = np.zeros((total_units,))
cumulative_drift = np.zeros((total_units,))
depths = get_spike_depths(spike_clusters, pc_features, pc_feature_ind)
interval_starts = np.arange(np.min(spike_times), np.max(spike_times), interval_length)
interval_ends = interval_starts + interval_length
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
printProgressBar(cluster_id + 1, len(cluster_ids))
in_cluster = spike_clusters == cluster_id
times_for_cluster = spike_times[in_cluster]
depths_for_cluster = depths[in_cluster]
median_depths = []
for t1, t2 in zip(interval_starts, interval_ends):
in_range = (times_for_cluster > t1) * (times_for_cluster < t2)
if np.sum(in_range) >= min_spikes_per_interval:
median_depths.append(np.median(depths_for_cluster[in_range]))
else:
median_depths.append(np.nan)
median_depths = np.array(median_depths)
max_drift[cluster_id] = np.around(np.nanmax(median_depths) - np.nanmin(median_depths),2)
cumulative_drift[cluster_id] = np.around(np.nansum(np.abs(np.diff(median_depths))),2)
return max_drift, cumulative_drift
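# Illustrative helper: the two drift numbers reduce to simple operations on
# the per-interval median depths, so they can be sanity-checked on a
# hand-made depth trace.  The function name and inputs are assumptions made
# for this sketch only.
def _demo_drift_from_medians(median_depths):
    """Return (max_drift, cumulative_drift) for a 1-D array of median depths."""
    median_depths = np.asarray(median_depths, dtype=float)
    max_drift = np.around(np.nanmax(median_depths) - np.nanmin(median_depths), 2)
    cumulative_drift = np.around(np.nansum(np.abs(np.diff(median_depths))), 2)
    return max_drift, cumulative_drift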
# ==========================================================
# IMPLEMENTATION OF ACTUAL METRICS:
# ==========================================================
def isi_violations(spike_train, min_time, max_time, isi_threshold, min_isi=0):
"""Calculate ISI violations for a spike train.
Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
modified by <NAME> from cortex-lab/sortingQuality GitHub by <NAME>
Inputs:
-------
spike_train : array of spike times
min_time : minimum time for potential spikes
max_time : maximum time for potential spikes
isi_threshold : threshold for isi violation
min_isi : threshold for duplicate spikes
Outputs:
--------
fpRate : rate of contaminating spikes as a fraction of overall rate
A perfect unit has a fpRate = 0
A unit with some contamination has a fpRate < 0.5
A unit with lots of contamination has a fpRate > 1.0
num_violations : total number of violations
"""
duplicate_spikes = np.where(np.diff(spike_train) <= min_isi)[0]
spike_train = np.delete(spike_train, duplicate_spikes + 1)
isis = np.diff(spike_train)
num_spikes = len(spike_train)
num_violations = sum(isis < isi_threshold)
violation_time = 2*num_spikes*(isi_threshold - min_isi)
total_rate = firing_rate(spike_train, min_time, max_time)
violation_rate = num_violations/violation_time
fpRate = violation_rate/total_rate
return fpRate, num_violations
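# Illustrative usage sketch: a quick sanity check of ``isi_violations`` on a
# synthetic, roughly Poisson spike train.  The thresholds are assumptions
# picked for the example; ``firing_rate`` is the helper defined later in this
# file.
def _demo_isi_violations(seed=0):
    rng = np.random.RandomState(seed)
    spikes = np.sort(rng.uniform(0.0, 100.0, size=5000))  # ~50 Hz over 100 s
    fp_rate, n_viol = isi_violations(spikes, min_time=0.0, max_time=100.0,
                                     isi_threshold=0.0015, min_isi=0.0)
    return fp_rate, n_viol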
def presence_ratio(spike_train, min_time, max_time, num_bins=100):
"""Calculate fraction of time the unit is present within an epoch.
Inputs:
-------
spike_train : array of spike times
min_time : minimum time for potential spikes
max_time : maximum time for potential spikes
Outputs:
--------
presence_ratio : fraction of time bins in which this unit is spiking
"""
h, b = np.histogram(spike_train, np.linspace(min_time, max_time, num_bins))
return np.sum(h > 0) / num_bins
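# Illustrative usage sketch: with spikes covering only the first half of the
# recording, the presence ratio should come out near 0.5.  The values are
# assumptions for the example.
def _demo_presence_ratio():
    spikes = np.linspace(0.0, 50.0, 2000)  # unit goes silent after t = 50 s
    return presence_ratio(spikes, min_time=0.0, max_time=100.0)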
def firing_rate(spike_train, min_time = | |
crypto, address, confirmations=1):
payload = {
"method": "get_credits",
"params": {
"filters": [
{"field": "address", "op": "==", "value": address},
{"field": "asset", "op": "==", "value": crypto.upper()},
]
},
"jsonrpc": "2.0",
"id": 0,
}
credits = self.authed_post_url(payload)['result']
payload = {
"method": "get_debits",
"params": {
"filters": [
{"field": "address", "op": "==", "value": address},
{"field": "asset", "op": "==", "value": crypto.upper()},
]
},
"jsonrpc": "2.0",
"id": 0,
}
debits = self.authed_post_url(payload)['result']
return self._format_txs(credits, debits)
def get_transactions_multi(self, crypto, addresses):
payload = {
"method": "get_credits",
"params": {
"filters": [
{"field": "address", "op": "==", "value": address} for address in addresses
],
"filterop": "or"
},
"jsonrpc": "2.0",
"id": 0,
}
credits = self.authed_post_url(payload)['result']
payload = {
"method": "get_debits",
"params": {
"filters": [
{"field": "address", "op": "==", "value": address} for address in addresses
],
"filterop": "or"
},
"jsonrpc": "2.0",
"id": 0,
}
debits = self.authed_post_url(payload)['result']
return self._format_txs(credits, debits)
def get_single_transaction(self, crypto, txid):
from moneywagon import get_single_transaction
tx = get_single_transaction('btc', txid)
return tx
def make_unsigned_move_tx(self, crypto, amount, from_address, to_address):
payload = {
"method": "create_send",
"params": {
"source": from_address,
"destination": to_address,
"asset": crypto.upper(),
"quantity": int(amount * 1e8),
'encoding': "opreturn"
},
"jsonrpc": "2.0",
"id": 0,
}
return self.authed_post_url(payload)['result']
def get_unspent_outputs(self, crypto, address, confirmations=1):
raise Exception("CounterParty does not use unspent outputs")
def push_tx(self, crypto, tx_hex):
from moneywagon import push_tx
return push_tx('btc', tx_hex, random=True)
class CoinDaddy1(CounterParty):
service_id = 52
port = 4000
domain = "public.coindaddy.io"
username = 'rpc'
password = '<PASSWORD>'
name = "Coin Daddy #1"
class CoinDaddy2(CoinDaddy1):
service_id = 53
port = 4100
name = "Coin Daddy #2"
class CounterPartyChain(Service):
service_id = 54
api_homepage = "https://counterpartychain.io/api"
def get_balance(self, crypto, address, confirmations=1):
url = "https://counterpartychain.io/api/balances/%s" % address
response = self.get_url(url).json()
if response['error']:
return 0
for balance in response['data']:
if balance['asset'].upper() == crypto.upper():
return float(balance['amount'])
def push_tx(self, crypto, tx_hex):
from moneywagon import push_tx
return push_tx('btc', tx_hex, random=True)
class EtherChain(Service):
service_id = 55
name = "EtherChain"
api_homepage = "https://etherchain.org/documentation/api/"
def get_current_price(self, crypto, fiat):
url = "https://etherchain.org/api/basic_stats"
price = self.get_url(url).json()['data']['price']
if fiat.lower() in ['btc', 'usd']:
return price[fiat.lower()]
return self.convert_currency('usd', price['usd'], fiat)
def get_balance(self, crypto, address, confirmations=1):
url = "https://etherchain.org/api/account/%s" % address
data = self.get_url(url).json()['data']
return data[0]['balance'] / 1e18
class VTConline(Iquidus):
service_id = 57
name = "VTCOnline.org"
base_url = "https://explorer.vtconline.org"
supported_cryptos = ['vtc']
class Etherscan(Service):
service_id = 58
name = "Etherscan"
supported_cryptos = ['eth']
def get_balance(self, crypto, address, confirmations=1):
url = "https://api.etherscan.io/api?module=account&action=balance&address=%s&tag=latest" % address
response = self.get_url(url).json()
return int(response['result']) / 1e18
class GDAX(Service):
service_id = 59
name = "GDAX"
base_url = "https://api.gdax.com"
api_homepage = "https://docs.gdax.com/"
supported_cryptos = ['btc', 'ltc', 'eth']
def check_error(self, response):
if response.status_code != 200:
j = response.json()
raise ServiceError("GDAX returned %s error: %s" % (
response.status_code, j['message'])
)
super(GDAX, self).check_error(response)
def get_current_price(self, crypto, fiat):
url = "%s/products/%s-%s/ticker" % (self.base_url, crypto.upper(), fiat.upper())
response = self.get_url(url).json()
return float(response['price'])
def get_pairs(self):
url = "%s/products" % self.base_url
r = self.get_url(url).json()
return [x['id'].lower() for x in r]
class OKcoin(Service):
service_id = 60
name = "OKcoin"
exchange_base_url = "https://www.okcoin.cn"
block_base_url = "http://block.okcoin.cn"
supported_cryptos = ['btc', 'ltc']
api_homepage = "https://www.okcoin.cn/rest_getStarted.html"
def get_current_price(self, crypto, fiat):
if not fiat == 'cny':
raise SkipThisService("Only fiat=CNY supported")
url = "%s/api/v1/ticker.do?symbol=%s_%s" % (
self.exchange_base_url, crypto.lower(), fiat.lower()
)
response = self.get_url(url).json()
return float(response['ticker']['last'])
def check_error(self, response):
j = response.json()
if 'error_code' in j:
raise ServiceError("OKcoin returned error code %s" % j['error_code'])
super(OKcoin, self).check_error(response)
def get_pairs(self):
return ['btc-cny', 'ltc-cny']
def get_block(self, crypto, block_hash=None, block_number=None, latest=False):
if latest:
args = 'latest_block.do?'
if block_number or block_number == 0:
args = "block_height.do?block_height=%s&" % block_number
if block_hash:
raise SkipThisService("Block by hash not supported")
url = "%s/api/v1/%ssymbol=%s" % (
self.block_base_url, args, crypto.upper()
)
r = self.get_url(url).json()
ret = dict(
block_number=r['height'],
size=r['size'],
time=arrow.get(r['time'] / 1000).datetime,
hash=r['hash'],
txids=r['txid'],
tx_count=r['txcount'],
version=r['version'],
mining_difficulty=r['difficulty'],
total_fees=r['fee'],
sent_value=r['totalOut']
)
if r.get('relayed_by'):
ret['miner'] = r['relayed_by']
if r.get('previousblockhash'):
ret['previous_hash'] = r['previousblockhash']
if r.get('nextblockhash'):
ret['next_hash'] = r['nextblockhash']
return ret
class FreeCurrencyConverter(Service):
service_id = 61
base_url = "http://free.currencyconverterapi.com"
api_homepage = "http://www.currencyconverterapi.com/docs"
def get_fiat_exchange_rate(self, from_fiat, to_fiat):
pair = "%s_%s" % (to_fiat.upper(), from_fiat.upper())
url = "%s/api/v3/convert?q=%s&compact=y" % (
self.base_url, pair
)
response = self.get_url(url).json()
return response[pair]['val']
class BTCChina(Service):
service_id = 62
api_homepage = "https://www.btcc.com/apidocs/spot-exchange-market-data-rest-api#ticker"
name = "BTCChina"
def get_current_price(self, crypto, fiat):
if fiat == 'usd':
url = "https://spotusd-data.btcc.com/data/pro/ticker?symbol=%sUSD" % crypto.upper()
key = "Last"
else:
url = "https://data.btcchina.com/data/ticker?market=%s%s" % (
crypto.lower(), fiat.lower()
)
key = "last"
response = self.get_url(url).json()
if not response:
raise ServiceError("Pair not supported (blank response)")
return float(response['ticker'][key])
class Gemini(Service):
service_id = 63
api_homepage = "https://docs.gemini.com/rest-api/"
name = "Gemini"
def get_current_price(self, crypto, fiat):
url = "https://api.gemini.com/v1/pubticker/%s%s" % (
crypto.lower(), fiat.lower()
)
response = self.get_url(url).json()
return float(response['last'])
class CexIO(Service):
service_id = 64
api_homepage = "https://cex.io/rest-api"
name = "Cex.io"
def check_error(self, response):
super(CexIO, self).check_error(response)
j = response.json()
if 'error' in j:
raise ServiceError("CexIO returned error: %s" % j['error'])
def get_current_price(self, crypto, fiat):
url = "https://c-cex.com/t/%s-%s.json" % (
crypto.lower(), fiat.lower()
)
response = self.get_url(url).json()
return float(response['ticker']['lastprice'])
def get_pairs(self):
url = "https://c-cex.com/t/pairs.json"
r = self.get_url(url).json()
return r['pairs']
class Poloniex(Service):
service_id = 65
api_homepage = "https://poloniex.com/support/api/"
name = "Poloniex"
def get_current_price(self, crypto, fiat):
url = "https://poloniex.com/public?command=returnTicker"
response = self.get_url(url).json()
is_usd = False
if fiat.lower() == 'usd':
fiat = 'usdt'
is_usd = True
find_pair = "%s_%s" % (fiat.upper(), crypto.upper())
for pair, data in response.items():
if pair == find_pair:
return float(data['last'])
reverse_pair = "%s_%s" % (crypto.upper(), fiat.upper())
for pair, data in response.items():
if pair == reverse_pair:
return 1 / float(data['last'])
btc_pair = "BTC_%s" % crypto.upper()
if is_usd and btc_pair in response:
btc_rate = float(response['USDT_BTC']['last'])
fiat_exchange = float(response[btc_pair]['last'])
return fiat_exchange * btc_rate
raise SkipThisService("Pair %s not supported" % find_pair)
def get_pairs(self):
url = "https://poloniex.com/public?command=returnTicker"
r = self.get_url(url).json()
ret = []
for pair in r.keys():
fiat, crypto = pair.lower().split('_')
if fiat == 'usdt': fiat = 'usd'
ret.append("%s-%s" % (crypto, fiat))
return ret
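# Illustrative sketch of the pair-resolution order implemented in
# ``Poloniex.get_current_price`` above, rewritten against a plain dictionary
# so it can run without network access.  The function name and the sample
# ticker layout are assumptions for this sketch.
def _demo_poloniex_lookup(ticker, crypto, fiat):
    is_usd = fiat.lower() == 'usd'
    if is_usd:
        fiat = 'usdt'
    direct = "%s_%s" % (fiat.upper(), crypto.upper())
    if direct in ticker:
        return float(ticker[direct]['last'])
    reverse = "%s_%s" % (crypto.upper(), fiat.upper())
    if reverse in ticker:
        return 1 / float(ticker[reverse]['last'])
    btc_pair = "BTC_%s" % crypto.upper()
    if is_usd and btc_pair in ticker:
        return float(ticker[btc_pair]['last']) * float(ticker['USDT_BTC']['last'])
    raise KeyError("pair %s not found" % direct)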
class Bittrex(Service):
service_id = 66
api_homepage = "https://bittrex.com/Home/Api"
def check_error(self, response):
j = response.json()
if not j['success']:
raise ServiceError("Bittrex returned error: %s" % j['message'])
super(Bittrex, self).check_error(response)
def get_current_price(self, crypto, fiat):
if fiat.lower() == 'usd':
fiat = 'usdt'
if crypto == 'xmy':
crypto = 'myr'
url = "https://bittrex.com/api/v1.1/public/getticker?market=%s-%s" % (
fiat.upper(), crypto.upper()
)
r = self.get_url(url).json()
return r['result']['Last']
def get_pairs(self):
url = "https://bittrex.com/api/v1.1/public/getmarkets"
r = self.get_url(url).json()['result']
ret = []
for x in r:
crypto = x['MarketCurrency'].lower()
fiat = x['BaseCurrency'].lower()
if fiat == 'usdt':
fiat = 'usd'
ret.append("%s-%s" % (crypto, fiat))
return ret
class Huobi(Service):
service_id = 67
api_homepage = "https://github.com/huobiapi/API_Docs_en/wiki"
name = "Huobi"
def check_error(self, response):
if response.status_code != 200:
j = response.json()
raise ServiceError("Huobi returned error: %s" % j['error'])
super(Huobi, self).check_error(response)
def get_current_price(self, crypto, fiat):
if fiat.lower() == "cny":
fiat = 'static'
elif fiat.lower() == 'usd':
pass
else:
raise SkipThisService("CNY and USD only fiat supported")
url = "http://api.huobi.com/%smarket/detail_%s_json.js" % (
fiat.lower(), crypto.lower()
)
r = self.get_url(url).json()
return r['p_last']
class FeathercoinCom2(BitcoinAbe):
service_id = 68
supported_cryptos = ['ftc']
base_url = "http://explorer.feathercoin.com/chain/feathercoin"
name = "Feathercoin.com (Abe)"
class ChainTips(BitpayInsight):
service_id = 69
domain = "fsight.chain.tips"
supported_cryptos = ['ftc']
name = "Chain Tips (Insight))"
class Vircurex(Service):
service_id = 70
base_url = "https://api.vircurex.com/api"
api_homepage = "https://vircurex.com/welcome/api"
def check_error(self, response):
j = response.json()
if j['status'] != 0:
raise ServiceError("Vircurex returned error: %s" % j['status_text'])
super(Vircurex, self).check_error(response)
def get_current_price(self, crypto, fiat):
if crypto == 'blk':
crypto = 'bc'
url = "%s/get_last_trade.json?base=%s&alt=%s" % (
self.base_url, crypto.upper(), fiat.upper()
)
r = self.get_url(url).json()
return float(r['value'])
def get_pairs(self):
url = "%s/get_info_for_currency.json" % self.base_url
r = self.get_url(url).json()
ret = []
for fiat, data in r.items():
if fiat == 'status':
continue
for crypto, exchange_data in data.items():
pair = "%s-%s" % (crypto.lower(), fiat.lower())
ret.append(pair)
return ret
class TradeBlock(Service):
service_id = 71
def get_single_transaction(self, crypto, txid):
raise SkipThisService("No scriptPubKey in output")
url = "https://tradeblock.com/api/blockchain/tx/%s/p" % txid
tx = self.get_url(url).json()['data']
ins = [{'txid': x['prev_out']['hash'], 'amount': x['value']} for x in tx['ins']]
outs = [x for x in tx['outs']]
return dict(
txid=txid,
size=tx['size'],
time=arrow.get(tx['time_received']).datetime,
block_hash=tx.get('block_hash', None),
block_number=tx['block'],
inputs=ins,
outputs=outs,
fees=tx['fee'],
)
class MasterNodeIO(BitpayInsight):
service_id = 72
domain = "blockchain.masternode.io"
supported_cryptos = ['dash']
name = "Masternode.io (Insight)"
class DashOrgInsight(BitpayInsight):
service_id = 73
domain = "insight.dash.org"
api_tag = "insight-api-dash"
supported_cryptos = ['dash']
protocol = "http"
name = "Dash.org (Insight)"
class LocalBitcoinsChain(BitpayInsight):
service_id = 74
domain = "localbitcoinschain.com"
name = "LocalBitcoinsChain (Insight)"
class ETCchain(Service):
service_id = 75
base_url = "https://etcchain.com/api/v1/"
def get_balance(self, crypto, address, confirmations=1):
url = "%sgetAddressBalance?address=%s" % (self.base_url, | |
Shown in eq. 13
dii_avg : numpy.ndarray
Effective hard sphere diameter of the beads (i.e. groups or segments) in component (i.e. molecule) i.
epsilonii_avg : numpy.ndarray
Average bead (i.e. group or segment) potential well depth in component (i.e. molecule) i.
x0ii : numpy.ndarray
Matrix of sigmaii_avg/dii_eff
zetax : numpy.ndarray
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
Returns
-------
Bii_avg : numpy.ndarray
Bii_avg(rho*Cmol2seg,l_ii_avg) in K as defined in eq. 20.
"""
rhos = Cmol2seg * rho
ncomp = len(dii_avg)
# compute Iii_avg(l_ii_avg), eq. 23
Iii_avg = (1.0 - (x0ii ** (3.0 - l_ii_avg))) / (l_ii_avg - 3.0)
# compute Jii_avg(l_ii_avg), eq. 24
Jii_avg = (
1.0
- ((x0ii ** (4.0 - l_ii_avg)) * (l_ii_avg - 3.0))
+ ((x0ii ** (3.0 - l_ii_avg)) * (l_ii_avg - 4.0))
) / ((l_ii_avg - 3.0) * (l_ii_avg - 4.0))
tmp11 = rhos * (2.0 * np.pi)
tmp12 = (dii_avg ** 3 * constants.molecule_per_nm3 ** 2) * epsilonii_avg
tmp2 = (1.0 - (zetax / 2.0)) / ((1.0 - zetax) ** 3)
tmp3 = (9.0 * zetax * (1.0 + zetax)) / (2.0 * ((1 - zetax) ** 3))
Bii_avg = np.zeros((len(rho), ncomp))
for k in np.arange(ncomp):
Bii_avg[:, k] = tmp11 * tmp12[k] * (tmp2 * Iii_avg[k] - tmp3 * Jii_avg[k])
# Bii_avg = tmp11*tmp12*(tmp2*Iii_avg - tmp3*Jii_avg)
return Bii_avg
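# Illustrative sketch: the two auxiliary integrals used above (eqs. 23-24)
# for a single exponent, written out in isolation.  The sample arguments are
# assumptions for the example.
def _demo_I_J(x0=1.1, lam=6.0):
    I_l = (1.0 - x0 ** (3.0 - lam)) / (lam - 3.0)
    J_l = (1.0 - x0 ** (4.0 - lam) * (lam - 3.0)
           + x0 ** (3.0 - lam) * (lam - 4.0)) / ((lam - 3.0) * (lam - 4.0))
    return I_l, J_l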
def calc_da1sii_drhos(rho, Cmol2seg, l_ii_avg, zetax, epsilonii_avg, dii_avg):
r"""
Return the derivative of a1s,ii_avg(rho*Cmol2seg,l_ii_avg) with respect to the number density, in Kelvin
Used in the calculation of the first order term of the perturbation expansion corresponding to the mean-attractive energy.
Parameters
----------
rho : numpy.ndarray
Number density of system [mol/m^3]
Cmol2seg : float
Conversion factor from molecular number density, :math:`\rho`, to segment (i.e. group) number density, :math:`\rho_S`. Shown in eq. 13
l_ii_avg : numpy.ndarray
Average bead (i.e. group or segment) exponent in component (i.e. molecule) i.
zetax : numpy.ndarray
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
epsilonii_avg : numpy.ndarray
Average bead (i.e. group or segment) potential well depth in component (i.e. molecule) i.
dii_avg : numpy.ndarray
Effective hard sphere diameter of the beads (i.e. groups or segments) in component (i.e. molecule) i.
Returns
-------
calc_da1sii_drhos : numpy.ndarray
Matrix used in the calculation of :math:`A_1` the first order term of the perturbation expansion corresponding to the mean-attractive energy
"""
ncomp = len(dii_avg)
zetax_pow = np.zeros((len(rho), 4), dtype=rho.dtype)
zetax_pow[:, 0] = zetax
for i in range(1, 4):
zetax_pow[:, i] = zetax_pow[:, i - 1] * zetax_pow[:, 0]
# check if you have more than 1 bead types
etaii_avg = np.zeros((len(rho), ncomp), dtype=rho.dtype)
rhos_detaii_avg_drhos = np.zeros((len(rho), ncomp), dtype=rho.dtype)
for k in range(ncomp):
ciii_avg = np.dot(
ckl_coef,
np.array(
(
1.0,
1.0 / l_ii_avg[k],
1.0 / l_ii_avg[k] ** 2,
1.0 / l_ii_avg[k] ** 3,
),
dtype=ckl_coef.dtype,
),
)
etaii_avg[:, k] = np.dot(zetax_pow, ciii_avg)
rhos_detaii_avg_drhos[:, k] = np.dot(
zetax_pow, ciii_avg * np.array([1.0, 2.0, 3.0, 4.0])
)
tmp1 = (1.0 - (etaii_avg / 2.0)) / ((1.0 - etaii_avg) ** 3) + (
5.0 - 2.0 * etaii_avg
) / (2.0 * (1.0 - etaii_avg) ** 4) * rhos_detaii_avg_drhos
tmp2 = (
-2.0
* np.pi
* (
(epsilonii_avg * (dii_avg ** 3 * constants.molecule_per_nm3 ** 2))
/ (l_ii_avg - 3.0)
)
)
da1s_drhos = tmp1 * tmp2
return da1s_drhos
def calc_dBkl_drhos(l_ii_avg, dii_avg, epsilonii_avg, x0ii, zetax):
r"""
Return derivative of Bkl(rho*Cmol2seg,l_ii_avg) with respect to :math:`\rho_S`.
Used in the calculation of :math:`A_1` the first order term of the perturbation expansion corresponding to the mean-attractive energy.
Parameters
----------
l_ii_avg : numpy.ndarray
Average bead (i.e. group or segment) exponent (attractive or repulsive) in component (i.e. molecule) i.
dii_avg : numpy.ndarray
Effective hard sphere diameter of the beads (i.e. groups or segments) in component (i.e. molecule) i.
epsilonii_avg : numpy.ndarray
Average bead (i.e. group or segment) potential well depth in component (i.e. molecule) i.
x0ii : numpy.ndarray
Matrix of sigmaii_avg/dii_eff
zetax : numpy.ndarray
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
Returns
-------
dBkl_drhos : numpy.ndarray
Matrix used in the calculation of :math:`A_1` the first order term of the perturbation expansion corresponding to the mean-attractive energy, size is rho x l_ii_avg.shape
"""
ncomp = len(dii_avg)
nrho = len(zetax)
# compute Iii_avg(l_ii_avg), eq. 23
Iii_avg = (1.0 - (x0ii ** (3.0 - l_ii_avg))) / (l_ii_avg - 3.0)
# compute Jii_avg(l_ii_avg), eq. 24
Jii_avg = (
1.0
- ((x0ii ** (4.0 - l_ii_avg)) * (l_ii_avg - 3.0))
+ ((x0ii ** (3.0 - l_ii_avg)) * (l_ii_avg - 4.0))
) / ((l_ii_avg - 3.0) * (l_ii_avg - 4.0))
tmp = 2.0 * np.pi * dii_avg ** 3 * epsilonii_avg
tmp1 = np.zeros((nrho, ncomp))
tmp2 = np.zeros((nrho, ncomp))
for k in np.arange(ncomp):
tmp1[:, k] = ((1.0 - (zetax / 2.0)) / ((1.0 - zetax) ** 3) * Iii_avg[k]) - (
((9.0 * zetax * (1.0 + zetax)) / (2.0 * ((1 - zetax) ** 3))) * Jii_avg[k]
)
tmp2[:, k] = (
(5.0 - 2.0 * zetax) * zetax / (2 * (1.0 - zetax) ** 4) * Iii_avg[k]
) - (
(
(9.0 * zetax * (zetax ** 2 + 4.0 * zetax + 1))
/ (2.0 * ((1 - zetax) ** 4))
)
* Jii_avg[k]
)
dBkl_drhos = tmp * (tmp1 + tmp2) * constants.molecule_per_nm3 ** 2
return dBkl_drhos
def calc_da1iidrhos(
rho, Cmol2seg, dii_eff, l_aii_avg, l_rii_avg, x0ii, epsilonii_avg, zetax
):
r"""
Compute derivative of the term, :math:`\bar{a}_{1,ii}` with respect to :math:`\rho_s`
Parameters
----------
rho : numpy.ndarray
Number density of system [mol/m^3]
Cmol2seg : float
Conversion factor from molecular number density, :math:`\rho`, to segment (i.e. group) number density, :math:`\rho_S`. Shown in eq. 13
dii_eff : numpy.ndarray
Effective hard sphere diameter of the beads (i.e. groups or segments) in component (i.e. molecule) i.
l_aii_avg : numpy.ndarray
Average bead (i.e. group or segment) attractive exponent in component (i.e. molecule) i.
l_rii_avg : numpy.ndarray
Average bead (i.e. group or segment) repulsive exponent in component (i.e. molecule) i.
x0ii : numpy.ndarray
Matrix of sigmaii_avg/dii_eff
epsilonii_avg : numpy.ndarray
Average bead (i.e. group or segment) potential well depth in component (i.e. molecule) i.
zetax : numpy.ndarray
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
Returns
-------
da1iidrhos : numpy.ndarray
Derivative of term with respect to segment density
"""
Cii = prefactor(l_rii_avg, l_aii_avg)
das1_drhos_a = calc_da1sii_drhos(
rho, Cmol2seg, l_aii_avg, zetax, epsilonii_avg, dii_eff
)
das1_drhos_r = calc_da1sii_drhos(
rho, Cmol2seg, l_rii_avg, zetax, epsilonii_avg, dii_eff
)
dB_drhos_a = calc_dBkl_drhos(l_aii_avg, dii_eff, epsilonii_avg, x0ii, zetax)
dB_drhos_r = calc_dBkl_drhos(l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax)
da1iidrhos = Cii * (
((x0ii ** l_aii_avg) * (das1_drhos_a + dB_drhos_a))
- ((x0ii ** l_rii_avg) * (das1_drhos_r + dB_drhos_r))
)
return da1iidrhos
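# Hedged note: ``prefactor`` above is presumed to be the usual Mie prefactor
# C(lambda_r, lambda_a); the standalone sketch below follows the common
# SAFT-gamma Mie convention and is an assumption, not a statement about this
# module's own implementation.
def _demo_mie_prefactor(l_r, l_a):
    return (l_r / (l_r - l_a)) * (l_r / l_a) ** (l_a / (l_r - l_a))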
def calc_da2ii_1pchi_drhos(
rho, Cmol2seg, epsilonii_avg, dii_eff, x0ii, l_rii_avg, l_aii_avg, zetax
):
r"""
Compute derivative of the term, :math:`\frac{\bar{a}_{2,ii}}{1+\bar{\chi}_{ii}}` with respect to :math:`\rho_s`.
Parameters
----------
rho : numpy.ndarray
Number density of system [mol/m^3]
Cmol2seg : float
Conversion factor from molecular number density, :math:`\rho`, to segment (i.e. group) number density, :math:`\rho_S`. Shown in eq. 13
epsilonii_avg : numpy.ndarray
Average bead (i.e. group or segment) potential well depth in component (i.e. molecule) i.
dii_eff : numpy.ndarray
Effective hard sphere diameter of the beads (i.e. groups or segments) in component (i.e. molecule) i.
x0ii : numpy.ndarray
Matrix of sigmaii_avg/dii_eff
l_rii_avg : numpy.ndarray
Average bead (i.e. group or segment) repulsive exponent in component (i.e. molecule) i.
l_aii_avg : numpy.ndarray
Average bead (i.e. group or segment) attractive exponent in component (i.e. molecule) i.
zetax : numpy.ndarray
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
Returns
-------
da2ii_1pchi_drhos : numpy.ndarray
Term used in the calculation of the second-order term from the macroscopic compressibility
"""
# Calculate terms and derivatives used in derivative chain rule
KHS = ((1.0 - zetax) ** 4) / (
1.0 + (4.0 * zetax) + (4.0 * (zetax ** 2)) - (4.0 | |
"""Tests for UI view permissions in the samplesheets app"""
from urllib.parse import urlencode
from django.conf import settings
from django.test import override_settings
from django.urls import reverse
from unittest import skipIf
# Projectroles dependency
from projectroles.app_settings import AppSettingAPI
from projectroles.tests.test_permissions import TestProjectPermissionBase
from projectroles.utils import build_secret
from samplesheets.models import ISATab
from samplesheets.tests.test_io import SampleSheetIOMixin, SHEET_DIR
from samplesheets.tests.test_views_ajax import IrodsAccessTicketMixin
app_settings = AppSettingAPI()
# Local constants
APP_NAME = 'samplesheets'
SHEET_PATH = SHEET_DIR + 'i_small.zip'
REMOTE_SITE_NAME = 'Test site'
REMOTE_SITE_URL = 'https://sodar.bihealth.org'
REMOTE_SITE_SECRET = build_secret()
INVALID_SECRET = build_secret()
IRODS_ENABLED = (
True if 'omics_irods' in settings.ENABLED_BACKEND_PLUGINS else False
)
IRODS_SKIP_MSG = 'Irodsbackend not enabled in settings'
class TestSampleSheetsPermissions(
SampleSheetIOMixin, IrodsAccessTicketMixin, TestProjectPermissionBase
):
"""Tests for samplesheets view permissions"""
def setUp(self):
super().setUp()
self.investigation = self._import_isa_from_file(
SHEET_PATH, self.project
)
self.study = self.investigation.studies.first()
self.assay = self.study.assays.first()
self.ticket = self.make_ticket(
project=self.project,
path='/some/path',
study=self.study,
assay=self.assay,
user=self.user_owner,
)
def test_project_sheets(self):
"""Test the project sheets view"""
url = reverse(
'samplesheets:project_sheets',
kwargs={'project': self.project.sodar_uuid},
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
]
bad_users = [self.user_no_roles, self.anonymous]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
# Test public project
self.project.set_public()
self.assert_response(url, self.user_no_roles, 200)
self.assert_response(url, self.anonymous, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_project_sheets_anon(self):
"""Test project sheets view with anonymous guest access"""
self.project.set_public()
url = reverse(
'samplesheets:project_sheets',
kwargs={'project': self.project.sodar_uuid},
)
self.assert_response(url, self.anonymous, 200)
def test_sheet_import(self):
"""Test sheet import view"""
url = reverse(
'samplesheets:import', kwargs={'project': self.project.sodar_uuid}
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
]
bad_users = [self.guest_as.user, self.user_no_roles, self.anonymous]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, bad_users, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_sheet_import_anon(self):
"""Test sheet import view with anonymous guest access"""
self.project.set_public()
url = reverse(
'samplesheets:import',
kwargs={'project': self.project.sodar_uuid},
)
self.assert_response(url, self.anonymous, 302)
def test_sheet_import_sync(self):
"""Test sheet import view with sync enabled"""
app_settings.set_app_setting(
APP_NAME, 'sheet_sync_enable', True, project=self.project
)
url = reverse(
'samplesheets:import', kwargs={'project': self.project.sodar_uuid}
)
bad_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
self.user_no_roles,
self.anonymous,
]
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, bad_users, 302)
def test_sheet_template_select(self):
"""Test sheet template select view"""
self.investigation.delete()
url = reverse(
'samplesheets:template_select',
kwargs={'project': self.project.sodar_uuid},
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
]
bad_users = [self.guest_as.user, self.user_no_roles, self.anonymous]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, bad_users, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_sheet_template_select_anon(self):
"""Test sheet template select view with anonymous guest access"""
self.project.set_public()
url = reverse(
'samplesheets:template_select',
kwargs={'project': self.project.sodar_uuid},
)
self.assert_response(url, self.anonymous, 302)
def test_sheet_template_select_sync(self):
"""Test sheet template select view with sync enabled"""
app_settings.set_app_setting(
APP_NAME, 'sheet_sync_enable', True, project=self.project
)
url = reverse(
'samplesheets:template_select',
kwargs={'project': self.project.sodar_uuid},
)
bad_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
self.user_no_roles,
self.anonymous,
]
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, bad_users, 302)
def test_sheet_template_create(self):
"""Test sheet template creation view"""
self.investigation.delete()
url = (
reverse(
'samplesheets:template_create',
kwargs={'project': self.project.sodar_uuid},
)
+ '?'
+ urlencode({'sheet_tpl': 'generic'})
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
]
bad_users = [self.guest_as.user, self.user_no_roles, self.anonymous]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, bad_users, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_sheet_template_create_anon(self):
"""Test sheet template creation view with anonymous guest access"""
self.investigation.delete()
url = (
reverse(
'samplesheets:template_create',
kwargs={'project': self.project.sodar_uuid},
)
+ '?'
+ urlencode({'sheet_tpl': 'generic'})
)
self.project.set_public()
self.assert_response(url, self.anonymous, 302)
def test_sheet_template_create_sync(self):
"""Test sheet template create view with sync enabled"""
app_settings.set_app_setting(
APP_NAME, 'sheet_sync_enable', True, project=self.project
)
url = (
reverse(
'samplesheets:template_create',
kwargs={'project': self.project.sodar_uuid},
)
+ '?'
+ urlencode({'sheet_tpl': 'generic'})
)
bad_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
self.user_no_roles,
self.anonymous,
]
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, bad_users, 302)
def test_sheet_export_excel_study(self):
"""Test sheet Excel export view for study table"""
url = reverse(
'samplesheets:export_excel', kwargs={'study': self.study.sodar_uuid}
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
]
bad_users = [self.user_no_roles, self.anonymous]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, self.user_no_roles, 200)
self.assert_response(url, self.anonymous, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_sheet_export_excel_study_anon(self):
"""Test Excel export for study table with anonymous guest access"""
url = reverse(
'samplesheets:export_excel', kwargs={'study': self.study.sodar_uuid}
)
self.project.set_public()
self.assert_response(url, self.anonymous, 200)
def test_sheet_export_excel_assay(self):
"""Test sheet Excel export view for assay table"""
url = reverse(
'samplesheets:export_excel', kwargs={'assay': self.assay.sodar_uuid}
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
]
bad_users = [self.user_no_roles, self.anonymous]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, self.user_no_roles, 200)
self.assert_response(url, self.anonymous, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_sheet_export_excel_assay_anon(self):
"""Test Excel export for assay table with anonymous guest access"""
url = reverse(
'samplesheets:export_excel', kwargs={'assay': self.assay.sodar_uuid}
)
self.project.set_public()
self.assert_response(url, self.anonymous, 200)
def test_sheet_export_isa(self):
"""Test sheet ISA export view"""
url = reverse(
'samplesheets:export_isa',
kwargs={'project': self.project.sodar_uuid},
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
]
bad_users = [self.user_no_roles, self.anonymous]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, self.user_no_roles, 200)
self.assert_response(url, self.anonymous, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_sheet_export_isa_anon(self):
"""Test sheet ISA export view with anonymous guest access"""
url = reverse(
'samplesheets:export_isa',
kwargs={'project': self.project.sodar_uuid},
)
self.project.set_public()
self.assert_response(url, self.anonymous, 200)
def test_sheet_delete(self):
"""Test sheet delete view"""
url = reverse(
'samplesheets:delete', kwargs={'project': self.project.sodar_uuid}
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
]
bad_users = [self.guest_as.user, self.user_no_roles, self.anonymous]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, bad_users, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_sheet_delete_anon(self):
"""Test sheet delete view with anonymous guest access"""
url = reverse(
'samplesheets:delete', kwargs={'project': self.project.sodar_uuid}
)
self.project.set_public()
self.assert_response(url, self.anonymous, 302)
def test_version_list(self):
"""Test sheet version list view"""
url = reverse(
'samplesheets:versions', kwargs={'project': self.project.sodar_uuid}
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
]
bad_users = [self.user_no_roles, self.anonymous]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, self.user_no_roles, 200)
self.assert_response(url, self.anonymous, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_version_list_anon(self):
"""Test sheet version list view with anonymous guest access"""
url = reverse(
'samplesheets:versions', kwargs={'project': self.project.sodar_uuid}
)
self.project.set_public()
self.assert_response(url, self.anonymous, 200)
def test_version_compare(self):
"""Test sheet version compare view"""
isa = ISATab.objects.first()
url = '{}?source={}&target={}'.format(
reverse(
'samplesheets:version_compare',
kwargs={'project': self.project.sodar_uuid},
),
str(isa.sodar_uuid),
str(isa.sodar_uuid),
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
]
bad_users = [
self.user_no_roles,
self.anonymous,
]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, self.user_no_roles, 200)
self.assert_response(url, self.anonymous, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_version_compare_anon(self):
"""Test sheet version compare view with anonymous guest access"""
isa = ISATab.objects.first()
url = '{}?source={}&target={}'.format(
reverse(
'samplesheets:version_compare',
kwargs={'project': self.project.sodar_uuid},
),
str(isa.sodar_uuid),
str(isa.sodar_uuid),
)
self.project.set_public()
self.assert_response(url, self.anonymous, 200)
def test_version_compare_file(self):
"""Test sheet version compare file view"""
isa = ISATab.objects.first()
url = '{}?source={}&target={}&filename={}&category={}'.format(
reverse(
'samplesheets:version_compare_file',
kwargs={'project': self.project.sodar_uuid},
),
str(isa.sodar_uuid),
str(isa.sodar_uuid),
's_small.txt',
'studies',
)
good_users = [
self.superuser,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
]
bad_users = [
self.user_no_roles,
self.anonymous,
]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, self.user_no_roles, 200)
self.assert_response(url, self.anonymous, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_version_compare_file_anon(self):
"""Test sheet version compare file view"""
isa = ISATab.objects.first()
url = '{}?source={}&target={}&filename={}&category={}'.format(
reverse(
'samplesheets:version_compare_file',
kwargs={'project': self.project.sodar_uuid},
),
str(isa.sodar_uuid),
str(isa.sodar_uuid),
's_small.txt',
'studies',
)
self.project.set_public()
self.assert_response(url, self.anonymous, 200)
def test_version_restore(self):
"""Test sheet restoring view"""
isa_version = ISATab.objects.get(
investigation_uuid=self.investigation.sodar_uuid
)
url = reverse(
'samplesheets:version_restore',
kwargs={'isatab': isa_version.sodar_uuid},
)
good_users = [self.superuser, self.owner_as.user, self.delegate_as.user]
bad_users = [
self.contributor_as.user,
self.guest_as.user,
self.user_no_roles,
self.anonymous,
]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, bad_users, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_version_restore_anon(self):
"""Test sheet restoring view"""
isa_version = ISATab.objects.get(
investigation_uuid=self.investigation.sodar_uuid
)
url = reverse(
'samplesheets:version_restore',
kwargs={'isatab': isa_version.sodar_uuid},
)
self.project.set_public()
self.assert_response(url, self.anonymous, 302)
def test_version_update(self):
"""Test sheet update view"""
isa_version = ISATab.objects.get(
investigation_uuid=self.investigation.sodar_uuid
)
url = reverse(
'samplesheets:version_update',
kwargs={'isatab': isa_version.sodar_uuid},
)
good_users = [self.superuser, self.owner_as.user, self.delegate_as.user]
bad_users = [
self.contributor_as.user,
self.guest_as.user,
self.user_no_roles,
self.anonymous,
]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, bad_users, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_version_update_anon(self):
"""Test sheet update view with anonymous guest access"""
isa_version = ISATab.objects.get(
investigation_uuid=self.investigation.sodar_uuid
)
url = reverse(
'samplesheets:version_update',
kwargs={'isatab': isa_version.sodar_uuid},
)
self.project.set_public()
self.assert_response(url, self.anonymous, 302)
def test_version_delete(self):
"""Test sheet delete view"""
isa_version = ISATab.objects.get(
investigation_uuid=self.investigation.sodar_uuid
)
url = reverse(
'samplesheets:version_delete',
kwargs={'isatab': isa_version.sodar_uuid},
)
good_users = [self.superuser, self.owner_as.user, self.delegate_as.user]
bad_users = [
self.contributor_as.user,
self.guest_as.user,
self.user_no_roles,
self.anonymous,
]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
self.project.set_public()
self.assert_response(url, bad_users, 302)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_version_delete_anon(self):
"""Test sheet delete view with anonymous guest access"""
isa_version = ISATab.objects.get(
investigation_uuid=self.investigation.sodar_uuid
)
url = reverse(
'samplesheets:version_delete',
kwargs={'isatab': isa_version.sodar_uuid},
)
self.project.set_public()
self.assert_response(url, self.anonymous, 302)
def test_version_delete_batch(self):
"""Test batch sheet delete view"""
isa_version = ISATab.objects.get(
investigation_uuid=self.investigation.sodar_uuid
)
url = reverse(
'samplesheets:version_delete_batch',
kwargs={'project': self.project.sodar_uuid},
)
data = {'confirm': '1', 'version_check': str(isa_version.sodar_uuid)}
good_users = [self.superuser, self.owner_as.user, self.delegate_as.user]
bad_users = [
self.contributor_as.user,
self.guest_as.user,
self.user_no_roles,
self.anonymous,
]
self.assert_response(url, good_users, 200, method='POST', data=data)
self.assert_response(url, bad_users, 302, method='POST', data=data)
self.project.set_public()
self.assert_response(url, bad_users, 302, method='POST', data=data)
@override_settings(PROJECTROLES_ALLOW_ANONYMOUS=True)
def test_version_delete_batch_anon(self):
"""Test batch sheet delete view with anonymous guest access"""
isa_version = ISATab.objects.get(
investigation_uuid=self.investigation.sodar_uuid
)
url = reverse(
'samplesheets:version_delete_batch',
kwargs={'project': self.project.sodar_uuid},
)
data = {'confirm': '1', 'version_check': str(isa_version.sodar_uuid)}
self.project.set_public()
self.assert_response(url, self.anonymous, 302, method='POST', data=data)
@skipIf(not IRODS_ENABLED, IRODS_SKIP_MSG)
def test_ticket_list(self):
"""Test ticket list view"""
        url
import sys
import os
import subprocess
import random
import math
import numpy
import sfpy
from .arithmetic import evalctx
from .arithmetic.canonicalize import Canonicalizer, Condenser
from .arithmetic import native, np
from .arithmetic import softfloat, softposit
from .arithmetic import ieee754, posit
from .fpbench import fpcparser, fpcast
fpbench_root = '/home/bill/private/research/origin-FPBench'
fpbench_tools = os.path.join(fpbench_root, 'tools')
fpbench_benchmarks = os.path.join(fpbench_root, 'benchmarks')
def run_tool(toolname, core, *args):
tool = subprocess.Popen(
args=['racket', os.path.join(fpbench_tools, toolname), *args],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = tool.communicate(input=core.sexp.encode('utf-8'))
retval = tool.wait()
if retval != 0:
print('subprocess:\n {}\nreturned {:d}'.format(' '.join(tool.args), retval),
file=sys.stderr, flush=True)
if stderr_data:
print(stderr_data, file=sys.stderr, flush=True)
return stdout_data.decode('utf-8')
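# Hedged usage sketch (not from the original source): run_tool() needs a parsed
# FPCore object exposing .sexp, e.g. from fpcparser.compile1() as used elsewhere
# in this file. The FPCore text below is a made-up example.
def _example_run_tool():
    core = fpcparser.compile1('(FPCore (x) :name "inc" (+ x 1))')
    return run_tool('canonicalizer.rkt', core)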
def filter_cores(*args, benchmark_dir = fpbench_benchmarks):
if not os.path.isdir(benchmark_dir):
raise ValueError('{}: not a directory'.format(benchmark_dir))
names = os.listdir(benchmark_dir)
benchmark_files = [name for name in names
if name.lower().endswith('.fpcore')
and os.path.isfile(os.path.join(benchmark_dir, name))]
cat = subprocess.Popen(
cwd=benchmark_dir,
args=['cat', *benchmark_files],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
cat.stdin.close()
tool = subprocess.Popen(
args=['racket', os.path.join(fpbench_tools, 'filter.rkt'), *args],
stdin=cat.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = tool.communicate()
# cleanup
for proc in [cat, tool]:
retval = proc.wait()
if retval != 0:
print('subprocess:\n {}\nreturned {:d}'.format(' '.join(proc.args), retval),
file=sys.stderr, flush=True)
cat_stderr_data = cat.stderr.read()
cat.stderr.close()
if cat_stderr_data:
print(cat_stderr_data, file=sys.stderr, flush=True)
if stderr_data:
print(stderr_data, file=sys.stderr, flush=True)
return stdout_data.decode('utf-8')
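# Hedged usage sketch: the positional arguments are forwarded verbatim to
# FPBench's filter.rkt, so the filter name/value below ('operators', 'sqrt') are
# assumptions about that tool's CLI rather than something taken from this file.
def _example_filter_cores():
    return filter_cores('operators', 'sqrt')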
def random_float(nbits):
if nbits == 64:
return float(sfpy.Float64(random.randint(0, 0xffffffffffffffff)))
elif nbits == 32:
return float(sfpy.Float32(random.randint(0, 0xffffffff)))
elif nbits == 16:
return float(sfpy.Float16(random.randint(0, 0xffff)))
else:
raise ValueError('nbits must be 64, 32, or 16, got: {}'.format(nbits))
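# Small sketch: draw a few uniformly random bit patterns reinterpreted as binary64
# floats (the result can legitimately include infinities and NaNs).
def _example_random_floats(n=5):
    return [random_float(64) for _ in range(n)]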
def random_posit(nbits):
if nbits == 32:
return float(sfpy.Posit32(random.randint(0, 0xffffffff)))
if nbits == 16:
return float(sfpy.Posit16(random.randint(0, 0xffff)))
if nbits == 8:
return float(sfpy.Posit8(random.randint(0, 0xff)))
else:
raise ValueError('nbits must be 32, 16, or 8, got: {}'.format(nbits))
def type_to_precision(cores):
for core in cores:
if 'type' in core.props and 'precision' not in core.props:
core.props['precision'] = core.props.pop('type')
def strip_precision(cores):
for core in cores:
if 'type' in core.props or 'precision' in core.props:
core.props = core.props.copy()
if 'type' in core.props:
core.props.pop('type')
if 'precision' in core.props:
core.props.pop('precision')
def test_canon(core):
ref = run_tool('canonicalizer.rkt', core)
try:
ref_core = fpcparser.compile1(ref)
except ValueError:
print('could not parse output:\n{}'.format(ref))
return True
ti_canon = Canonicalizer.translate(core)
ti_cond = Condenser.translate(core)
ti_canon2 = Canonicalizer.translate(ti_cond)
ti_cond2 = Condenser.translate(ti_canon)
failed = False
if not ti_canon == ti_canon2:
print('canonicalization failure on {}!'.format(str(core.name)))
print('canon = {}'.format(ti_canon.sexp))
print('canon(cond) = {}'.format(ti_canon2.sexp))
failed = True
if not ti_cond == ti_cond2:
print('condensation failed on {}!'.format(str(core.name)))
print('cond = {}'.format(ti_cond.sexp))
print('cond(canon) = {}'.format(ti_cond2.sexp))
failed = True
if not ti_canon.e == ref_core.e:
print('canonicalization failed vs FPBench on {}!'.format(str(core.name)))
print('canon = {}'.format(ti_canon.sexp))
print('FPBench = {}'.format(ref_core.sexp))
failed = True
return failed
def test_native_np(core):
ctx = evalctx.EvalCtx(props=core.props)
args = []
run_native = True
for name, props in core.inputs:
argctx = ctx.let(props=props)
prec = str(argctx.props.get('precision', 'binary64')).strip().lower()
if prec in evalctx.binary64_synonyms:
args.append(random_float(64))
elif prec in evalctx.binary32_synonyms:
args.append(random_float(32))
run_native = False
else:
return None
if run_native:
native_answer = native.Interpreter.interpret(core, args)
else:
native_answer = None
np_answer = np.Interpreter.interpret(core, args)
if native_answer is None:
return None
else:
isexact = (math.isnan(native_answer) and math.isnan(float(np_answer))) or native_answer == np_answer
isclose = isexact or math.isclose(native_answer, float(np_answer))
if not isclose:
print('failure on {}\n {}\n native={} vs. np={}'.format(
str(core.name), repr(args), repr(native_answer), repr(np_answer),
))
elif not isexact:
print('mismatch on {}\n {}\n native={} vs. np={}'.format(
str(core.name), repr(args), repr(native_answer), repr(np_answer)
))
return not isclose
def test_np_softfloat_ieee754(core):
npctx = evalctx.EvalCtx(props=core.props)
sfctx = evalctx.IEEECtx(props=core.props)
spctx = evalctx.IEEECtx(props=core.props)
ieeectx = evalctx.IEEECtx(props=core.props)
args = []
run_posit = True
for name, props in core.inputs:
np_argctx = npctx.let(props=props)
prec = str(np_argctx.props.get('precision', 'binary64')).strip().lower()
if prec in evalctx.binary64_synonyms:
args.append(random_float(64))
run_posit = False
elif prec in evalctx.binary32_synonyms:
args.append(random_float(32))
else:
return None
np_answer = float(np.Interpreter.interpret(core, args))
sf_answer = float(softfloat.Interpreter.interpret(core, args))
ieee_answer = float(ieee754.Interpreter.interpret(core, args))
isexact = (math.isnan(sf_answer) and math.isnan(np_answer) and math.isnan(ieee_answer)) or sf_answer == np_answer == ieee_answer
isclose = isexact or (math.isclose(sf_answer, np_answer) and math.isclose(ieee_answer, np_answer))
if not isclose:
print('failure on {}\n {}\n np={} vs. sf={} vs. ieee={}'.format(
str(core.name), repr(args), repr(np_answer), repr(sf_answer), repr(ieee_answer)
))
elif not isexact:
print('mismatch on {}\n {}\n np={} vs. sf={} vs. ieee={}'.format(
str(core.name), repr(args), repr(np_answer), repr(sf_answer), repr(ieee_answer)
))
if run_posit:
sp_answer = float(softposit.Interpreter.interpret(core, args))
sp_isclose = math.isinf(sf_answer) or math.isnan(sf_answer) or math.isclose(sf_answer, sp_answer, rel_tol=1e-01)
if not sp_isclose:
print('posit mismatch on {}\n {}\n sp={} vs. sf={}'.format(
str(core.name), repr(args), repr(sp_answer), repr(sf_answer)
))
return not isclose
fctxs = [
evalctx.IEEECtx(props={'precision': fpcast.Var('binary64')}),
evalctx.IEEECtx(props={'precision': fpcast.Var('binary32')}),
evalctx.IEEECtx(props={'precision': fpcast.Var('binary16')}),
]
def test_float(core, ctx):
args = [random_float(ctx.w + ctx.p) for name, props in core.inputs]
sf_answer = float(softfloat.Interpreter.interpret(core, args, ctx=ctx))
ieee_answer = float(ieee754.Interpreter.interpret(core, args, ctx=ctx))
isexact = (math.isnan(sf_answer) and math.isnan(ieee_answer)) or sf_answer == ieee_answer
isclose = isexact or math.isclose(sf_answer, ieee_answer)
if not isclose:
        print('failure on {}\n {}\n sf={} vs. ieee754={}'.format(
str(core.name), repr(args), repr(sf_answer), repr(ieee_answer),
))
elif not isexact:
        print('mismatch on {}\n {}\n sf={} vs. ieee754={}'.format(
str(core.name), repr(args), repr(sf_answer), repr(ieee_answer)
))
return not isclose
pctxs = [
evalctx.PositCtx(props={'precision': fpcast.Var('binary32')}),
evalctx.PositCtx(props={'precision': fpcast.Var('binary16')}),
evalctx.PositCtx(props={'precision': fpcast.Var('binary8')}),
]
def test_posit(core, ctx):
args = [random_posit(ctx.nbits) for name, props in core.inputs]
sp_answer = float(softposit.Interpreter.interpret(core, args, ctx=ctx))
posit_answer = float(posit.Interpreter.interpret(core, args, ctx=ctx))
isexact = (((math.isinf(sp_answer) or math.isnan(sp_answer))
and (math.isinf(posit_answer) or math.isnan(posit_answer)))
or sp_answer == posit_answer)
isclose = isexact or math.isclose(sp_answer, float(posit_answer))
if not isclose:
        print('failure on {}\n {}\n sp={} vs. posit={}'.format(
str(core.name), repr(args), repr(sp_answer), repr(posit_answer),
))
elif not isexact:
        print('mismatch on {}\n {}\n sp={} vs. posit={}'.format(
str(core.name), repr(args), repr(sp_answer), repr(posit_answer)
))
return not isclose
setup = """
class A(object):
foo = 0
n_flat_classes = 1000
n_child_classes = 1000
flat_classes = []
for i in range(n_flat_classes):
name = 'B_' + str(i)
flat_classes.append(type(name, (A,), dict(foo=i + 1)))
_last_child_class = A
child_classes = []
for i in range(n_child_classes):
name = 'C_' + str(i)
_last_child_class = type(name, (_last_child_class,), dict(foo=i + 1 + n_child_classes))
child_classes.append(_last_child_class)
B0 = flat_classes[0]()
B69 = flat_classes[69]()
A0 = child_classes[0]()
A69 = child_classes[69]()
A_1 = child_classes[-1]()
flat_dispatch = {cls:cls.foo for cls in flat_classes}
child_dispatch = {cls:cls.foo for cls in child_classes}
"""
import timeit
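# Hedged sketch of how the setup string above could be driven with timeit; the
# exact statements being timed are an assumption, not taken from the original code.
def _example_dispatch_timing(number=100000):
    flat = timeit.timeit('flat_dispatch[type(B69)]', setup=setup, number=number)
    child = timeit.timeit('child_dispatch[type(A69)]', setup=setup, number=number)
    return flat, child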
def run_test(test, cores, reps=10, ctx=None):
print('Running test {} on {:d} cores...'.format(repr(test), len(cores)))
i = 0
attempts = 0
failures = 0
for core in cores:
try:
print('cores[{:d}] {} '.format(i, str(core.name)), end='', flush=True)
any_attempts = False
any_fails = False
for rep in range(reps):
if ctx is None:
attempt = test(core)
else:
attempt = test(core, ctx)
any_attempts = any_attempts or (attempt is not None)
any_fails = any_fails or attempt
if attempt:
print('!', end='', flush=True)
else:
print('.', end='', flush=True)
print('')
if any_attempts:
attempts += 1
if any_fails:
failures += 1
except KeyboardInterrupt:
print('ABORT', flush=True)
continue
finally:
i += 1
print('\n...Done. {:d} attempts, {:d} failures.'.format(attempts, failures))
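# Hedged driver sketch: filter the FPBench suite, parse the result, and run one of
# the tests above over every core. fpcparser.compile() parsing a multi-core string
# is an assumption; compile1() is what this file demonstrably uses for single cores.
def _example_test_drive(reps=3):
    cores = fpcparser.compile(filter_cores('operators', 'sqrt'))
    run_test(test_canon, cores, reps=reps)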
def test_posit_conversion(es, nbits):
ptype = softposit.softposit_precs[(es, nbits)]
posit_values = [float(ptype(i)) for i in range(1 << nbits)]
posit_values.sort()
nearby_cases = set()
for a in posit_values:
nearby_cases.add(float(numpy.nextafter(a, -numpy.inf)))
nearby_cases.add(float(numpy.nextafter(a, numpy.inf)))
arithmetic_means = set()
geometric_means = set()
for a, b in zip(posit_values, posit_values[1:]):
mean = (a + b) / 2
arithmetic_means.add(float(mean))
nearby_cases.add(float(numpy.nextafter(mean, -numpy.inf)))
nearby_cases.add(float(numpy.nextafter(mean, numpy.inf)))
geomean = math.sqrt(a * b)
geometric_means.add(float(geomean))
nearby_cases.add(float(numpy.nextafter(geomean, -numpy.inf)))
nearby_cases.add(float(numpy.nextafter(geomean, numpy.inf)))
cases = set().union(posit_values, arithmetic_means, geometric_means, nearby_cases)
more_cases = set()
for case in cases:
more_cases.add(case)
more_cases.add(-case)
if case == 0.0:
more_cases.add(float('inf'))
more_cases.add(float('-inf'))
else:
more_cases.add(1/case)
more_cases.add(-1/case)
sorted_cases = sorted(more_cases)
print('{:d} test cases for rounding'.format(len(sorted_cases)))
for f in sorted_cases:
softposit_answer = ptype(f)
posit_answer = posit.Posit(f, ctx=posit.posit_ctx(es, nbits))
if not float(softposit_answer) == float(posit_answer):
print('case {}: {} != {}'.format(repr(f), str(softposit_answer), str(posit_answer)))
def rounding_cases(dtype, nbits, maxcases=None):
if maxcases is None:
values = [float(dtype(i)) for i in range(1 << nbits)]
else:
imax = (1 << nbits) - 1
values = set()
for case in range(maxcases):
i = random.randint(0, imax)
if i > 0:
values.add(float(dtype(i-1)))
values.add(float(dtype(i)))
if i < imax:
values.add(float(dtype(i+1)))
values = sorted(values)
nearby_values = set()
for a in values:
nearby_values.add(float(numpy.nextafter(a, -numpy.inf)))
nearby_values.add(float(numpy.nextafter(a, numpy.inf)))
arithmetic_means = set()
geometric_means = set()
for a, b in zip(values, values[1:]):
mean = (a + b) / 2
arithmetic_means.add(float(mean))
nearby_values.add(float(numpy.nextafter(mean, -numpy.inf)))
nearby_values.add(float(numpy.nextafter(mean, numpy.inf)))
try:
geomean = math.sqrt(a * b)
geometric_means.add(float(geomean))
nearby_values.add(float(numpy.nextafter(geomean, -numpy.inf)))
nearby_values.add(float(numpy.nextafter(geomean, numpy.inf)))
except Exception:
pass
cases = set().union(values, arithmetic_means, geometric_means, nearby_values)
more_cases = set()
for case in cases:
if not math.isnan(case):
more_cases.add(case)
more_cases.add(-case)
if case == 0.0:
more_cases.add(float('inf'))
more_cases.add(float('-inf'))
else:
more_cases.add(1/case)
more_cases.add(-1/case)
return sorted(more_cases)
def test_posit_rounding(es, nbits, maxcases=None):
dtype = softposit.softposit_precs[(es, nbits)]
ctx = posit.posit_ctx(es, nbits)
cases = rounding_cases(dtype, nbits, maxcases=maxcases)
print('Testing posit rounding on {:d} cases...'.format(len(cases)), flush=True)
for f in cases:
softposit_answer = dtype(f)
posit_answer = posit.Posit(f, ctx=ctx)
if not (float(softposit_answer) == float(posit_answer)):
print(' case {}: {} != {}'.format(repr(f), str(softposit_answer), str(posit_answer)))
print('... Done.', flush=True)
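# Hedged usage sketch: the (es, nbits) and (w, p) pairs below assume standard
# posit16 and binary32 layouts; the actual keys of softposit.softposit_precs and
# softfloat.softfloat_precs may differ.
def _example_rounding_checks():
    test_posit_rounding(1, 16, maxcases=1000)
    test_float_rounding(8, 24, maxcases=1000)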
def test_float_rounding(w, p, maxcases=None):
dtype = softfloat.softfloat_precs[(w, p)]
ctx = ieee754.ieee_ctx(w, p)
cases = rounding_cases(dtype, w+p, maxcases=maxcases)
print('Testing float rounding on {:d} cases...'.format(len(cases)), flush=True)
for f in cases:
softfloat_answer = dtype(f)
        ieee754_answer =
# flow_collector/netflow_v9.py (from repository xyTel/flowanalyzer-kubernetes)
# Copyright (c) 2017, Manito Networks, LLC
# All rights reserved.
### Imports ###
import time, datetime, socket, struct, sys, os, json, collections, itertools, logging, logging.handlers, getopt
from struct import *
from elasticsearch import Elasticsearch, helpers
from IPy import IP
# Parsing functions
from parser_modules import mac_address, icmp_parse, ip_parse, netflowv9_parse, int_parse, ports_and_protocols, name_lookups
# Field types, defined ports, etc
from field_types import v9_fields
from netflow_options import *
### Get the command line arguments ###
try:
    arguments = getopt.getopt(sys.argv[1:], "hl:", ["help", "log="])  # long option names take no leading dashes
for option_set in arguments:
for opt, arg in option_set:
if opt in ('-l', '--log'): # Log level
arg = arg.upper() # Uppercase for matching and logging.basicConfig() format
if arg in (["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]):
log_level = arg # Use what was passed in arguments
elif opt in ('-h', '--help'): # Help file
with open("./help.txt") as help_file:
print(help_file.read())
sys.exit()
else: # No options
pass
except Exception:
sys.exit("Unsupported or badly formed options, see -h for available arguments.")
### Logging Level ###
# Per https://docs.python.org/2/howto/logging.html
try:
log_level # Check if log level was passed in from command arguments
except NameError:
log_level = "WARNING" # Use default logging level
logging.basicConfig(level=str(log_level)) # Set the logging level
logging.warning('Log level set to ' + str(log_level) + " - OK") # Show the logging level for debug
### DNS Lookups ###
#
# Reverse lookups
try:
if dns is False:
logging.warning("DNS reverse lookups disabled - DISABLED")
elif dns is True:
logging.warning("DNS reverse lookups enabled - OK")
else:
logging.warning("DNS enable option incorrectly set - DISABLING")
dns = False
except:
logging.warning("DNS enable option not set - DISABLING")
dns = False
# RFC-1918 reverse lookups
try:
if lookup_internal is False:
logging.warning("DNS local IP reverse lookups disabled - DISABLED")
elif lookup_internal is True:
logging.warning("DNS local IP reverse lookups enabled - OK")
else:
logging.warning("DNS local IP reverse lookups incorrectly set - DISABLING")
lookup_internal = False
except:
logging.warning("DNS local IP reverse lookups not set - DISABLING")
lookup_internal = False
# Check if the Netflow v9 port is specified
try:
netflow_v9_port
except NameError: # Not specified, use default
netflow_v9_port = 9995
logging.warning("Netflow v9 port not set in netflow_options.py, defaulting to " + str(netflow_v9_port) + " - OK")
# Set up socket listener
try:
netflow_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
netflow_sock.bind(('0.0.0.0', netflow_v9_port))
logging.warning("Bound to port " + str(netflow_v9_port) + " - OK")
except socket.error as socket_error:
logging.critical("Could not open or bind a socket on port " + str(netflow_v9_port) + " - FAIL")
logging.critical(str(socket_error))
sys.exit()
# Spin up ES instance
es = Elasticsearch([elasticsearch_host])
# Stage individual flow
global flow_index
flow_index = {}
flow_index["_source"] = {}
# Stage multiple flows for the bulk Elasticsearch API index operation
global flow_dic
flow_dic = []
# Cache the Netflow v9 templates in received order to decode the data flows. ORDER MATTERS FOR TEMPLATES.
global template_list
template_list = {}
# Record counter for Elasticsearch bulk API upload trigger
record_num = 0
### Netflow v9 Collector ###
if __name__ == "__main__":
icmp_parser = icmp_parse() # ICMP Types and Codes
ip_parser = ip_parse() # Unpacking and parsing IPv4 and IPv6 addresses
mac = mac_address() # Unpacking and parsing MAC addresses and OUIs
netflow_v9_parser = netflowv9_parse() # Parsing Netflow v9 structures
int_un = int_parse() # Unpacking and parsing integers
ports_protocols_parser = ports_and_protocols() # Ports and Protocols
name_lookups = name_lookups() # DNS reverse lookups
# Continually collect packets
while True:
pointer = 0 # Tracking location in the packet
flow_counter = 0 # For debug purposes only
# Listen for packets inbound
flow_packet_contents, sensor_address = netflow_sock.recvfrom(65565)
### Unpack the flow packet header ###
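        # Netflow v9 packet header (RFC 3954): 20 bytes in network byte order,
        # version (2B), count (2B), sysUptime (4B), unixSecs (4B),
        # sequence number (4B), source ID (4B) -> struct format '!HHLLLL'.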
try:
logging.info("Unpacking header from " + str(sensor_address[0]))
packet = {} # Flow header attributes cache
(
packet["netflow_version"],
packet["total_flow_count"],
packet["sys_uptime"],
packet["unix_secs"],
packet["sequence_number"],
packet["source_id"]
) = struct.unpack('!HHLLLL', flow_packet_contents[0:20]) # Unpack header
packet["Sensor"] = str(sensor_address[0])
pointer += 20 # Move past the packet header
logging.info(str(packet))
# Something went wrong unpacking the header, bail out
except Exception as flow_header_error:
logging.warning("Failed unpacking flow header from " + str(sensor_address[0]) + " - " + str(flow_header_error))
continue
# Check Netflow version
if int(packet["netflow_version"]) != 9:
logging.warning("Received a non-Netflow v9 packet from " + str(sensor_address[0]) + " - SKIPPING PACKET")
continue # Bail out
while True: # Iterate through all flows in the packet
# Unpack flow set ID and the length
try:
(flow_set_id, flow_set_length) = struct.unpack('!HH', flow_packet_contents[pointer:pointer+4])
logging.info("Found flow ID " + str(flow_set_id) + ", length " + str(flow_set_length) + " at " + str(pointer))
except:
logging.info("Out of bytes to unpack, stopping - OK")
break
pointer += 4 # Advance past the flow ID and Length
logging.info("Finshed, position " + str(pointer))
# Flow data set
if flow_set_id > 255:
logging.info("Unpacking data set " + str(flow_set_id) + ", position " + str(pointer))
hashed_id = hash(str(sensor_address[0])+str(flow_set_id))
### Missing template, drop the flow ###
if hashed_id not in template_list:
logging.warning("Waiting on template for set " + str(flow_set_id) + " from " + str(sensor_address[0]) + ", sequence " + str(packet["sequence_number"]) + " - DROPPING")
# Advance to the end of the flow
pointer = (flow_set_length + pointer)-4
logging.info("Finished, position " + str(pointer))
continue
data_position = pointer
# Get the current UTC time for the flows
now = datetime.datetime.utcnow()
if template_list[hashed_id]["Type"] == "Flow Data":
while data_position+4 <= (flow_set_length + (pointer-4)):
# Cache the flow data, to be appended to flow_dic[]
flow_index = {
"_index": str("flow-" + now.strftime("%Y-%m-%d")),
"_type": "Flow",
"_source": {
"Sensor": sensor_address[0],
"Flow Type": "Netflow v9",
"Sequence": packet["sequence_number"],
"Source ID": packet["source_id"],
"Time": now.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (now.microsecond / 1000) + "Z",
}
}
flow_counter += 1
record_num += 1
logging.info("Data flow number " + str(flow_counter) + ", set ID " + str(flow_set_id) + " from " + str(sensor_address[0]))
### Iterate through the ordered template ###
for template_key, field_size in template_list[hashed_id]["Definitions"].iteritems():
# Check if the template key is defined in the Netflow v9 standard fields
#
# Skip this field if it's not defined, even though it's in the template
try:
v9_fields[template_key]
except (KeyError):
logging.info("Skipping undefined field (template_key,field_size) - " + str((template_key, field_size)))
data_position += field_size
continue # Skip this undefined field
### Integer field ###
if v9_fields[template_key]["Type"] == "Integer":
# Unpack the integer
flow_payload = int_un.integer_unpack(flow_packet_contents,data_position,field_size)
# Special integer-type fields
if template_key not in ([4, 32, 139]):
pass
else:
# IANA protocol number in case the customer wants to sort by protocol number
if template_key == 4:
flow_index["_source"]['Protocol Number'] = flow_payload
# Do the special calculations for ICMP Code and Type (% operator)
elif template_key in ([32,139]):
num_icmp = icmp_parser.icmp_num_type_code(flow_payload)
flow_index["_source"]['ICMP Type'] = num_icmp[0]
flow_index["_source"]['ICMP Code'] = num_icmp[1]
human_icmp = icmp_parser.icmp_human_type_code(flow_payload)
flow_index["_source"]['ICMP Parsed Type'] = human_icmp[0]
flow_index["_source"]['ICMP Parsed Code'] = human_icmp[1]
else:
pass
### IPv4 field ###
elif v9_fields[template_key]["Type"] == "IPv4":
flow_payload = ip_parser.parse_ipv4(flow_packet_contents,data_position,field_size)
flow_index["_source"]["IP Protocol Version"] = 4
### IPv6 field ###
elif v9_fields[template_key]["Type"] == "IPv6":
flow_payload = ip_parser.parse_ipv6(flow_packet_contents,data_position,field_size)
flow_index["_source"]["IP Protocol Version"] = 6
### MAC Address field ###
elif v9_fields[template_key]["Type"] == "MAC":
# Parse MAC
parsed_mac = mac.mac_packed_parse(flow_packet_contents,data_position,field_size)
flow_payload = parsed_mac[0] # Parsed MAC address
### MAC Address OUIs ###
#
# Incoming Source MAC
if template_key == 56:
flow_index["_source"]['Incoming Source MAC OUI'] = parsed_mac[1]
# Outgoing Destination MAC
elif template_key == 57:
flow_index["_source"]['Outgoing Destination MAC OUI'] = parsed_mac[1]
# Incoming Destination MAC
elif template_key == 80:
flow_index["_source"]['Incoming Destination MAC OUI'] = parsed_mac[1]
# Outgoing Source MAC
elif template_key == 81:
flow_index["_source"]['Outgoing Source MAC OUI'] = parsed_mac[1]
# Station MAC Address
elif template_key == 365:
flow_index["_source"]['Station MAC Address OUI'] = parsed_mac[1]
# WTP MAC Address
elif template_key == 367:
flow_index["_source"]['WTP MAC Address OUI'] = parsed_mac[1]
# Dot1q Customer Source MAC Address
elif template_key == 414:
flow_index["_source"]['Dot1q Customer Source MAC Address OUI'] = parsed_mac[1]
# Dot1q Customer Destination MAC Address
elif template_key == 415:
flow_index["_source"]['Dot1q Customer Destination MAC Address OUI'] = parsed_mac[1]
### Something Else ###
else:
logging.warning("Unsupported field number " + str(template_key) + ", size " + str(field_size) + " from " + str(sensor_address[0]) + " in sequence " | |
dihedrals:
for pb in p.bonded_to:
if pb is not p1 and p is not p2:
self.add_dihedral(pb, p, p1, p2, f)
for p in p2.bonded_to:
if angles:
if p is not p1:
self.add_angle(p1, p2, p, f)
if dihedrals:
for pb in p.bonded_to:
if pb is not p2 and p is not p1:
self.add_dihedral(p1, p2, p, pb, f)
if dihedrals:
for pb1 in p1.bonded_to:
for pb2 in p2.bonded_to:
if pb1 is not p2 and pb2 is not p1:
self.add_dihedral(pb1, p1, p2, pb2, f)
if impropers:
if self.ff_class == '2':
for perm in permutations(p1.bonded_to, 3):
unique = True
for i in self.impropers:
if i.a is not p1:
continue
if set([i.b, i.c, i.d]) == set([perm[0], perm[1],
perm[2]]):
unique = False
break
if unique:
self.add_improper(p1, perm[0], perm[1], perm[2], f)
for perm in permutations(p2.bonded_to, 3):
unique = True
for i in self.impropers:
if i.a is not p2:
continue
if set([i.b, i.c, i.d]) == set([perm[0], perm[1],
perm[2]]):
unique = False
break
if unique:
self.add_improper(p2, perm[0], perm[1], perm[2], f)
def add_bond(self, a=None, b=None, f=None):
"""pysimm.system.System.add_bond
Add :class:`~pysimm.system.Bond` to system between two particles
Args:
a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Bond`
b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Bond`
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is b:
return
a_name = a.type.eq_bond or a.type.name
b_name = b.type.eq_bond or b.type.name
btype = self.bond_types.get('%s,%s' % (a_name, b_name))
if not btype and f:
btype = f.bond_types.get('%s,%s' % (a_name, b_name))
if btype:
bt = btype[0].copy()
self.bond_types.add(bt)
btype = self.bond_types.get('%s,%s' % (a_name, b_name))
if btype:
new_b = Bond(type=btype[0], a=a, b=b)
self.bonds.add(new_b)
if a.bonded_to is None or b.bonded_to is None:
self.add_particle_bonding()
if a.bonded_to and b not in a.bonded_to:
a.bonded_to.add(b)
if b.bonded_to and a not in b.bonded_to:
b.bonded_to.add(a)
else:
error_print('error: system does not contain bond type named %s,%s '
'or could not find type in forcefield supplied'
% (a_name, b_name))
return
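    # Hedged usage sketch (assumes particles are already typed and that a pysimm
    # forcefield object such as forcefield.Dreiding() is available; the concrete
    # forcefield class is an assumption, not taken from this file):
    #
    #   s.add_bond(s.particles[1], s.particles[2], f=forcefield.Dreiding())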
def add_angle(self, a=None, b=None, c=None, f=None):
"""pysimm.system.System.add_angle
Add :class:`~pysimm.system.Angle` to system between three particles
Args:
a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Angle`
b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Angle` (middle particle)
c: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Angle`
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is c:
return
a_name = a.type.eq_angle or a.type.name
b_name = b.type.eq_angle or b.type.name
c_name = c.type.eq_angle or c.type.name
atype = self.angle_types.get(
'%s,%s,%s' % (a_name, b_name, c_name),
item_wildcard=None
)
if not atype and f:
atype = self.angle_types.get(
'%s,%s,%s' % (a_name, b_name, c_name)
)
atype.extend(
f.angle_types.get(
'%s,%s,%s' % (a_name, b_name, c_name)
)
)
atype = sorted(atype, key=lambda x: x.name.count('X'))
if atype:
if not self.angle_types.get(atype[0].name, item_wildcard=None):
atype = self.angle_types.add(atype[0].copy())
else:
atype = self.angle_types.get(atype[0].name, item_wildcard=None)[0]
elif atype:
atype = atype[0]
if atype:
self.angles.add(Angle(type=atype, a=a, b=b, c=c))
else:
error_print('error: system does not contain angle type named '
'%s,%s,%s or could not find type in forcefield supplied'
% (a_name, b_name, c_name))
return
def add_dihedral(self, a=None, b=None, c=None, d=None, f=None):
"""pysimm.system.System.add_dihedral
Add :class:`~pysimm.system.Dihedral` to system between four particles
Args:
a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral`
b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral` (middle particle)
c: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral` (middle particle)
d: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral`
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is c or b is d:
return
a_name = a.type.eq_dihedral or a.type.name
b_name = b.type.eq_dihedral or b.type.name
c_name = c.type.eq_dihedral or c.type.name
d_name = d.type.eq_dihedral or d.type.name
dtype = self.dihedral_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name),
item_wildcard=None
)
if not dtype and f:
dtype = self.dihedral_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name)
)
dtype.extend(
f.dihedral_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name)
)
)
dtype = sorted(dtype, key=lambda x: x.name.count('X'))
if dtype:
if not self.dihedral_types.get(dtype[0].name, item_wildcard=None):
dtype = self.dihedral_types.add(dtype[0].copy())
else:
dtype = self.dihedral_types.get(dtype[0].name, item_wildcard=None)[0]
elif dtype:
dtype = dtype[0]
if dtype:
self.dihedrals.add(Dihedral(type=dtype, a=a, b=b, c=c, d=d))
else:
error_print('error: system does not contain dihedral type named '
'%s,%s,%s,%s or could not find type in forcefield '
'supplied' % (a_name, b_name,
c_name, d_name))
error_print('tags: %s %s %s %s' % (a.tag, b.tag, c.tag, d.tag))
return
def add_improper(self, a=None, b=None, c=None, d=None, f=None):
"""pysimm.system.System.add_improper
Add :class:`~pysimm.system.Improper` to system between four particles
Args:
a: :class:`~pysimm.system.pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper` (middle particle)
b: :class:`~pysimm.system.pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper`
c: :class:`~pysimm.system.pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper`
d: :class:`~pysimm.system.pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper`
f: :class:`~pysimm.system.pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is b or a is c or a is d:
return
a_name = a.type.eq_improper or a.type.name
b_name = b.type.eq_improper or b.type.name
c_name = c.type.eq_improper or c.type.name
d_name = d.type.eq_improper or d.type.name
itype = self.improper_types.get('%s,%s,%s,%s'
% (a_name, b_name,
c_name, d_name),
improper_type=True,
item_wildcard=None)
if not itype and f:
itype = self.improper_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name),
improper_type=True
)
itype.extend(
f.improper_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name),
improper_type=True
)
)
itype = sorted(itype, key=lambda x: x.name.count('X'))
if itype:
if not self.improper_types.get(itype[0].name, item_wildcard=None, improper_type=True):
itype = self.improper_types.add(itype[0].copy())
else:
itype = self.improper_types.get(itype[0].name, item_wildcard=None, improper_type=True)[0]
elif itype:
itype = itype[0]
if itype:
self.impropers.add(Improper(type=itype, a=a, b=b, c=c, d=d))
else:
return
def check_forcefield(self):
"""pysimm.system.System.check_forcefield
Iterates through particles and prints the following:
tag
type name
type element
type description
bonded elements
Args:
None
Returns:
None
"""
if not self.objectified:
self.objectify()
for p in self.particles:
p.bond_elements = [x.a.type.elem if p is x.b else
x.b.type.elem for x in p.bonds]
p.nbonds = len(p.bond_elements)
print(p.tag, p.type.name, p.type.elem, p.type.desc, p.bond_elements)
def apply_forcefield(self, f, charges='default', set_box=True, box_padding=10,
update_ptypes=False, skip_ptypes=False):
"""pysimm.system.System.apply_forcefield
Applies force field data to :class:`~pysimm.system.System` based on typing rules defined in :class:`~pysimm.forcefield.Forcefield` object f
Args:
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
charges: type of charges to be applied default='default'
set_box: Update simulation box information based on particle positions default=True
box_padding: Add padding to simulation box if updating dimensions default=10 (Angstroms)
update_ptypes: If True, update particle types based on current :class:`~pysimm.system.ParticleType` names default=False
skip_ptypes: if True, do not change particle types
Returns:
None
"""
self.ff_class = f.ff_class
self.forcefield = f.name
if update_ptypes:
self.update_particle_types_from_forcefield(f)
skip_ptypes = True
if not skip_ptypes:
f.assign_ptypes(self)
if self.bonds.count > 0:
f.assign_btypes(self)
f.assign_atypes(self)
f.assign_dtypes(self)
f.assign_itypes(self)
if charges:
f.assign_charges(self, charges=charges)
if set_box:
self.set_box(box_padding, center=False)
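    # Hedged usage sketch (forcefield class and charge scheme names are assumptions):
    #
    #   from pysimm import forcefield
    #   s.apply_forcefield(forcefield.Gaff2(), charges='gasteiger')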
def apply_charges(self, f, charges='default'):
"""pysimm.system.System.apply_charges
Applies charges derived using method provided by user. Defaults to 'default'. Calls :func:`~pysimm.forcefield.Forcefield.assign_charges` method of forcefield object provided.
Args:
f: :class:`~pysimm.forcefield.Forcefield` object
charges: type of charges to be applied default='default'
Returns:
None
"""
f.assign_charges(self, charges=charges)
def write_lammps_mol(self, out_data):
"""pysimm.system.System.write_lammps_mol
Write :class:`~pysimm.system.System` data formatted as LAMMPS molecule template
Args:
out_data: where to write data, file name or 'string'
Returns:
None or string if data file if out_data='string'
"""
if out_data == 'string':
out_file = StringIO()
else:
out_file = open(out_data, 'w+')
self.set_mass()
self.set_cog()
out_file.write('%s\n\n' % self.name)
out_file.write('%s atoms\n' % self.particles.count)
out_file.write('%s bonds\n' % self.bonds.count)
out_file.write('%s angles\n' % self.angles.count)
out_file.write('%s dihedrals\n' % self.dihedrals.count)
out_file.write('%s impropers\n' % self.impropers.count)
if self.particles.count > 0:
out_file.write('Coords\n\n')
for p in self.particles:
out_file.write('{} {} {} {}\n'.format(p.tag, p.x, p.y, p.z))
out_file.write('\n')
if self.particles.count > 0:
out_file.write('Types\n\n')
for p in self.particles:
out_file.write('{} {}\n'.format(p.tag, p.type.tag))
out_file.write('\n')
if self.particles.count > 0:
out_file.write('Charges\n\n')
for p in self.particles:
out_file.write('{} {}\n'.format(p.tag, p.charge))
out_file.write('\n')
if self.bonds.count > 0:
out_file.write('Bonds\n\n')
for b in self.bonds:
out_file.write('{} {} {} {}\n'.format(b.tag, b.type.tag, b.a.tag, b.b.tag))
out_file.write('\n')
if self.angles.count > 0:
out_file.write('Angles\n\n')
for a in self.angles:
out_file.write('{} {} {} {} {}\n'.format(a.tag, a.type.tag, a.a.tag, a.b.tag, a.c.tag))
out_file.write('\n')
if self.dihedrals.count > 0:
out_file.write('Dihedrals\n\n')
for d in self.dihedrals:
out_file.write('{} {} {} {} {} {}\n'.format(d.tag, d.type.tag, d.a.tag, d.b.tag, d.c.tag, d.d.tag))
out_file.write('\n')
if self.impropers.count > 0:
out_file.write('Impropers\n\n')
for i in self.impropers:
out_file.write('{} {} {} {} {} {}\n'.format(i.tag, i.type.tag, i.a.tag, i.b.tag, i.c.tag, i.d.tag))
if out_data == 'string':
s = out_file.getvalue()
out_file.close()
return s
else:
out_file.close()
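    # Usage sketch: write the current system as a LAMMPS molecule template, either
    # to a file on disk or to an in-memory string.
    #
    #   s.write_lammps_mol('mol.lmps')
    #   mol_text = s.write_lammps_mol('string')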
def write_lammps(self, out_data, **kwargs):
"""pysimm.system.System.write_lammps
Write :class:`~pysimm.system.System` data formatted for LAMMPS
Args:
out_data: where to write data, file name or 'string'
Returns:
| |
== 'tty':
print(data)
return
if self.graphics == "zenity":
command = ['zenity', '--info', '--width=500', '--text=' + data]
elif self.graphics == "kdialog":
command = ['kdialog', '--msgbox', data]
else:
sys.exit(1)
subprocess.call(command, stderr=STDERR_REDIR)
def confirm_exit(self):
"""
Confirm exit from installer
"""
ret = self.ask(Messages.quit)
if ret == 0:
sys.exit(1)
def alert(self, text):
"""Generate alert message"""
if self.silent:
return
if self.graphics == 'tty':
print(text)
return
if self.graphics == 'zenity':
command = ['zenity', '--warning', '--text=' + text]
elif self.graphics == "kdialog":
command = ['kdialog', '--sorry', text]
else:
sys.exit(1)
subprocess.call(command, stderr=STDERR_REDIR)
def prompt_nonempty_string(self, show, prompt, val=''):
"""
Prompt user for input
"""
if self.graphics == 'tty':
if show == 0:
while True:
inp = str(getpass.getpass(prompt + ": "))
output = inp.strip()
if output != '':
return output
while True:
inp = str(get_input(prompt + ": "))
output = inp.strip()
if output != '':
return output
if self.graphics == 'zenity':
if val == '':
default_val = ''
else:
default_val = '--entry-text=' + val
if show == 0:
hide_text = '--hide-text'
else:
hide_text = ''
command = ['zenity', '--entry', hide_text, default_val,
'--width=500', '--text=' + prompt]
elif self.graphics == 'kdialog':
if show == 0:
hide_text = '--password'
else:
hide_text = '--inputbox'
command = ['kdialog', hide_text, prompt]
output = ''
while not output:
shell_command = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = shell_command.communicate()
output = out.decode('utf-8').strip()
if shell_command.returncode == 1:
self.confirm_exit()
return output
def get_user_cred(self):
"""
Get user credentials both username/password and personal certificate
based
"""
if Config.eap_outer == 'PEAP' or Config.eap_outer == 'TTLS':
self.__get_username_password()
if Config.eap_outer == 'TLS':
self.__get_p12_cred()
def __get_username_password(self):
"""
read user password and set the password property
do nothing if silent mode is set
"""
password = "a"
password1 = "b"
if self.silent:
return
if self.username:
user_prompt = self.username
elif Config.hint_user_input:
user_prompt = '@' + Config.user_realm
else:
user_prompt = ''
while True:
self.username = self.prompt_nonempty_string(
1, Messages.username_prompt, user_prompt)
if self.__validate_user_name():
break
while password != password1:
password = self.prompt_nonempty_string(
0, Messages.enter_password)
password1 = self.prompt_nonempty_string(
0, Messages.repeat_password)
if password != password1:
self.alert(Messages.passwords_differ)
self.password = password
def __get_graphics_support(self):
if os.environ.get('DISPLAY') is not None:
shell_command = subprocess.Popen(['which', 'zenity'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
shell_command.wait()
if shell_command.returncode == 0:
self.graphics = 'zenity'
else:
shell_command = subprocess.Popen(['which', 'kdialog'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
shell_command.wait()
# out, err = shell_command.communicate()
if shell_command.returncode == 0:
self.graphics = 'kdialog'
else:
self.graphics = 'tty'
else:
self.graphics = 'tty'
def __process_p12(self):
debug('process_p12')
pfx_file = os.environ['HOME'] + '/.cat_installer/user.p12'
if CRYPTO_AVAILABLE:
debug("using crypto")
try:
p12 = crypto.load_pkcs12(open(pfx_file, 'rb').read(),
self.password)
except:
debug("incorrect password")
return False
else:
if Config.use_other_tls_id:
return True
try:
self.username = p12.get_certificate().\
get_subject().commonName
except:
self.username = p12.get_certificate().\
get_subject().emailAddress
return True
else:
debug("using openssl")
command = ['openssl', 'pkcs12', '-in', pfx_file, '-passin',
'pass:' + self.password, '-nokeys', '-clcerts']
shell_command = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = shell_command.communicate()
if shell_command.returncode != 0:
return False
if Config.use_other_tls_id:
return True
out_str = out.decode('utf-8').strip()
subject = re.split(r'\s*[/,]\s*',
re.findall(r'subject=/?(.*)$',
out_str, re.MULTILINE)[0])
cert_prop = {}
for field in subject:
if field:
cert_field = re.split(r'\s*=\s*', field)
cert_prop[cert_field[0].lower()] = cert_field[1]
if cert_prop['cn'] and re.search(r'@', cert_prop['cn']):
debug('Using cn: ' + cert_prop['cn'])
self.username = cert_prop['cn']
elif cert_prop['emailaddress'] and \
re.search(r'@', cert_prop['emailaddress']):
debug('Using email: ' + cert_prop['emailaddress'])
self.username = cert_prop['emailaddress']
else:
self.username = ''
self.alert("Unable to extract username "
"from the certificate")
return True
def __select_p12_file(self):
"""
prompt user for the PFX file selection
this method is not being called in the silent mode
therefore there is no code for this case
"""
if self.graphics == 'tty':
my_dir = os.listdir(".")
p_count = 0
pfx_file = ''
for my_file in my_dir:
                if my_file.endswith('.p12') or my_file.endswith('.pfx') or \
                        my_file.endswith('.P12') or my_file.endswith('.PFX'):
p_count += 1
pfx_file = my_file
prompt = "personal certificate file (p12 or pfx)"
default = ''
if p_count == 1:
default = '[' + pfx_file + ']'
while True:
inp = get_input(prompt + default + ": ")
output = inp.strip()
if default != '' and output == '':
return pfx_file
default = ''
if os.path.isfile(output):
return output
print("file not found")
if self.graphics == 'zenity':
command = ['zenity', '--file-selection',
'--file-filter=' + Messages.p12_filter +
' | *.p12 *.P12 *.pfx *.PFX', '--file-filter=' +
Messages.all_filter + ' | *',
'--title=' + Messages.p12_title]
shell_command = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cert, err = shell_command.communicate()
if self.graphics == 'kdialog':
command = ['kdialog', '--getopenfilename',
'.', '*.p12 *.P12 *.pfx *.PFX | ' +
Messages.p12_filter, '--title', Messages.p12_title]
shell_command = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=STDERR_REDIR)
cert, err = shell_command.communicate()
return cert.decode('utf-8').strip()
def __save_sb_pfx(self):
"""write the user PFX file"""
certfile = os.environ.get('HOME') + '/.cat_installer/user.p12'
with open(certfile, 'wb') as cert:
cert.write(base64.b64decode(Config.sb_user_file))
def __get_p12_cred(self):
"""get the password for the PFX file"""
if Config.eap_inner == 'SILVERBULLET':
self.__save_sb_pfx()
else:
if self.silent:
pfx_file = self.pfx_file
else:
pfx_file = self.__select_p12_file()
try:
copyfile(pfx_file, os.environ['HOME'] +
'/.cat_installer/user.p12')
except (OSError, RuntimeError):
print(Messages.user_cert_missing)
sys.exit(1)
if self.silent:
username = self.username
if not self.__process_p12():
sys.exit(1)
if username:
self.username = username
else:
while not self.password:
self.password = self.prompt_nonempty_string(
0, Messages.enter_import_password)
if not self.__process_p12():
self.alert(Messages.incorrect_password)
self.password = ''
if not self.username:
self.username = self.prompt_nonempty_string(
1, Messages.username_prompt)
def __validate_user_name(self):
# locate the @ character in username
pos = self.username.find('@')
debug("@ position: " + str(pos))
# trailing @
if pos == len(self.username) - 1:
debug("username ending with @")
self.alert(Messages.wrongUsernameFormat)
return False
# no @ at all
if pos == -1:
if Config.verify_user_realm_input:
debug("missing realm")
self.alert(Messages.wrongUsernameFormat)
return False
debug("No realm, but possibly correct")
return True
# @ at the beginning
if pos == 0:
debug("missing user part")
self.alert(Messages.wrongUsernameFormat)
return False
pos += 1
if Config.verify_user_realm_input:
if Config.hint_user_input:
if self.username.endswith('@' + Config.user_realm, pos-1):
debug("realm equal to the expected value")
return True
debug("incorrect realm; expected:" + Config.user_realm)
self.alert(Messages.wrong_realm.format(Config.user_realm))
return False
if self.username.endswith(Config.user_realm, pos):
debug("realm ends with expected suffix")
return True
debug("realm suffix error; expected: " + Config.user_realm)
self.alert(Messages.wrong_realm_suffix.format(
Config.user_realm))
return False
pos1 = self.username.find('@', pos)
if pos1 > -1:
debug("second @ character found")
self.alert(Messages.wrongUsernameFormat)
return False
pos1 = self.username.find('.', pos)
if pos1 == pos:
debug("a dot immediately after the @ character")
self.alert(Messages.wrongUsernameFormat)
return False
debug("all passed")
return True
class WpaConf(object):
"""
Prepare and save wpa_supplicant config file
"""
def __prepare_network_block(self, ssid, user_data):
out = """network={
ssid=\"""" + ssid + """\"
key_mgmt=WPA-EAP
pairwise=CCMP
group=CCMP TKIP
eap=""" + Config.eap_outer + """
ca_cert=\"""" + os.environ.get('HOME') + """/.cat_installer/ca.pem\"
identity=\"""" + user_data.username + """\"
altsubject_match=\"""" + ";".join(Config.servers) + """\"
"""
if Config.eap_outer == 'PEAP' or Config.eap_outer == 'TTLS':
out += "phase2=\"auth=" + Config.eap_inner + "\"\n" \
" password=\"" + user_data.password + "\"\n"
if Config.anonymous_identity != '':
out += " anonymous_identity=\"" + Config.anonymous_identity + "\""
if Config.eap_outer == 'TLS':
out += " private_key_passwd=\"" + <PASSWORD> + "\"\n" \
"private_key=\"" + os.environ.get('HOME') + "/.cat_installer/user.p12\"\n"
out += "\n}"
return out
def create_wpa_conf(self, ssids, user_data):
"""Create and save the wpa_supplicant config file"""
wpa_conf = os.environ.get('HOME') + \
'/.cat_installer/cat_installer.conf'
with open(wpa_conf, 'w') as conf:
for ssid in ssids:
net = self.__prepare_network_block(ssid, user_data)
conf.write(net)
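# Hedged usage sketch: once create_wpa_conf() has written the file, a connection
# could be brought up manually with wpa_supplicant; the interface name below is an
# assumption.
#
#   wpa_supplicant -i wlan0 -c ~/.cat_installer/cat_installer.conf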
class CatNMConfigTool(object):
"""
Prepare and save NetworkManager configuration
"""
def __init__(self):
self.cacert_file = None
self.settings_service_name = None
self.connection_interface_name = None
self.system_service_name = None
self.nm_version = None
self.pfx_file = None
self.settings = None
self.user_data = None
self.bus = None
def connect_to_nm(self):
"""
connect to DBus
"""
try:
self.bus = dbus.SystemBus()
except AttributeError:
# since dbus existed but is empty we have an empty package
# this gets shipped by pyqt5
print("DBus not properly installed")
return None
except dbus.exceptions.DBusException:
print("Can't connect to DBus")
return None
# main service name
self.system_service_name = "org.freedesktop.NetworkManager"
# check NM version
self.__check_nm_version()
debug("NM version: " + self.nm_version)
if self.nm_version == "0.9" or self.nm_version == "1.0":
self.settings_service_name = self.system_service_name
self.connection_interface_name = \
"org.freedesktop.NetworkManager.Settings.Connection"
# settings proxy
sysproxy = self.bus.get_object(
self.settings_service_name,
"/org/freedesktop/NetworkManager/Settings")
# settings interface
self.settings = dbus.Interface(sysproxy, "org.freedesktop."
"NetworkManager.Settings")
elif self.nm_version == "0.8":
self.settings_service_name = "org.freedesktop.NetworkManager"
self.connection_interface_name = "org.freedesktop.NetworkMana" \
"gerSettings.Connection"
# settings proxy
sysproxy = self.bus.get_object(
self.settings_service_name,
"/org/freedesktop/NetworkManagerSettings")
# settings intrface
self.settings = dbus.Interface(
sysproxy, "org.freedesktop.NetworkManagerSettings")
else:
print(Messages.nm_not_supported)
return None
debug("NM connection worked")
return True
def __check_opts(self):
"""
set certificate files paths and test for existence of the CA cert
"""
self.cacert_file = os.environ['HOME'] + '/.cat_installer/ca.pem'
self.pfx_file = os.environ['HOME'] + '/.cat_installer/user.p12'
if not os.path.isfile(self.cacert_file):
print(Messages.cert_error)
sys.exit(2)
def __check_nm_version(self):
"""
Get the | |
"""Defines VehiclesManager class
----------------------------------------------------------------------------------------------------------
This file is part of Sim-ATAV project and licensed under MIT license.
Copyright (c) 2018 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
For questions please contact:
<NAME> (etuncali [at] asu.edu)
----------------------------------------------------------------------------------------------------------
"""
import math
import time
import numpy as np
from Sim_ATAV.simulation_control.staliro_signal import STaliroSignal
from Sim_ATAV.simulation_control.item_description import ItemDescription
from Sim_ATAV.common.coordinate_system import CoordinateSystem
class VehiclesManager(object):
"""VehiclesManager keeps track of the vehicles in the simulation environment."""
VHC_DUMMY = 0
VHC_VUT = 1
POSITION_REPORTING = 0
ROTATION_REPORTING = 1
CORNERS_REPORTING = 2
def __init__(self, supervisor_controller, controller_comm_interface):
self.emitter_name = "emitter"
self.receiver_name = 'receiver'
self.debug_mode = 0
self.vehicles = []
self.vehicle_dictionary = {}
self.VUT_dictionary = {}
self.dummy_vhc_dictionary = {}
self.total_vhc_count = 0
self.has_sent_controller_config = False
self.supervisor_control = supervisor_controller
self.time_step = 0.01
self.current_sim_time = 0.0
self.step_counter = 0
self.num_of_steps_for_jerk_calc = 10
self.acc_arr = [0.0]*(self.num_of_steps_for_jerk_calc + 3)
self.cur_jerk_compute_index = 0
self.controller_comm_interface = controller_comm_interface
# Following are set to NOT SET instead of None in order to avoid getting device at every cycle
self.supervisor_emitter = 'NOT SET'
self.supervisor_receiver = 'NOT SET'
self.reporting_dict = {}
self.collect_detection_perf_from_vehicles = []
self.detection_perf_dict = {}
self.vehicles_to_collect_control = []
self.vehicle_control_dict = {}
self.stop_before_collision_list = []
self.pedestrians_manager = None
self.detection_eval_dict = {}
self.visibility_eval_dict = {}
self.vehicle_id_dictionary = {}
def set_pedestrians_manager(self, pedestrians_manager):
"""Sets a reference to the pedestrians manager."""
self.pedestrians_manager = pedestrians_manager
def record_vehicle(self, vehicle_object, vehicle_type):
"""Add the vehicle in the records. vehicle_type can be VUT / Dummy"""
self.vehicles.append(vehicle_object)
self.vehicles[self.total_vhc_count].node = \
self.supervisor_control.get_obj_node(self.vehicles[self.total_vhc_count])
self.vehicles[self.total_vhc_count].translation = \
self.supervisor_control.get_obj_field(self.vehicles[self.total_vhc_count], "translation")
self.vehicles[self.total_vhc_count].rotation = \
self.supervisor_control.get_obj_field(self.vehicles[self.total_vhc_count], "rotation")
self.vehicles[self.total_vhc_count].name = \
self.supervisor_control.get_obj_field(self.vehicles[self.total_vhc_count], "name")
self.vehicles[self.total_vhc_count].front_right_wheel_angular_velocity = \
self.supervisor_control.get_obj_field(self.vehicles[self.total_vhc_count],
"front_right_wheel_angular_velocity")
self.vehicles[self.total_vhc_count].front_left_wheel_angular_velocity = \
self.supervisor_control.get_obj_field(self.vehicles[self.total_vhc_count],
"front_left_wheel_angular_velocity")
self.vehicles[self.total_vhc_count].rear_right_wheel_angular_velocity = \
self.supervisor_control.get_obj_field(self.vehicles[self.total_vhc_count],
"rear_right_wheel_angular_velocity")
self.vehicles[self.total_vhc_count].rear_left_wheel_angular_velocity = \
self.supervisor_control.get_obj_field(self.vehicles[self.total_vhc_count],
"rear_left_wheel_angular_velocity")
self.vehicles[self.total_vhc_count].current_position = \
self.supervisor_control.get_obj_position_3D(self.vehicles[self.total_vhc_count])
self.vehicles[self.total_vhc_count].current_orientation = \
self.supervisor_control.get_obj_orientation(self.vehicles[self.total_vhc_count], 'y')
# We use current vehicle index as its id as well:
self.vehicle_dictionary[self.vehicles[self.total_vhc_count].def_name] = self.total_vhc_count
if vehicle_type == self.VHC_VUT:
self.VUT_dictionary[self.vehicles[self.total_vhc_count].def_name] = self.total_vhc_count
if vehicle_type == self.VHC_DUMMY:
self.dummy_vhc_dictionary[self.vehicles[self.total_vhc_count].def_name] = self.total_vhc_count
        self.vehicle_id_dictionary[vehicle_object.vhc_id] = self.total_vhc_count
self.total_vhc_count += 1
def change_vehicle_pose(self, vehicle_object):
"""Change the vehicle pose. (Generally, due to an external command that changes vhc positions at each step.)"""
if vehicle_object.vhc_id in self.vehicle_id_dictionary:
vhc_ind = self.vehicle_id_dictionary[vehicle_object.vhc_id]
vhc = self.vehicles[vhc_ind]
pos = vehicle_object.current_position
rot = vehicle_object.rotation
self.supervisor_control.set_obj_position_3D(vhc, pos)
self.supervisor_control.set_obj_rotation(vhc, rot)
def update_vehicle_states(self, vhc):
"""Update the current states of the vehicle."""
vhc.previous_velocity = vhc.current_velocity[:] if vhc.current_velocity is not None else None
vhc.previous_acceleration_3d = vhc.acceleration_3d[:] if vhc.acceleration_3d is not None else None
vhc.current_position = self.supervisor_control.get_obj_position_3D(vhc)
vhc.current_orientation = self.supervisor_control.get_obj_orientation(vhc, 'y')
velocity_6d = self.supervisor_control.get_obj_velocity(vhc)
vhc.current_velocity = velocity_6d[:3]
vhc.angular_velocity_3d = velocity_6d[3:]
vhc.speed = math.sqrt(vhc.current_velocity[0]**2 + vhc.current_velocity[1]**2 + vhc.current_velocity[2]**2)
if vhc.previous_orientation is None:
vhc.yaw_rate = 0.0
else:
vhc.yaw_rate = ((vhc.current_orientation - vhc.previous_orientation) /
(self.current_sim_time - vhc.state_record_time))
if vhc.previous_velocity is None:
vhc.acceleration_3d = [0.0, 0.0, 0.0]
else:
vhc.acceleration_3d = [
(vhc.current_velocity[0] - vhc.previous_velocity[0]) / (self.current_sim_time - vhc.state_record_time),
(vhc.current_velocity[1] - vhc.previous_velocity[1]) / (self.current_sim_time - vhc.state_record_time),
(vhc.current_velocity[2] - vhc.previous_velocity[2]) / (self.current_sim_time - vhc.state_record_time)]
orient_vector = self.supervisor_control.get_obj_orientation_vector(vhc, [0.0, 0.0, 1.0])
vhc.acceleration = np.dot(np.array(vhc.acceleration_3d), orient_vector) # acceleration along vhc orientation
if vhc.previous_acceleration_3d is None:
vhc.jerk_3d = [0.0, 0.0, 0.0]
else:
vhc.jerk_3d = [((vhc.acceleration_3d[0] - vhc.previous_acceleration_3d[0]) /
(self.current_sim_time - vhc.state_record_time)),
((vhc.acceleration_3d[1] - vhc.previous_acceleration_3d[1]) /
(self.current_sim_time - vhc.state_record_time)),
((vhc.acceleration_3d[2] - vhc.previous_acceleration_3d[2]) /
(self.current_sim_time - vhc.state_record_time))]
vhc.jerk = np.dot(np.array(vhc.jerk_3d), orient_vector) # jerk along the vehicle orientation
vhc.state_record_time = self.current_sim_time
vhc.previous_orientation = vhc.current_orientation
def update_all_vehicles_states(self):
"""Updates the state of the all vehicles."""
for vhc in self.vehicles:
self.update_vehicle_states(vhc)
def get_reference_value(self, ref_index, ref_field, current_sim_time):
"""Get value of the reference field of the indexed vehicle at the given time."""
if ref_index == 0: # reference is time
ret_val = current_sim_time
else:
vhc = self.vehicles[ref_index-1]
if ref_field == 0:
ret_val = vhc.speed
elif ref_field == 1:
pos = self.supervisor_control.get_obj_position_3D(vhc)
ret_val = pos[0]
elif ref_field == 2:
pos = self.supervisor_control.get_obj_position_3D(vhc)
ret_val = pos[1]
elif ref_field == 3:
pos = self.supervisor_control.get_obj_position_3D(vhc)
ret_val = pos[2]
else:
ret_val = 0.0
return ret_val
def transmit_all_vhc_positions(self, emitter):
"""Transmit all vehicle positions through emitter."""
for vhc in self.vehicles:
# print('Vhc {} Position: {}'.format(vhc.vhc_id, vhc.current_position))
self.controller_comm_interface.transmit_vehicle_position_message(emitter, vhc.vhc_id, vhc.current_position)
def transmit_init_controller_params(self, emitter):
"""Transmit the neural network controller parameters."""
if self.has_sent_controller_config is False:
for vhc in self.vehicles:
for c_param in vhc.controller_parameters:
self.controller_comm_interface.transmit_set_controller_parameters_message(
emitter=emitter,
vhc_id=c_param.vehicle_id,
parameter_name=c_param.parameter_name,
parameter_data=c_param.parameter_data)
time.sleep(0.1)
self.has_sent_controller_config = True
def apply_manual_position_control(self, vhc_id):
"""Manually control the position of the vehicle."""
vhc = self.vehicles[self.dummy_vhc_dictionary[vhc_id]]
pos = self.supervisor_control.get_obj_position_3D(vhc)
for sig in vhc.signal:
reference_value = self.get_reference_value(sig.ref_index, sig.ref_field, self.current_sim_time)
signal_value = sig.get_signal_value_corresponding_to_value_of_reference(
reference_value, STaliroSignal.INTERPOLATION_TYPE_NONE)
if sig.signal_type == sig.SIGNAL_TYPE_SPEED:
pos[0] = pos[0] + signal_value * self.time_step
self.supervisor_control.set_obj_position_3D(vhc, pos)
if sig.signal_type == sig.SIGNAL_TYPE_Y_POSITION:
pos[2] = signal_value
self.supervisor_control.set_obj_position_3D(vhc, pos)
def set_time_step(self, time_step):
"""Set the time_step."""
self.time_step = time_step
self.num_of_steps_for_jerk_calc = int(math.ceil(0.05 / time_step))
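        # e.g. a 10 ms time step gives ceil(0.05 / 0.01) = 5 steps, i.e. a ~50 ms
        # window for the jerk computation.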
if self.debug_mode > 0:
print('Num of steps for jerk calculation = {}'.format(self.num_of_steps_for_jerk_calc))
self.acc_arr = [0.0]*(self.num_of_steps_for_jerk_calc+3)
def get_emitter(self):
"""Returns the supervisor emitter"""
supervisor_emitter = self.supervisor_control.get_emitter(self.emitter_name)
return supervisor_emitter
def get_receiver(self):
"""Returns the supervisor receiver"""
supervisor_receiver = self.supervisor_control.get_receiver(self.receiver_name)
if supervisor_receiver is not None:
supervisor_receiver.enable(10)
return supervisor_receiver
def get_det_perf(self, vehicle_index, object_index, object_type_text):
"""Get detection performance for the requested object."""
det_perf = 0.0
if vehicle_index < len(self.vehicles):
if (self.vehicles[vehicle_index].vhc_id, object_index, object_type_text) in self.detection_perf_dict:
det_perf = self.detection_perf_dict[(self.vehicles[vehicle_index].vhc_id, object_index,
object_type_text)]
return det_perf
def get_vehicle_control(self, vehicle_index, control_type):
"""Get applied control actions for the given vehicle."""
applied_control = 0.0
if vehicle_index < len(self.vehicles):
if (self.vehicles[vehicle_index].vhc_id, control_type) in self.vehicle_control_dict:
applied_control = self.vehicle_control_dict[(self.vehicles[vehicle_index].vhc_id, control_type)]
return applied_control
def add_stop_before_collision_item(self, item_to_stop, item_not_to_collide):
"""Adds the item descriptions for the item to stop and item not to collide into the local list."""
self.stop_before_collision_list.append((item_to_stop, item_not_to_collide))
def set_initial_state(self, vehicle_index, state_index, initial_value):
"""Sets the given initial state value for the requested vehicle."""
if len(self.vehicles) > vehicle_index:
vhc = self.vehicles[vehicle_index]
if state_index == vhc.STATE_ID_VELOCITY_X:
obj_velocity = self.supervisor_control.get_obj_velocity(vhc)
obj_velocity[CoordinateSystem.X_AXIS] = initial_value
self.supervisor_control.set_obj_velocity(vhc, obj_velocity)
elif state_index == vhc.STATE_ID_VELOCITY_Y:
obj_velocity = self.supervisor_control.get_obj_velocity(vhc)
obj_velocity[CoordinateSystem.Y_AXIS] = initial_value
self.supervisor_control.set_obj_velocity(vhc, obj_velocity)
elif state_index == vhc.STATE_ID_VELOCITY_Z:
obj_velocity = self.supervisor_control.get_obj_velocity(vhc)
obj_velocity[CoordinateSystem.Z_AXIS] = initial_value
self.supervisor_control.set_obj_velocity(vhc, obj_velocity)
else:
print("WARNING! Requested initial state setting is not supported yet! {} {} {}".format(vehicle_index,
state_index,
initial_value))
def simulate_vehicles(self, current_sim_time_s):
"""Simulation vehicles for one time step."""
self.current_sim_time = current_sim_time_s
control_type = 0
if self.supervisor_emitter == 'NOT SET':
self.supervisor_emitter = self.get_emitter()
if self.supervisor_receiver == 'NOT SET':
self.supervisor_receiver = self.get_receiver()
self.update_all_vehicles_states()
# Following is to stop vehicles before they collide into another vehicle or pedestrian (stops like DARTH VADER)
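        # For every (item_to_stop, item_not_to_collide) pair: any matching moving vehicle
        # that is within 15 m of the protected object in the x-z plane, sits roughly
        # 5-8 m to its side and is heading towards it is frozen by re-pinning its
        # current position and resetting its physics.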
for (item_to_stop, item_not_to_collide) in self.stop_before_collision_list:
for (vhc_ind, vhc) in enumerate(self.vehicles):
if item_to_stop.item_index in [ItemDescription.ITEM_INDEX_ALL, vhc_ind]:
if item_not_to_collide.item_type == ItemDescription.ITEM_TYPE_VEHICLE:
for (vhc2_ind, vhc2) in enumerate(self.vehicles):
if (item_not_to_collide.item_index in [ItemDescription.ITEM_INDEX_ALL, vhc2_ind] and
vhc2_ind != vhc_ind):
not_to_collide_pos = vhc2.current_position
if (math.sqrt((vhc.current_position[0] - not_to_collide_pos[0])**2 +
(vhc.current_position[2] - not_to_collide_pos[2])**2) < 15.0):
vhc_towards_right = (math.pi/2.0 - math.pi/4 < vhc.current_orientation
< math.pi/2.0 + math.pi/4)
vhc_towards_left = (-math.pi/2.0 - math.pi/4 < vhc.current_orientation
< -math.pi/2.0 + math.pi/4)
vhc_on_right = ((not_to_collide_pos[0] - 8.0 < vhc.current_position[0]
< not_to_collide_pos[0] - 5.0)
and (not_to_collide_pos[2] - 2.5 < vhc.current_position[2]
< not_to_collide_pos[2] + 6.5))
vhc_on_left = ((not_to_collide_pos[0] + 8.0 > vhc.current_position[0]
> not_to_collide_pos[0] + 5.0)
and (not_to_collide_pos[2] - 2.5 < vhc.current_position[2]
< not_to_collide_pos[2] + 6.5))
if (vhc.speed > 0.0 and ((vhc_on_left and vhc_towards_left) or
(vhc_on_right and vhc_towards_right))):
self.supervisor_control.set_obj_position_3D(vhc, vhc.current_position)
self.supervisor_control.reset_obj_physics(vhc)
elif ((item_not_to_collide.item_type is ItemDescription.ITEM_TYPE_PEDESTRIAN) and
(self.pedestrians_manager is not None)):
for (ped_ind, ped) in enumerate(self.pedestrians_manager.pedestrians):
if item_not_to_collide.item_index in [ItemDescription.ITEM_INDEX_ALL, ped_ind]:
not_to_collide_pos = ped.current_position
if (math.sqrt((vhc.current_position[0] - not_to_collide_pos[0])**2 +
(vhc.current_position[2] - not_to_collide_pos[2])**2) < 15.0):
vhc_towards_left = (math.pi/2.0 - math.pi/4 < vhc.current_orientation
< math.pi/2.0 + math.pi/4)
vhc_towards_right = (-math.pi/2.0 - math.pi/4 < vhc.current_orientation
< -math.pi/2.0 + math.pi/4)
vhc_on_right = ((not_to_collide_pos[0] - 8.0 < vhc.current_position[0]
< not_to_collide_pos[0] - 5.0)
and (not_to_collide_pos[2] - 2.5 < vhc.current_position[2]
< not_to_collide_pos[2] + 6.5))
vhc_on_left = ((not_to_collide_pos[0] + 8.0 > vhc.current_position[0]
> not_to_collide_pos[0] + 5.0)
and (not_to_collide_pos[2] - 2.5 < vhc.current_position[2]
< not_to_collide_pos[2] + 6.5))
if (vhc.speed > 0.0 and ((vhc_on_left and vhc_towards_right) or
(vhc_on_right and vhc_towards_left))):
self.supervisor_control.set_obj_position_3D(vhc, vhc.current_position)
self.supervisor_control.reset_obj_physics(vhc)
if self.supervisor_emitter is not None:
self.controller_comm_interface.transmit_backlogged_messages(self.supervisor_emitter)
for vhc in self.vehicles:
report_vhc = False
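            # reporting_dict sentinels: -1 = report once (then flipped to -2),
            # -2 = do not report, any other value = report on every step.
            # A key with vehicle id 0 applies to all vehicles.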
if (vhc.vhc_id, self.POSITION_REPORTING) in self.reporting_dict:
period = self.reporting_dict[(vhc.vhc_id, self.POSITION_REPORTING)]
if period == -1: # Report only once
self.reporting_dict[(vhc.vhc_id, self.POSITION_REPORTING)] = -2 # Won't report next time
if period != -2:
report_vhc = True
elif (0, self.POSITION_REPORTING) in self.reporting_dict:
period = self.reporting_dict[(0, self.POSITION_REPORTING)]
if period == -1: # Report only once
self.reporting_dict[(vhc.vhc_id, self.POSITION_REPORTING)] = -2 # Won't report next time
if period != -2:
report_vhc = True
else:
                period = -2  # assumed default: no position reporting configured for this vehicle
#!/usr/bin/env python
"""driver_r1.py: ROS driver for Omorobot R1 and R1-mini"""
# For more information, please visit our website www.omorobot.com
# Want to discuss with developers using our robots? Please visit our forum website at http://omorobot1.synology.me
# Also note that this software is for experimental and subject to change
# without any notifications.
__license__ = "MIT"
__version__ = "0.1.3"
__status__ = "Experimental"
'''
## License
The MIT License (MIT)
R1 and R1 mini driver for ROS: an open source platform for driving a robot with ROS.
Copyright (C) 2019 OMOROBOT Inc
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import sys
import rospy
import serial
import io
import numpy as np
import math
import os
from time import sleep
from std_msgs.msg import UInt8, Int8, Int16, Float64, Float32
from std_msgs.msg import Header
from std_srvs.srv import Trigger
from omoros.msg import R1MotorStatusLR, R1MotorStatus
from copy import copy, deepcopy
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist, TwistWithCovariance, Pose, Point, Vector3, Quaternion
from tf.broadcaster import TransformBroadcaster
from nav_msgs.msg import Odometry
from tf.transformations import quaternion_from_euler
class MyPose(object):
x = 0
y = 0
theta = 0
timestamp = 0
class ArrowCon:
setFwd = 0 # 1:Fwd, -1: Rev
setRot = 0 # 1:CCW(Turn Left), -1: CW(Turn Right)
targetOdo_L = 0 # Odometry target
targetOdo_R = 0
    isFinished = True   # True if the arrow motion is completed
cnt = 0
class Encoder(object):
Dir = 1.0
PPR = 0
GearRatio = 0
Step = 0
PPWheelRev = 0
class VehicleConfig(object):
BodyCircumference = 0 # circumference length of robot for spin in place
WheelCircumference = 0
    WIDTH = 0.0         # Default vehicle width (m)
WHEEL_R = 0.0 # Wheel radius
WHEEL_MAXV = 0.0 # Maximum wheel speed (mm/s)
V_Limit = 0 # Speed limit for vehicle (m/s)
W_Limit = 0 # Rotational Speed limit for vehicle (rad/s)
V_Limit_JOY = 0 # Speed limit for Joy control (m/s)
W_Limit_JOY = 0 # Rotational Speed limit for Joy control (rad/s)
ArrowFwdStep = 100.0 # Forward motion step when arrow key pressed (mm)
ArrowRotRate = 1/10.0 # Rotational rate per full turn
encoder = Encoder()
class Command:
    isAlive = False     # Set to True if a subscribed command message has been received
mode = 0 # Command mode (0:vel, rot) <--> (1:speedL, speedR)
speed = 0.0 # Speed mm/s
deg_sec = 0.0 # Rotational speed deg/s
speedL = 0.0 # Left Wheel speed mm/s
    speedR = 0.0        # Right wheel speed mm/s
    cnt = 0             # Watchdog counter used in the main loop to time out stale commands
class Robot:
rospy.init_node('omoros', anonymous=True)
# fetch /global parameters
param_port = rospy.get_param('~port')
param_baud = rospy.get_param('~baud')
param_modelName = rospy.get_param('~modelName')
param_joy_en = rospy.get_param('~joy_enable')
print('PARAM JOY_ENABLE:')
print(param_joy_en)
# Open Serial port with parameter settings
ser = serial.Serial(param_port, param_baud)
#ser = serial.Serial('/dev/ttyS0', 115200) #For raspberryPi
ser_io = io.TextIOWrapper(io.BufferedRWPair(ser, ser, 1),
newline = '\r',
line_buffering = True)
config = VehicleConfig()
pose = MyPose()
joyAxes = []
joyButtons = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # Buttons 15
joyDeadband = 0.15
exp = 0.3 # Joystick expo setting
if param_joy_en == 1:
isAutoMode = False
        print("In Manual mode")
    else:
        isAutoMode = True
        print("In Auto mode")
    isArrowMode = False  # Whether to control the robot with arrow keys or not
arrowCon = ArrowCon
#initialize data
cmd = Command
enc_L = 0.0 # Left wheel encoder count from QENCOD message
enc_R = 0.0 # Right wheel encoder count from QENCOD message
enc_L_prev = 0.0
enc_R_prev = 0.0
enc_offset_L = 0.0
enc_offset_R = 0.0
enc_cnt = 0
odo_L = 0.0 # Left Wheel odometry returned from QODO message
odo_R = 0.0 # Right Wheel odometry returned from QODO message
RPM_L = 0.0 # Left Wheel RPM returned from QRPM message
RPM_R = 0.0 # Right Wheel RPM returned from QRPM message
    speedL = 0.0        # Left wheel speed returned from QDIFFV message
    speedR = 0.0        # Right wheel speed returned from QDIFFV message
vel = 0.0 # Velocity returned from CVW command
rot = 0.0 # Rotational speed returned from CVR command
def __init__(self):
## Set vehicle specific configurations
if self.param_modelName == "r1":
            print("**********")
            print("Driving R1")
            print("**********")
self.config.WIDTH = 0.591 # Apply vehicle width for R1 version
self.config.WHEEL_R = 0.11 # Apply wheel radius for R1 version
self.config.WHEEL_MAXV = 2400.0 # Maximum speed can be applied to each wheel (mm/s)
self.config.V_Limit = 10.2 # Limited speed (m/s)
self.config.W_Limit = 0.25
self.config.V_Limit_JOY = 0.25 # Limited speed for joystick control
self.config.W_Limit_JOY = 0.05
self.config.ArrowFwdStep = 250 # Steps move forward based on Odometry
self.config.ArrowRotRate = 0.125
self.config.encoder.Dir = 1.0
self.config.encoder.PPR = 1000
self.config.encoder.GearRatio = 15
elif self.param_modelName == "mini":
            print("***************")
            print("Driving R1-mini")
            print("***************")
self.config.WIDTH = 0.170 # Apply vehicle width for mini version
self.config.WHEEL_R = 0.0336 # Apply wheel radius for mini version
self.config.WHEEL_MAXV = 500.0
self.config.V_Limit = 0.2
self.config.W_Limit = 0.1
self.config.V_Limit_JOY = 0.2
self.config.W_Limit_JOY = 0.05
self.config.ArrowFwdStep = 100
self.config.ArrowRotRate = 0.1
self.config.encoder.Dir = 1.0
self.config.encoder.PPR = 11
self.config.encoder.GearRatio = 21
        else:
            print("Error: param modelName only supports 'r1' and 'mini'. Exiting...")
            sys.exit()
print('Wheel Track:{:.2f}m, Radius:{:.3f}m'.format(self.config.WIDTH, self.config.WHEEL_R))
self.config.BodyCircumference = self.config.WIDTH * math.pi
print('Platform Rotation arc length: {:04f}m'.format(self.config.BodyCircumference))
self.config.WheelCircumference = self.config.WHEEL_R * 2 * math.pi
print('Wheel circumference: {:04f}m'.format(self.config.WheelCircumference))
self.config.encoder.Step = self.config.WheelCircumference / (self.config.encoder.PPR * self.config.encoder.GearRatio * 4)
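        # Example (mini config): 2*pi*0.0336 / (11 * 21 * 4) ~= 0.00023 m of travel per
        # encoder count; the factor 4 presumably reflects 4x quadrature counting.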
print('Encoder step: {:04f}m/pulse'.format(self.config.encoder.Step))
self.config.encoder.PPWheelRev = self.config.WheelCircumference / self.config.encoder.Step
print('Encoder pulses per wheel rev: {:.2f} pulses/rev'.format(self.config.encoder.PPWheelRev))
print('Serial port:'+self.ser.name) # Print which port was really used
self.joyAxes = [0,0,0,0,0,0,0,0]
self.joyButtons = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
# Configure data output
if self.ser.isOpen():
print("Serial Open")
self.resetODO()
sleep(0.05)
self.reset_odometry()
self.setREGI(0,'QENCOD')
sleep(0.05)
self.setREGI(1,'QODO')
sleep(0.05)
self.setREGI(2,'QDIFFV')
sleep(0.05)
self.setREGI(3,'0')
sleep(0.05)
self.setREGI(4,'0')
#self.setREGI(3,'QVW')
#sleep(0.05)
#self.setREGI(4,'QRPM')
sleep(0.05)
self.setSPERI(20)
sleep(0.05)
self.setPEEN(1)
sleep(0.05)
self.reset_odometry()
# Subscriber
rospy.Subscriber("joy", Joy, self.callbackJoy)
rospy.Subscriber("cmd_vel", Twist, self.callbackCmdVel)
# publisher
self.pub_enc_l = rospy.Publisher('motor/encoder/left', Float64, queue_size=10)
self.pub_enc_r = rospy.Publisher('motor/encoder/right', Float64, queue_size=10)
self.pub_motor_status = rospy.Publisher('motor/status', R1MotorStatusLR, queue_size=10)
self.odom_pub = rospy.Publisher("odom", Odometry, queue_size=10)
self.odom_broadcaster = TransformBroadcaster()
rate = rospy.Rate(rospy.get_param('~hz', 30)) # 30hz
rospy.Timer(rospy.Duration(0.05), self.joytimer)
rospy.Timer(rospy.Duration(0.01), self.serReader)
self.pose.timestamp = rospy.Time.now()
while not rospy.is_shutdown():
if self.cmd.isAlive == True:
self.cmd.cnt += 1
                if self.cmd.cnt > 1000:  # At the default 30 Hz loop rate this is roughly a 33 s timeout
self.cmd.isAlive = False
self.isAutoMode = False
rate.sleep()
self.ser.close()
def serReader(self, event):
reader = self.ser_io.readline()
if reader:
packet = reader.split(",")
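            # Frames are expected to look like e.g. "#QENCOD,1200,1185" (format inferred
            # from the parsing below): packet[0] carries the "#<header>" token and the
            # remaining fields are comma-separated numeric values.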
try:
header = packet[0].split("#")[1]
if header.startswith('QVW'):
self.vel = int(packet[1])
self.rot = int(packet[2])
elif header.startswith('QENCOD'):
enc_L = int(packet[1])
enc_R = int(packet[2])
if self.enc_cnt == 0:
self.enc_offset_L = enc_L
self.enc_offset_R = enc_R
self.enc_cnt+=1
self.enc_L = enc_L*self.config.encoder.Dir - self.enc_offset_L
self.enc_R = enc_R*self.config.encoder.Dir - self.enc_offset_R
self.pub_enc_l.publish(Float64(data=self.enc_L))
self.pub_enc_r.publish(Float64(data=self.enc_R))
self.pose = self.updatePose(self.pose, self.enc_L, self.enc_R)
#print('Encoder:L{:.2f}, R:{:.2f}'.format(self.enc_L, self.enc_R))
elif header.startswith('QODO'):
self.odo_L = float(packet[1])*self.config.encoder.Dir
self.odo_R = float(packet[2])*self.config.encoder.Dir
#print('Odo:{:.2f}mm,{:.2f}mm'.format(self.odo_L, self.odo_R))
elif header.startswith('QRPM'):
self.RPM_L = int(packet[1])
self.RPM_R = int(packet[2])
#print('RPM:{:.2f}mm,{:.2f}mm'.format(self.RPM_L, self.RPM_R))
elif header.startswith('QDIFFV'):
self.speedL = int(packet[1])
self.speedR = int(packet[2])
except:
pass
status_left = R1MotorStatus(low_voltage = 0, overloaded = 0, power = 0,
encoder = self.enc_L, RPM = self.RPM_L, ODO = self.odo_L, speed = self.speedL)
status_right = R1MotorStatus(low_voltage = 0, overloaded = 0, power = 0,
encoder = self.enc_R, RPM = self.RPM_R, ODO = self.odo_R, speed = self.speedR)
self.pub_motor_status.publish(R1MotorStatusLR(header=Header(stamp=rospy.Time.now()),
Vspeed = self.vel, Vomega = self.rot,
left=status_left, right=status_right))
def callbackJoy(self, data):
self.joyAxes = deepcopy(data.axes)
#print('Joy:{:.2f},{:.2f}'.format(self.joyAxes[0], self.joyAxes[1]))
# Read the most recent button state
newJoyButtons = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
newJoyButtons = deepcopy(data.buttons)
# Check if button 1(B) is newly set
if (newJoyButtons[1]==1) and (newJoyButtons[1]!=self.joyButtons[1]):
            if not self.isAutoMode:
                self.isAutoMode = True
                print("In Auto mode")
            else:
                self.isAutoMode = False
                print("In Manual mode")
            label = tk.Label(box, text=x)
label.grid(column=2 * i, row=0,sticky="w")
self.labels.append(tk.StringVar())
menu = tk.OptionMenu(box, self.labels[i], *a)
menu.config(width="8")
menu.grid(column=i * 2 + 1, row=0, sticky="e")
self.labels[0].set(a[0])
self.labels[1].set(a[2])
#Decimal
label = tk.Label(box, text="Decimal places:")
label.grid(column=0, row=1,columnspan=2,sticky="w")
self.labels.append(tk.StringVar())
menu = tk.OptionMenu(box, self.labels[-1], *["0","1","2"])
menu.config(width="8")
menu.grid(column=2, row=1,columnspan=2,sticky="e")
self.labels[-1].set("1")
#TS MARK
label = tk.Label(box, text="TS Mark:")
label.grid(column=0, row=2,columnspan=2,sticky="w")
self.labels.append(tk.StringVar())
menu = tk.OptionMenu(box, self.labels[-1], *[" ", "‡ (big)", "‡ (small)"])
menu.config(width="8")
menu.grid(column=2, row=2,columnspan=2,sticky="e")
box.grid_columnconfigure(5, weight=1,minsize="180")
self.labels[-1].set(" ")
#ADJUST GRID
for n in range(3):
box.grid_rowconfigure(n, weight=1)
for n in range(4):
box.grid_columnconfigure(n, weight=1)
def _build_aesthetics(self):
box = self.framefy("Aesthetics")
a = [" ", "( )", "[ ]", r"{ }", '" "', "' '"]
b = [a * 2 for a in range(11)]
c = [1,2]
e = [a,a,b,c]
f = ["G:","H:","Width:","Decimal"]
for a,(b,c) in enumerate(zip(f,e)):
label = tk.Label(box,text=b)
label.grid(column=2*a,row=0)
self.aesthetics.append(tk.StringVar())
menu = tk.OptionMenu(box, self.aesthetics[a], *c)
menu.config(width="2")
menu.grid(column=a * 2 + 1, row=0)
for i,a in enumerate([" ","( )",10,1]):
self.aesthetics[i].set(a)
a = [0 + a * 10 for a in range(11)]
b = [60 + a * 5 for a in range(11)]
c = [" ","‡ (big)","‡ (small)"]
d = ["X offset:","X dist:","TS mark:"]
e = [a,b,c]
for a, (b, c) in enumerate(zip(d, e)):
label = tk.Label(box, text=b)
label.grid(column=0 if a ==0 else 2 * a +1, row=1, columnspan = 2 if a ==0 else 1)
self.aesthetics.append(tk.StringVar())
menu = tk.OptionMenu(box, self.aesthetics[a+4], *c)
menu.config(width="8" if a == 2 else "2")
menu.grid(column=a * 2 + 2, row=1, columnspan = 3 if a == 2 else 1, sticky="news" if a==2 else "")
for i, a in enumerate([40, 80, " "]):
self.aesthetics[i+4].set(a)
def _build_plot_sel(self):
box = self.framefy("Plot")
for i,a in enumerate(pref.menu_h):
self.plot.append(tk.IntVar(value=1))
c1 = tk.Checkbutton(box, text=a, variable=self.plot[i], onvalue=1, offvalue=0)
c1.grid(column=i%4,row=i//4, sticky="w")
n = len(pref.menu_h)
self.plot.append(tk.IntVar(value=1))
c1 = tk.Checkbutton(box, text="Comparers", variable=self.plot[n], onvalue=1, offvalue=0)
c1.grid(column=0,row=((n-1)//4)+1,columnspan = 2, sticky="w")
self.plot.append(tk.IntVar(value=1))
c1 = tk.Checkbutton(box, text="Connections", variable=self.plot[n+1], onvalue=1, offvalue=0)
c1.grid(column=2,row=((n-1)//4)+1,columnspan = 2, sticky="w")
for n in range(4):
box.grid_columnconfigure(n, weight=1)
for n in range(3):
box.grid_rowconfigure(n, weight=1)
def _build_titles(self,idx):
box = self.boxify("Titles",idx)
for a,b,c in zip([0,1,2],self.titles,["Main:","y:","x:"]):
label = tk.Label(box, text=c,width=10)
label.grid(column=0, row=a)
self.titles[a] = tk.Entry(box, justify=tk.CENTER, bd=2, width=50)
self.titles[a].insert(0, b)
self.titles[a].grid(column=1, row=a, padx="0", sticky="news")
def _build_loadsave(self,idx):
box = self.boxify("Close, Load & Save Data", idx)
        label = tk.Label(box, text="Paths' and connections' info")
label.grid(column=0,row=0,sticky="w")
button = tk.Button(box, text="Close", command=self._blank_state, padx="1")
button.config(width=7)
button.grid(column=1, row=0, sticky="e")
button = tk.Button(box, text="Load", command=self.load_state, padx="1")
button.config(width=7)
button.grid(column=2,row=0,sticky="e")
button = tk.Button(box, text="Save as", command=self._save_as, padx="1")
button.config(width=7)
button.grid(column=3,row=0,sticky="e")
button = tk.Button(box, text="Save", command=self._save, padx="1")
button.config(width=7)
button.grid(column=4,row=0,sticky="e")
box.grid_columnconfigure(0, weight=1)
def _build_generator(self,idx):
box = self.boxify("Generate random PES", idx)
label = tk.Label(box, text="Random PES generator")
label.pack(side=tk.LEFT)
button = tk.Button(box, text="Fill in data", command=self._ask_confirmation, padx="1")
button.config(width=10)
button.pack(side=tk.RIGHT)
def _build_preview(self,idx):
box = self.boxify("Preview with either command or default manager, or save svg file", idx)
self.command = tk.Entry(box, justify=tk.CENTER, bd=4)
self.command.insert(0, pref.command_line)
self.command.grid(column=0,row=0,sticky="news")
button = tk.Button(box, text="Command", command=self.run_data_a, padx="1")
button.config(width=8)
button.grid(column=1,row=0,sticky="e")
button = tk.Button(box, text="Default", command=self.run_data_b, padx="1")
button.config(width=8)
button.grid(column=2,row=0,sticky="e")
button = tk.Button(box, text="Save svg", command=self.return_svg, padx="1")
button.config(width=8)
button.grid(column=3,row=0,sticky="e")
box.grid_columnconfigure(0, weight=1)
def _build_message(self,idx):
box = self.boxify("Message",idx)
m = tk.Message(box)
scrollbar = tk.Scrollbar(box)
scrollbar.grid(column=1,row=0,sticky="nes")
self.msg = tk.Text(box,
width=40,
yscrollcommand=scrollbar.set,
height=12 if pref.trickster else 18,
state="disabled",
background=m.cget("background"),
relief="flat",
wrap=tk.WORD,
font=('TkFixedFont',8))
m.destroy()
self.msg.grid(column=0,row=0,sticky="news")
if not pref.xlsx:
self.message("Welcome!\n\nTo enable .xlsx file support, please install openpyxl python library via the shell command:\npython3 -m pip install openpyxl")
else:
self.message("Welcome!\n\n{}".format(random.choice(pref.hints)))
box.grid_rowconfigure(0, weight=1)
box.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(idx, weight=1)
def _save(self):
try:
if self.f.endswith(".xlsx") and pref.xlsx:
from openpyxl import Workbook
wb = Workbook()
wb.remove(wb.active)
for a,b in zip(self.gen_data(type=".xlsx"),pref.menu_e):
sheet = wb.create_sheet(title=f'Path {b}')
for i,c in enumerate(a,start=1):
sheet.append(c[1:4])
try:
wb.save(self.f)
except PermissionError:
self.message(f"Error while saving file!\nIs the file:\n'{self.f}' already open?")
else:
try:
with open(self.f,"w") as out:
if self.f.endswith(".txt"):
txt = "\n".join(a for a in self.gen_data(type=".txt") if len(a.split()) >= 1)
out.write(txt)
except PermissionError:
self.message(f"Error while saving file!\nIs the file:\n'{self.f}' already open?")
except AttributeError: self._save_as()
except FileNotFoundError: self._save_as()
def _save_as(self):
self.f = tk.filedialog.asksaveasfilename(**pref.allowed_extensions)
self._change_win_title(self.f)
if any(self.f.endswith(a) for a in (".sff",".txt",".xlsx")):self._save()
def load_state(self,file_n=None):
if file_n is None:
file_n = tk.filedialog.askopenfilename(**pref.allowed_extensions)
try:
if file_n.endswith(".xlsx") and pref.xlsx:
self._blank_state(ask=False)
import openpyxl
try:
wb = openpyxl.load_workbook(file_n)
except:
self.message(f"Could not read {file_n} as xlsx file!\nAre you sure this is a proper xlsx file?")
return
notes = [getattr(note, a) for a in pref.menu_g]
exceeded = False
for a,b in zip(wb.sheetnames, notes):
sheet = wb[a]
for n in range(1,pref.n_structures+10):
if n > pref.n_structures:
if any(sheet.cell(row=n,column=i).value is None for i in range(1,4)):
exceeded = True
continue
for i in range(1,4):
if sheet.cell(row=n,column=i).value is None: continue
b.data[n-1][i].insert(0,str(sheet.cell(row=n,column=i).value))
if exceeded:
self.message("Exceeding number of structures")
elif file_n.endswith(".txt"):
with open(file_n, mode="r") as file:
self._blank_state(ask=False)
all_tabs = {}
tab_data = []
for line in file.read().splitlines():
line = line.split()
non_hash = True if len(line) == 1 and not line[0].startswith("#") else False
if len(line) >= 2 or non_hash:
tab_data.append(line)
elif len(line) == 1 and any(line[0] == f"#{a}" for a in pref.menu_e):
all_tabs[line[0]] = tab_data
tab_data = []
if len(all_tabs) == 0 and len(tab_data) != 0:
all_tabs["#A"] = tab_data
missing = [b for b in [f"#{a}" for a in pref.menu_e] if b not in all_tabs.keys()]
for a in missing: all_tabs[a] = []
notes = [getattr(note, a) for a in pref.menu_g]
exceeded = False
for a,b in zip(notes,sorted(all_tabs.keys())):
for i,c in enumerate(all_tabs[b]):
if i >= pref.n_structures:
exceeded = True
continue
for n in range(3):
try: a.data[i][n+1].insert(0,c[n])
except IndexError: pass
if exceeded:
self.message("Exceeding number of structures")
else:
self.message(f"Unrecognized file {file_n}")
except FileNotFoundError:
self.message("File not found!")
return
finally:
self._change_win_title(file_n)
self.f = file_n
def fill_in(self):
size = random.random()+0.5
max_value = min(len(pref.alphabet), pref.n_structures)
lenght = random.randint(5,12)
tab = getattr(note,[a for a in pref.menu_g][note.index(note.select())])
for i,n in zip(range(max_value),pref.alphabet):
value = size*random.randrange(-20,20)-i*size*2
for idx in range(len(tab.data[i])):
if idx == 1:
tab.data[i][idx].delete(0, tk.END)
if i+1 < lenght: tab.data[i][idx].insert(0, n)
elif i + 1 == lenght: tab.data[i][idx].insert(0, "A'")
elif idx == 2:
tab.data[i][idx].delete(0, tk.END)
if i < lenght: tab.data[i][idx].insert(0,"{:.2f}".format(value))
elif idx == 3:
tab.data[i][idx].delete(0, tk.END)
if i < lenght: tab.data[i][idx].insert(0,"{:.2f}".format(value + random.choice([-random.random(), +random.random()])))
elif idx == 4:
tab.data[i][idx].set(pref.menu_d[1])
max_v, min_v = None, None
for i in range(max_value):
if max_v is None: max_v = [i,tab.data[i][2].get()]
if min_v is None: min_v = [i,tab.data[i][2].get()]
if i < lenght and float(tab.data[i][2].get()) > float(max_v[1]): max_v = [i, tab.data[i][2].get()]
if i < lenght and float(tab.data[i][2].get()) < float(min_v[1]) : min_v = [i, tab.data[i][2].get()]
if i == 0: tab.data[i][0].set("INT")
elif i+1 == lenght: tab.data[i][0].set("INT")
elif i >= lenght: tab.data[i][0].set(" ")
else:
if float(tab.data[i-1][2].get()) < float(tab.data[i][2].get()) > float(tab.data[i+1][2].get()):
tab.data[i][0].set("TS")
else:
tab.data[i][0].set("INT")
def _ask_confirmation(self):
if note.index(tk.END) -2 <= note.index(note.select()):
self.message("Cannot fill in data for connection and comparer tabs!\n")
return
msgbox = tk.messagebox.askquestion(
f'Fill in random PES cycle at {pref.menu_h[note.index(note.select())]}',
'Are you sure? All unsaved data will be lost!', icon='warning')
if msgbox == "yes":
self.fill_in()
self._change_win_title("Unsaved")
if hasattr(self,"f"): del(self.f)
def _change_win_title(self,path):
window.title(f"{pref.name} @ {path}")
def _blank_state(self,ask=True):
if ask:
msgbox = tk.messagebox.askquestion('Close document', 'Are you sure? All unsaved data will be lost!', icon='warning')
if msgbox != "yes":return
self._change_win_title("Unsaved")
if hasattr(self,"f"): del(self.f)
for a in [getattr(note,a) for a in pref.menu_g]:
for i in range(pref.n_structures):
for idx in range(5):
if idx == 0: a.data[i][idx].set(pref.menu_z[0])
if idx in [1,2,3]: a.data[i][idx].delete(0, tk.END)
if idx == 4: a.data[i][idx].set(pref.menu_d[1])
for a in [getattr(note,a) for a in pref.menu_g]:
for n in range(2):
for idx, b in zip([0, 1, 2], [pref.menu_a, pref.menu_b, pref.menu_c]):
a.option_menu.line_opt_data[n][idx].set(b[[1,1,0][idx] if n == 0 else [1,0,2][idx]])
for a in range(pref.n_connectors):
for b in range(4):
note.tab_connections.data[a][b].set("")
for b,c in zip(range(3),[pref.menu_a, pref.menu_b, pref.menu_c]):
note.tab_connections.data[a][b+4].set(c[0])
def message(self,text):
now = datetime.datetime.now()
self.msg.configure(state="normal")
self.msg.tag_add("start", "0.0", tk.END)
self.msg.tag_config("start", foreground="grey")
if type(text) == str: text = [text]
for txt in text:
self.msg.insert("1.0",txt+"\n")
self.msg.insert("1.0", "[" + ":".join(["{:02d}".format(a) for a in [now.hour, now.minute, now.second]]) + "] "+"\n")
self.msg.configure(state="disabled")
def boxify(self,name,row):
box = ttk.LabelFrame(self, text=name)
box.grid(column=0, row=row, sticky="news")
box.grid_columnconfigure(0, weight=1)
return box
def framefy(self,name):
x = tk.Frame()
x.grid(column=0, row=0, sticky="news")
x.grid_columnconfigure(0, weight=1)
self.note.add(x,text=name)
return x
def print_data(self):
notes = [getattr(note,a) for a in pref.menu_g]
for a,b in zip(notes,pref.menu_e):
print(f"NOTE {b}")
for idx,line in enumerate(getattr(a,"data")):
if any(c.get().strip() != "" for c in line[:-1]):
print(f"#{idx+1}",[n.get() for n in line])
print("NOTE CONNECTIONS")
for idx,a in enumerate(note.tab_connections.data):
if any(c.get().strip() != "" for c in a[:-3]):
print(f"#{idx+1}", [n.get() for n in a])
def gen_data(self,type):
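        """Collect the notebook data for saving.
        For type ".txt": one formatted line per structure holding the label and the two
        energy columns (G, H), with each path closed by a "#<path letter>" marker,
        e.g. (illustrative values only):
            TS1                       12.30      11.80
            #A
        For type ".xlsx": the raw row values of every structure, grouped per path.
        """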
notes = [getattr(note,a) for a in pref.menu_g]
txt_data = []
xlsx_data = []
for a,b in zip(notes,pref.menu_e):
xlsx = []
for idx,line in enumerate(getattr(a,"data")):
c = [n.get() for n in line]
txt_data.append("{:<20} {:>10} {:>10}".format(*c[1:4]))
xlsx.append(c)
txt_data.append("#{}".format(b))
xlsx_data.append(xlsx)
if type == ".txt":
return txt_data
elif type == ".xlsx":
return xlsx_data
def save_svg_as(self):
return tk.filedialog.asksaveasfilename(defaultextension=".svg", title="Save svg", filetypes=[("Scalable Vector Graphics", ".svg")])
def run_data_a(self):
        self.return_svg(promp=False)
        os.system(self.command.get())
def run_data_b(self):
self.return_svg(promp=False)
filename = os.path.join(os.getcwd(), ".E_profile.svg")
if sys.platform == "win32" or os.name == "nt":
os.startfile(filename)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, filename])
def return_svg(self,promp=True):
svg_name = None if promp == False else self.save_svg_as()
msg = SvgGenEsp(self)
msg = msg.save_svg(svg_name)
if not msg is None: self.message(msg)
class SvgGenEsp:
def __init__(self,options):
self.options = options # Please only use this at __init__
#MAIN
m_options = ["energy", "comma", "plot"]
self.main = {a: b.get() for a, b in zip(m_options, getattr(self.options,"main"))}
self.e_source = 4 if getattr(self.options,"main")[0].get() == 1 else 3
self.e_complement = 3 if self.e_source == 4 else 4
self.comma = True if self.main["comma"] == 1 else False
self.plot_np = True if self.main["plot"] == 1 else False
#SPAN
s_options = ["span","irrespective","big_arrow","units","temperature"]
self.span = {a:b.get() for a,b in zip(s_options,getattr(self.options,"span"))}
self.span_worthy = True
self.span_request = True if self.span["span"] == 1 else False
self.big_arrow = True if self.span["big_arrow"] == 1 else False
#GRAPHIC STYLE
self.frame = getattr(self.options,"style")[0].get()
self.grid_decimal = getattr(self.options, "style")[1].get()
#HORIZONTAL
self.wide = [int(getattr(self.options,"horizontal")[0].get()) * a + b for a,b in zip([-1,1],[20,40])]
self.x_start_offset = int(getattr(self.options,"horizontal")[1].get())
self.x_end_offset = int(getattr(self.options, "horizontal")[2].get())
self.x_space = int(getattr(self.options,"horizontal")[3].get())
#VERTICAL
self.top_height = int(getattr(self.options,"vertical")[0].get())
self.bottom_height = 400 - int(getattr(self.options,"vertical")[1].get())
#LABELS
self.g_h_labels = {a: getattr(self.options,"labels")[b].get() for a,b in zip(["g","h"],[0,1])}
self.e_decimal = getattr(self.options,"labels")[2].get()
self.ts_mark = getattr(self.options,"labels")[3].get()
#PLOT
self.plot = [a.get() for a in getattr(self.options, "plot")]
self.plot_path = {a:bool(b) for a,b in zip(pref.menu_e,self.plot)}
#TITLE
self.main_title = getattr(self.options,"titles")[0].get()
self.y_title = getattr(self.options,"titles")[1].get()
self.x_title = getattr(self.options, "titles")[2].get()
# RETURN
self.svg_code = ['<?xml version="1.0" encoding="UTF-8" ?>']
self.msg = []
if self.span_request:
self.temperature = self._verify_temp(self.span["temperature"])
# CONECTORS
self.conectors = [[b.get() for b in a] for a in note.tab_connections.data]
# COMPARERS
x = ("A","1","B","2","S1","S2","S3","S4","S5","S6")
self.comparers = [{l:note.tab_comparers.data[n][i].get() for i,l in zip(range(10),x)} for n in range(pref.n_comparers)]
# DATA OPTIONS
fc = lambda a: getattr(note, "tab_{}".format(a.lower())).option_menu.line_opt_data
self.path_options = {a: [[c.get() for c in b] for b in fc(a)] for a in pref.menu_e}
# DATA
dt = lambda a: enumerate(getattr(note,a).data)
fa = lambda idx,c: float(c.get().replace(",",".")) if idx == self.e_source-1 else c.get()
fb = lambda b: is_str_float(b[self.e_source-1].get().replace(",","."))
self.raw_crt = [[[i+1,*[fa(idx,c) for idx,c in enumerate(b)]] for i,b in dt(a) if fb(b)] for a in pref.menu_g]
        self.raw_crt_dict = {a: b for a, b in zip(pref.menu_e, self.raw_crt) if self.plot_path[a]}
query_block_size + memory_block_size)
indices = tf.range(0, index_length, delta=1, name="index_range")
indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs
kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1)
gather_indices = tf.nn.conv1d(
tf.cast(indices, tf.float32),
kernel,
query_block_size,
padding="VALID",
name="gather_conv")
gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0)
# Get left and right memory blocks for each query.
# [length, batch, heads, dim]
k_t = tf.transpose(k, [2, 0, 1, 3])
v_t = tf.transpose(v, [2, 0, 1, 3])
k_unmasked_windows = gather_dilated_memory_blocks(
k_t, num_memory_blocks, gap_size, query_block_size, memory_block_size,
gather_indices)
v_unmasked_windows = gather_dilated_memory_blocks(
v_t, num_memory_blocks, gap_size, query_block_size, memory_block_size,
gather_indices)
# Combine memory windows.
block_q_shape = common_layers.shape_list(q)
masked_attention_bias = tf.tile(
tf.expand_dims(attention_bias_lower_triangle(query_block_size), axis=0),
[block_q_shape[0], block_q_shape[1], block_q_shape[2], 1, 1])
padding_attention_bias = tf.expand_dims(
embedding_to_padding(k_unmasked_windows) * -1e9, axis=-2)
padding_attention_bias = tf.tile(padding_attention_bias,
[1, 1, 1, query_block_size, 1])
attention_bias = tf.concat(
[masked_attention_bias, padding_attention_bias], axis=-1)
# combine memory windows
k_windows = tf.concat([self_k_part, k_unmasked_windows], 3)
v_windows = tf.concat([self_v_part, v_unmasked_windows], 3)
output = dot_product_attention(
q,
k_windows,
v_windows,
attention_bias,
dropout_rate=0.,
name="dilated_1d",
make_image_summary=False)
output = tf.reshape(output, [batch_size, num_heads, -1, depth_v])
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output.set_shape(v_list_shape)
return output
def local_attention_2d(q,
k,
v,
query_shape=(8, 16),
memory_flange=(8, 16),
name=None):
"""Strided block local self-attention.
The 2-D sequence is divided into 2-D blocks of shape query_shape. Attention
for a given query position can only see memory positions less than or equal to
the query position. The memory positions are the corresponding block with
memory_flange many positions to add to the height and width of the block
(namely, left, top, and right).
Args:
q: a Tensor with shape [batch, heads, h, w, depth_k]
k: a Tensor with shape [batch, heads, h, w, depth_k]
v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current
implementation, depth_v must be equal to depth_k.
    query_shape: a tuple indicating the height and width of each query block.
    memory_flange: a tuple indicating how much to look in height and width
      from each query block.
name: an optional string
Returns:
a Tensor of shape [batch, heads, h, w, depth_v]
"""
with tf.variable_scope(
name, default_name="local_self_attention_2d", values=[q, k, v]):
v_shape = common_layers.shape_list(v)
# Pad query, key, value to ensure multiple of corresponding lengths.
q = pad_to_multiple_2d(q, query_shape)
k = pad_to_multiple_2d(k, query_shape)
v = pad_to_multiple_2d(v, query_shape)
paddings = [[0, 0], [0, 0], [memory_flange[0], memory_flange[1]],
[memory_flange[0], memory_flange[1]], [0, 0]]
k = tf.pad(k, paddings)
v = tf.pad(v, paddings)
# Set up query blocks.
q_indices = gather_indices_2d(q, query_shape, query_shape)
q_new = gather_blocks_2d(q, q_indices)
# Set up key and value blocks.
memory_shape = (query_shape[0] + 2 * memory_flange[0],
query_shape[1] + 2 * memory_flange[1])
k_and_v_indices = gather_indices_2d(k, memory_shape, query_shape)
k_new = gather_blocks_2d(k, k_and_v_indices)
v_new = gather_blocks_2d(v, k_and_v_indices)
attention_bias = tf.expand_dims(
tf.to_float(embedding_to_padding(k_new)) * -1e9, axis=-2)
output = dot_product_attention(
q_new,
k_new,
v_new,
attention_bias,
dropout_rate=0.,
name="local_2d",
make_image_summary=False)
# Put representations back into original shapes.
padded_q_shape = common_layers.shape_list(q)
output = scatter_blocks_2d(output, q_indices, padded_q_shape)
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0, 0],
[-1, -1, v_shape[2], v_shape[3], -1])
return output
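# Minimal usage sketch (hypothetical shapes), kept as a comment since this is library
# code: with q, k, v of shape [batch=2, heads=4, h=32, w=64, depth=64],
#   out = local_attention_2d(q, k, v, query_shape=(8, 16), memory_flange=(8, 16))
# returns a tensor with the same [batch, heads, h, w, depth] shape as v.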
def pad_to_multiple_2d(x, block_shape):
"""Making sure x is a multiple of shape. x is [batch, heads, h, w, depth]."""
old_shape = x.get_shape().dims
last = old_shape[-1]
height_padding = -common_layers.shape_list(x)[2] % block_shape[0]
width_padding = -common_layers.shape_list(x)[3] % block_shape[1]
paddings = [[0, 0], [0, 0], [0, height_padding], [0, width_padding], [0, 0]]
padded_x = tf.pad(x, paddings)
padded_shape = padded_x.get_shape().as_list()
padded_shape = padded_shape[:-1] + [last]
padded_x.set_shape(padded_shape)
return padded_x
def reshape_range(tensor, i, j, shape):
"""Reshapes a tensor between dimensions i and j."""
t_shape = common_layers.shape_list(tensor)
target_shape = t_shape[:i] + shape + t_shape[j:]
return tf.reshape(tensor, target_shape)
def gather_blocks_2d(x, indices):
"""Gathers flattened blocks from x."""
x_shape = common_layers.shape_list(x)
x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])])
# [length, batch, heads, dim]
x_t = tf.transpose(x, [2, 0, 1, 3])
x_new = tf.gather(x_t, indices)
# returns [batch, heads, num_blocks, block_length ** 2, dim]
return tf.transpose(x_new, [2, 3, 0, 1, 4])
def scatter_blocks_2d(x, indices, shape):
"""scatters blocks from x into shape with indices."""
x_shape = common_layers.shape_list(x)
# [length, batch, heads, dim]
x_t = tf.transpose(
tf.reshape(x, [x_shape[0], x_shape[1], -1, x_shape[-1]]), [2, 0, 1, 3])
x_t_shape = common_layers.shape_list(x_t)
indices = tf.reshape(indices, [-1, 1])
scattered_x = tf.scatter_nd(indices, x_t, x_t_shape)
scattered_x = tf.transpose(scattered_x, [1, 2, 0, 3])
return tf.reshape(scattered_x, shape)
def gather_indices_2d(x, block_shape, block_stride):
"""Getting gather indices."""
# making an identity matrix kernel
kernel = tf.eye(block_shape[0] * block_shape[1])
kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
  # making indices [1, h, w, 1] to apply convs
x_shape = common_layers.shape_list(x)
indices = tf.range(x_shape[2] * x_shape[3])
indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1])
indices = tf.nn.conv2d(
tf.cast(indices, tf.float32),
kernel,
strides=[1, block_stride[0], block_stride[1], 1],
padding="VALID")
# making indices [num_blocks, dim] to gather
dims = common_layers.shape_list(indices)[:3]
if all([isinstance(dim, int) for dim in dims]):
num_blocks = functools.reduce(operator.mul, dims, 1)
else:
num_blocks = tf.reduce_prod(dims)
indices = tf.reshape(indices, [num_blocks, -1])
return tf.cast(indices, tf.int32)
def make_2d_block_raster_mask(query_shape, memory_flange):
"""Creates a mask for 2d block raster scan.
The query mask can look to the left, top left, top, and top right, but
not to the right. Inside the query, we have the standard raster scan
masking.
Args:
query_shape: A tuple of ints (query_height, query_width)
memory_flange: A tuple of ints
(memory_flange_height, memory_flange_width)
Returns:
A tensor of shape query_size, memory_size
"""
# mask inside the query block
query_triangle = common_layers.ones_matrix_band_part(
np.prod(query_shape), np.prod(query_shape), -1, 0)
split_query_masks = tf.split(query_triangle, query_shape[0], axis=1)
# adding mask for left and right
mask_pieces = [
tf.concat(
[
tf.ones([np.prod(query_shape), memory_flange[1]]),
split_query_masks[i],
tf.zeros([np.prod(query_shape), memory_flange[1]])
],
axis=1) for i in range(query_shape[0])
]
# adding mask for top
final_mask = tf.concat(
[
tf.ones([
np.prod(query_shape),
(query_shape[1] + 2 * memory_flange[1]) * memory_flange[0]
]),
tf.concat(mask_pieces, axis=1)
],
axis=1)
# 0.0 is visible location, 1.0 is masked.
return 1. - final_mask
def get_memory_region(x, query_block_shape, memory_flange, q_indices):
"""Get the memory regions that surround a 2d query.
The memory regions will be the left and top right.
Args:
x: A tensor with shape [batch, heads, height, width, depth]
query_block_shape: a 2-d tuple of integers
memory_flange: a 2-d tuple of integers
q_indices: a tensor of indices for each of the center blocks.
[num_blocks, block_length]
Returns:
    x_flange: A tensor of shape [batch, heads, #blocks, block_length, depth]
    x_center: A tensor of the gathered query blocks with shape
      [batch, heads, #blocks, block_length, depth]
  """
# Padding x to be multiple of query_shape and then
# extracting the memory blocks from the same regions as the query blocks
x_query_padded = pad_to_multiple_2d(x, query_block_shape)
x_center = gather_blocks_2d(x_query_padded, q_indices)
# Then padding the flange region
paddings = [[0, 0], [0, 0], [memory_flange[0], 0],
[memory_flange[1], memory_flange[1]], [0, 0]]
x_memory_padded = tf.pad(x_query_padded, paddings)
left_x = None
top_x = None
# Extracting the memory regions around the query block. left_x_region extends
# to the left and the top_x_region is the combination of top left, top, and
# top right of the query block
# if no left region
if memory_flange[1] > 0:
left_x_region = x_memory_padded[:, :, memory_flange[
0]:, :-(query_block_shape[1] + memory_flange[1]), :]
left_memory_shape = (query_block_shape[0], memory_flange[1])
left_indices = gather_indices_2d(left_x_region, left_memory_shape,
query_block_shape)
left_x = gather_blocks_2d(left_x_region, left_indices)
# if no top region
if memory_flange[0] > 0:
top_x_region = x_memory_padded[:, :, :-query_block_shape[0], :, :]
top_memory_shape = (memory_flange[0],
query_block_shape[1] + 2 * memory_flange[1])
top_indices = gather_indices_2d(top_x_region, top_memory_shape,
query_block_shape)
top_x = gather_blocks_2d(top_x_region, top_indices)
x_flange = None
if top_x is not None and left_x is not None:
x_flange = tf.concat([top_x, left_x], axis=3)
else:
x_flange = top_x if top_x is not None else left_x
return x_flange, x_center
def get_shifted_center_blocks(x, indices):
"""Get right shifted blocks for masked local attention 2d.
Args:
x: A tensor with shape [batch, heads, height, width, depth]
indices: The indices to gather blocks
Returns:
x_shifted: a tensor of extracted blocks, each block right shifted along
length.
"""
center_x = gather_blocks_2d(x, indices)
# Shift right along the length dimension
def shift_right_2d_blocks(x):
"""Shift the second to last dimension of x right by one."""
shifted_targets = (
tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :, :-1, :])
return shifted_targets
x_shifted = shift_right_2d_blocks(center_x)
return x_shifted
def right_shift_blockwise(x, query_shape, name=None):
"""Right shifts once in every block.
Args:
x: a tensor of shape [batch, height, width, depth]
query_shape: A 2d tuple of ints
name: a string
  Returns:
    a right-shifted tensor with the same shape as x
  """
import torch
import torch.nn as nn
from lib.model.embedding import Embedding
class TemporalBlock(nn.Module):
"""
Reference 3D pose estimation model with temporal convolutions.
This implementation can be used for all use-cases.
"""
def __init__(self, num_joints_in, in_features, num_joints_out,
filter_widths, causal=False, dropout=0.2, channels=1024, latten_features=256, dense=False,
is_train=True, Optimize1f=True):
super().__init__()
self.is_train = is_train
self.augment = False
self.Optimize1f = Optimize1f
self.num_joints_in = num_joints_in
self.in_features = in_features
self.num_joints_out = num_joints_out
self.filter_widths = filter_widths
self.drop = nn.Dropout(dropout)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.pad = [filter_widths[0] // 2]
self.expand_bn = nn.BatchNorm1d(channels, momentum=0.1)
# self.shrink = nn.Conv1d(channels, num_joints_out * 3, 1)
self.shrink = nn.Conv1d(channels, latten_features, 1)
if self.Optimize1f == False:
self.expand_conv = nn.Conv1d(num_joints_in * in_features, channels, filter_widths[0], bias=False)
else:
self.expand_conv = nn.Conv1d(num_joints_in * in_features, channels, filter_widths[0],
stride=filter_widths[0], bias=False)
layers_conv = []
layers_bn = []
self.causal_shift = [(filter_widths[0]) // 2 if causal else 0]
next_dilation = filter_widths[0]
for i in range(1, len(filter_widths)):
self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
self.causal_shift.append((filter_widths[i] // 2 * next_dilation) if causal else 0)
if self.Optimize1f == False:
layers_conv.append(nn.Conv1d(channels, channels,
filter_widths[i] if not dense else (2 * self.pad[-1] + 1),
dilation=next_dilation if not dense else 1,
bias=False))
else:
layers_conv.append(nn.Conv1d(channels, channels, filter_widths[i], stride=filter_widths[i], bias=False))
layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
next_dilation *= filter_widths[i]
self.layers_conv = nn.ModuleList(layers_conv)
self.layers_bn = nn.ModuleList(layers_bn)
def set_bn_momentum(self, momentum):
self.expand_bn.momentum = momentum
for bn in self.layers_bn:
bn.momentum = momentum
def set_training_status(self, is_train):
self.is_train = is_train
def set_augment(self, augment):
self.augment = augment
def receptive_field(self):
"""
Return the total receptive field of this model as # of frames.
"""
frames = 0
for f in self.pad:
frames += f
return 1 + 2 * frames
def forward(self, x):
x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
for i in range(len(self.pad) - 1):
pad = self.pad[i + 1]
shift = self.causal_shift[i + 1]
if self.Optimize1f == False:
res = x[:, :, pad + shift: x.shape[2] - pad + shift]
else:
res = x[:, :, self.causal_shift[i + 1] + self.filter_widths[i + 1] // 2:: self.filter_widths[i + 1]]
x = self.drop(self.relu(self.layers_bn[2 * i](self.layers_conv[2 * i](x))))
x = res + self.drop(self.relu(self.layers_bn[2 * i + 1](self.layers_conv[2 * i + 1](x))))
x = self.shrink(x)
x = x.permute(0, 2, 1)
x_sz = x.shape
x = x.reshape(x_sz[0] * x_sz[1], x_sz[2]).unsqueeze(1)
return x
class Linear(nn.Module):
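    """Residual fully-connected block: two Linear -> BatchNorm1d -> LeakyReLU -> Dropout
    stages whose output is added back to the block input."""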
def __init__(self, linear_size, p_dropout=0.25):
super(Linear, self).__init__()
self.l_size = linear_size
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.dropout = nn.Dropout(p_dropout)
self.w1 = nn.Linear(self.l_size, self.l_size)
self.batch_norm1 = nn.BatchNorm1d(self.l_size)
self.w2 = nn.Linear(self.l_size, self.l_size)
self.batch_norm2 = nn.BatchNorm1d(self.l_size)
def forward(self, x):
y = self.w1(x)
y = self.batch_norm1(y)
y = self.relu(y)
y = self.dropout(y)
y = self.w2(y)
y = self.batch_norm2(y)
y = self.relu(y)
y = self.dropout(y)
out = x + y
return out
class FCBlock(nn.Module):
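    """Fully-connected head: an input projection (fc_1 + BN + LeakyReLU + Dropout),
    `block_num` residual Linear blocks, and an output projection (fc_2)."""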
def __init__(self, channel_in, channel_out, linear_size, block_num):
super(FCBlock, self).__init__()
self.linear_size = linear_size
self.block_num = block_num
self.layers = []
self.channel_in = channel_in
self.stage_num = 3
self.p_dropout = 0.25
self.fc_1 = nn.Linear(self.channel_in, self.linear_size)
self.bn_1 = nn.BatchNorm1d(self.linear_size)
for i in range(block_num):
self.layers.append(Linear(self.linear_size, self.p_dropout))
self.fc_2 = nn.Linear(self.linear_size, channel_out)
self.layers = nn.ModuleList(self.layers)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.dropout = nn.Dropout(self.p_dropout)
def forward(self, x):
x = self.fc_1(x)
x = self.bn_1(x)
x = self.relu(x)
x = self.dropout(x)
for i in range(self.block_num):
x = self.layers[i](x)
x = self.fc_2(x)
return x
class RIEModel(nn.Module):
"""
Reference 3D pose estimation model with temporal convolutions.
This implementation can be used for all use-cases.
"""
def __init__(self, num_joints_in, in_features, num_joints_out,
filter_widths, causal=False, dropout=0.2, latten_features=256,
channels=1024, dense=False, is_train=True, Optimize1f=True, stage=1,
extrinsic_dim=12, embedd_dim=64):
super(RIEModel, self).__init__()
"""
Initialize this model.
Arguments:
num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
in_features -- number of input features for each joint (typically 2 for 2D input)
num_joints_out -- number of output joints (can be different than input)
filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
dropout -- dropout probability
channels -- number of convolution channels
dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment)
is_train -- if the model runs in training mode or not
Optimize1f=True -- using 1 frame optimization or not
stage -- current stage when using the multi-stage optimization method
"""
self.augment = False
self.is_train = is_train
self.num_joints_in = num_joints_in
self.num_joints_out = num_joints_out
self.in_features = in_features
self.latten_features = latten_features
self.stage = stage
if self.num_joints_in == 17:
self.LocalLayer_Torso = TemporalBlock(5 * 3, in_features, num_joints_out, filter_widths, causal, dropout,
channels, self.latten_features, dense, is_train, Optimize1f)
elif self.num_joints_in == 15:
self.LocalLayer_Torso = TemporalBlock(3 * 3, in_features, num_joints_out, filter_widths, causal, dropout,
channels, self.latten_features, dense, is_train, Optimize1f)
elif self.num_joints_in == 14:
self.LocalLayer_Torso = TemporalBlock(2 * 3, in_features, num_joints_out, filter_widths, causal, dropout,
channels, self.latten_features, dense, is_train, Optimize1f)
self.LocalLayer_LArm = TemporalBlock(3 * 3, in_features, num_joints_out, filter_widths, causal, dropout,
channels, self.latten_features, dense, is_train, Optimize1f)
self.LocalLayer_RArm = TemporalBlock(3 * 3, in_features, num_joints_out, filter_widths, causal, dropout,
channels, self.latten_features, dense, is_train, Optimize1f)
self.LocalLayer_LLeg = TemporalBlock(3 * 3, in_features, num_joints_out, filter_widths, causal, dropout,
channels, self.latten_features, dense, is_train, Optimize1f)
self.LocalLayer_RLeg = TemporalBlock(3 * 3, in_features, num_joints_out, filter_widths, causal, dropout,
channels, self.latten_features, dense, is_train, Optimize1f)
self.pad = (self.receptive_field() - 1) // 2
self.GlobalInfo = FCBlock(num_joints_in * self.in_features, self.latten_features, 1024, 2)
if stage != 1:
self.FuseBlocks = nn.ModuleList([])
for i in range(5):
self.FuseBlocks.append(
FCBlock(self.latten_features * 4, self.latten_features, 1024, 1)
)
self.camera_embedding = True if (extrinsic_dim > 0 and embedd_dim > 0) else False
self.extrinsic_dim = extrinsic_dim
self.embedd_dim = embedd_dim
if self.camera_embedding:
self.embedder = Embedding(in_channels=extrinsic_dim, out_channels=embedd_dim)
self.out_features_dim = self.latten_features * 2 if stage == 1 else self.latten_features * 3
self.out_features_dim += embedd_dim
if self.num_joints_in == 17:
self.Integration_Torso = FCBlock(self.out_features_dim, 5 * 3, 1024, 1)
elif self.num_joints_in == 15:
self.Integration_Torso = FCBlock(self.out_features_dim, 3 * 3, 1024, 1)
elif self.num_joints_in == 14:
self.Integration_Torso = FCBlock(self.out_features_dim, 2 * 3, 1024, 1)
self.Integration_LArm = FCBlock(self.out_features_dim, 3 * 3, 1024, 1)
self.Integration_RArm = FCBlock(self.out_features_dim, 3 * 3, 1024, 1)
self.Integration_LLeg = FCBlock(self.out_features_dim, 3 * 3, 1024, 1)
self.Integration_RLeg = FCBlock(self.out_features_dim, 3 * 3, 1024, 1)
def set_bn_momentum(self, momentum):
self.LocalLayer_Torso.set_bn_momentum(momentum)
self.LocalLayer_LArm.set_bn_momentum(momentum)
self.LocalLayer_RArm.set_bn_momentum(momentum)
self.LocalLayer_LLeg.set_bn_momentum(momentum)
self.LocalLayer_RLeg.set_bn_momentum(momentum)
def set_training_status(self, is_train):
self.is_train = is_train
self.LocalLayer_Torso.set_training_status(is_train)
self.LocalLayer_LArm.set_training_status(is_train)
self.LocalLayer_RArm.set_training_status(is_train)
self.LocalLayer_LLeg.set_training_status(is_train)
self.LocalLayer_RLeg.set_training_status(is_train)
def set_augment(self, augment):
self.augment = augment
self.LocalLayer_Torso.set_augment(augment)
self.LocalLayer_LArm.set_augment(augment)
self.LocalLayer_RArm.set_augment(augment)
self.LocalLayer_LLeg.set_augment(augment)
self.LocalLayer_RLeg.set_augment(augment)
def receptive_field(self):
"""
Return the total receptive field of this model as # of frames.
"""
return self.LocalLayer_Torso.receptive_field()
def forward(self, x, param):
assert len(x.shape) == 4
assert x.shape[-2] == self.num_joints_in
assert x.shape[-1] == self.in_features
pad = (self.receptive_field() - 1) // 2
in_current = x[:, x.shape[1] // self.in_features:x.shape[1] // self.in_features + 1]
in_current = in_current.reshape(in_current.shape[0] * in_current.shape[1], -1)
x_sz = x.shape
x = x.view(x.shape[0], x.shape[1], -1)
x = x.permute(0, 2, 1)
sz = x.shape
# Positional information encoding
diff = x - x[:, 0:self.in_features, :].repeat(1, sz[1] // self.in_features, 1)
# Temporal information encoding
diff_t = x - x[:, :, x.shape[2] // self.in_features:x.shape[2] // self.in_features + 1].expand(sz[0], sz[1], sz[2])
if self.in_features == 3:
# Grouping
if self.num_joints_in == 17:
in_Torso = torch.cat(
(x[:, 0:3, :], x[:, 21:33, :], diff[:, 0:3, :], diff[:, 21:33, :], diff_t[:, 0:3, :], diff_t[:, 21:33, :]),
dim=1)
in_LArm = torch.cat((x[:, 42:51, :], diff[:, 42:51, :], diff_t[:, 42:51, :]), dim=1)
in_RArm = torch.cat((x[:, 33:42, :], diff[:, 33:42, :], diff_t[:, 33:42, :]), dim=1)
in_LLeg = torch.cat((x[:, 3:12, :], diff[:, 3:12, :], diff_t[:, 3:12, :]), dim=1)
in_RLeg = torch.cat((x[:, 12:21, :], diff[:, 12:21, :], diff_t[:, 12:21, :]), dim=1)
elif self.num_joints_in == 15:
in_Torso = torch.cat(
(x[:, 0:6, :], x[:, 42:45, :], diff[:, 0:6, :], diff[:, 42:45, :], diff_t[:, 0:6, :], diff_t[:, 42:45, :]),
dim=1)
in_LArm = torch.cat((x[:, 6:15, :], diff[:, 6:15, :], diff_t[:, 6:15, :]), dim=1)
in_RArm = torch.cat((x[:, 15:24, :], diff[:, 15:24, :], diff_t[:, 15:24, :]), dim=1)
in_LLeg = torch.cat((x[:, 24:33, :], diff[:, 24:33, :], diff_t[:, 24:33, :]), dim=1)
in_RLeg = torch.cat((x[:, 33:42, :], diff[:, 33:42, :], diff_t[:, 33:42, :]), dim=1)
elif self.num_joints_in == 14:
in_Torso = torch.cat(
(x[:, 0:3, :], x[:, 21:24, :], diff[:, 0:3, :], diff[:, 21:24, :], diff_t[:, 0:3, :], diff_t[:, 21:24, :]),
dim=1)
in_LArm = torch.cat((x[:, 24:33, :], diff[:, 24:33, :], diff_t[:, 24:33, :]), dim=1)
in_RArm = torch.cat((x[:, 33:42, :], diff[:, 33:42, :], diff_t[:, 33:42, :]), dim=1)
in_LLeg = torch.cat((x[:, 12:21, :], | |
the overall hit rate,
# instead of averaging the hit rates
#if info_by_band_pair[bp]["total_events"] == 0:
# info_by_band_pair[bp]["overall_hit_rate"] == 0
#else:
# info_by_band_pair[bp]["overall_hit_rate"] = info_by_band_pair[bp]["total_hits"]/info_by_band_pair[bp]["total_events"]
# Return info
return info_by_band_pair
# Note: 0 hits for 0 events gets counted as a hit rate of 0.
# Perhaps it should be discarded instead?
# But then what if the entire span has 0 events?
def getAvgHitRates(datalist):
print("Performing getAvgHitRates")
num_tests = len(datalist)
total_rates = sum([result["hit_count"]/result["test_events"] for result in datalist if result["test_events"]!=0])
for i, result in enumerate(datalist):
print(result)
toprint = [result["hit_count"], result["test_events"]]
if toprint[1] == 0:
toprint.append(0)
else:
toprint.append(toprint[0]/toprint[1])
print("\t".join([str(x) for x in toprint]))
print(total_rates/num_tests)
return total_rates/num_tests
"""
getDataByCovRate
Given a path to csv results from running risk models,
return a dictionary where keys are coverage rates and
values are the rows of info with that coverage from the csv.
"""
def getDataByCovRate(results_full_path,
header_types = csv_data_types,
earliest_eval_date = None,
latest_eval_date = None,
):
# Keep track of total number of events (i.e., crimes)
total_event_count = 0
dates_seen = []
model_param_names = []
cov_rates = set()
# Instantiate a mapping from coverage rate to {another mapping of results}.
# That other mapping will be from model to results.
# And, those results will be a list of mappings, each entry in the list being
# a different row from the csv results
datadicts_by_cov_rate = defaultdict(lambda: defaultdict(list))
# Open csv output and start reading it
with open(results_full_path, newline="") as f:
reader = csv.reader(f)
# Obtain column names from header in first line
header = next(reader, None)
# Read each line of data
for dataline in reader:
# Instantiate a map from col name to data, for this line
dataline_dict = dict()
# All data is currently in string form.
# Use header_types to cast the data appropriately.
for i,d in enumerate(dataline):
# Default is empty string
casted_data = ""
# Things like int("") don't work, so we catch that here
if d != "":
casted_data = header_types[i](d)
# Transform data into str/int/float/datetime64 before storing it
dataline_dict[header[i]] = casted_data
# Keep track of how many eval_date's we've seen,
# and how many events (crimes) there have been in total
# If date is outside of desired range, continue
dataline_date = dataline_dict["eval_date"]
if earliest_eval_date != None and dataline_date < earliest_eval_date:
continue
if latest_eval_date != None and latest_eval_date < dataline_date:
continue
if dataline_date not in dates_seen:
total_event_count += dataline_dict["test_events"]
dates_seen.append(dataline_date)
# Grab coverage and model, since we'll use those a lot
dataline_cov = dataline_dict["coverage_rate"]
if dataline_cov not in cov_rates:
cov_rates.add(dataline_cov)
dataline_model = dataline_dict["model"]
# Grab the bandwidths for PHS results, store them as "param_pair"
if dataline_model == "phs":
time_band = int(dataline_dict["phs_time_band"][:-1])
dist_band = dataline_dict["phs_dist_band"]
dataline_dict["param_pair"] = (time_band, dist_band)
model_param_name = dataline_model
if dataline_model == "random":
model_param_name += "-" + str(dataline_dict["rand_seed"])
elif dataline_model == "phs":
model_param_name += "-" + "-".join([str(x) for x in dataline_dict["param_pair"]])
if model_param_name not in model_param_names:
model_param_names.append(model_param_name)
# Store dict so they're first sorted by coverage then by model type
datadicts_by_cov_rate[dataline_cov][model_param_name].append(dataline_dict)
return datadicts_by_cov_rate, dates_seen, model_param_names, sorted(cov_rates)
def graphHitRatesOverTime(results_full_path):
datadicts_by_cov_rate, exp_dates, model_names, cov_rates = getDataByCovRate(results_full_path)
for cov_rate in cov_rates:
# Declare figure
print("Declaring figure for graphHitRatesOverTime...")
fig, ax = plt.subplots(figsize=(12,6))
names_for_legend = []
cov_results_all_models = datadicts_by_cov_rate[cov_rate]
num_dates = len(exp_dates)
num_models = len(model_names)
for mn in model_names:
if len(cov_results_all_models[mn]) != num_dates:
print("Error!")
print(f"Model: {mn}")
print(f"Expected number of experiments: {num_dates}")
print(f"Found number of experiments: {len(cov_results_all_models[mn])}")
sys.exit(0)
result_matrix = np.zeros((num_models, num_dates))
for mn_index, mn in enumerate(model_names):
names_for_legend.append(mn)
model_results = cov_results_all_models[mn]
for mr_index, mr in enumerate(model_results):
result_matrix[mn_index, mr_index] = mr["hit_pct"]
for row_num, row in enumerate(result_matrix):
ax.plot(exp_dates, row)
ax.legend(names_for_legend)
ax.tick_params(axis='x', rotation=90)
ax.set_title(f"Hit rates over time, coverage {cov_rate}")
"""
result_matrix = np.zeros((len(all_exp_results[0]), len(all_exp_results)))
for exp_num, exp in enumerate(all_exp_results):
for model_num, model_result in enumerate(exp):
result_matrix[model_num, exp_num] = model_result[0][coverage_cell_index]
for row_num, row in enumerate(result_matrix):
ax.plot(test_data_dates, row + (results_count_offset * row_num) )
names_for_legend.append(all_exp_results[0][row_num][1])
x_axis_size = len(hit_rates_dict[model_names[0]][0])
x_axis_values = np.linspace(0,1,x_axis_size)
print(x_axis_size)
for mn in model_names:
for hr in hit_rates_dict[mn]:
ax.plot(x_axis_values, hr)
for mr in model_runs_list[mn]:
names_for_legend.append(mr)
ax.legend(names_for_legend)
"""
return
"""
Copied snippets from riskModelsCompare
Still working out this section...
"""
def graphCoverageVsHitRate(hit_rates_dict, model_runs_list, model_names):
"""
print(len(hit_rates_dict))
for m in hit_rates_dict:
print(m)
print(len(hit_rates_dict[m]))
print(len(hit_rates_dict[m][0]))
print(len(model_runs_list))
print(model_runs_list)
"""
model_hit_rate_pairs = []
for mn in model_names:
model_hit_rate_pairs += list(zip(model_runs_list[mn], hit_rates_dict[mn]))
#hit_rates_flat += hit_rates_dict[mn]
#model_runs_flat += model_runs_list[mn]
#print(len(hit_rates_flat))
#print(len(model_runs_flat))
print(len(model_hit_rate_pairs))
### DECLARE FIGURE FOR HITRATE/COVERAGE
# !!! I should add an option for the x-axis of the figure!!!
#results_count_offset = .025
#results_rate_offset = .005
#results_count_offset = 0
#results_rate_offset = 0
# new version
# Declare figure
print("Declaring figure for graphCoverageVsHitRate...")
fig, ax = plt.subplots(figsize=(12,6))
names_for_legend = []
x_axis_size = len(hit_rates_dict[model_names[0]][0])
x_axis_values = np.linspace(0,1,x_axis_size)
print(x_axis_size)
for mn in model_names:
for hr in hit_rates_dict[mn]:
ax.plot(x_axis_values, hr)
for mr in model_runs_list[mn]:
names_for_legend.append(mr)
ax.legend(names_for_legend)
return
"""
result_matrix = np.zeros((len(all_exp_results[0]), len(all_exp_results)))
for exp_num, exp in enumerate(all_exp_results):
for model_num, model_result in enumerate(exp):
result_matrix[model_num, exp_num] = model_result[0][coverage_cell_index]
for row_num, row in enumerate(result_matrix):
ax.plot(test_data_dates, row + (results_count_offset * row_num) )
names_for_legend.append(all_exp_results[0][row_num][1])
#ax.legend(names_for_legend)
ax.tick_params(axis='x', rotation=90)
# one of the orig sections from riskModelsCompare
# Declare figure
print("Declaring figure...")
fig, ax = plt.subplots(figsize=(12,6))
names_for_legend = []
result_matrix = np.zeros((len(all_exp_results[0]), len(all_exp_results)))
for exp_num, exp in enumerate(all_exp_results):
for model_num, model_result in enumerate(exp):
result_matrix[model_num, exp_num] = model_result[0][coverage_cell_index]
for row_num, row in enumerate(result_matrix):
ax.plot(test_data_dates, row + (results_count_offset * row_num) )
names_for_legend.append(all_exp_results[0][row_num][1])
#ax.legend(names_for_legend)
ax.tick_params(axis='x', rotation=90)
# Declare figure
print("Declaring figure...")
fig, ax = plt.subplots(figsize=(12,6))
names_for_legend = []
#xcoords = test_data_dates
coverage_rate = 0.10
coverage_cell_index = int(num_cells_region * coverage_rate)-1
print("reg {}".format(num_cells_region))
print("cov {}".format(coverage_rate))
print("cci {}".format(coverage_cell_index))
result_matrix = np.zeros((len(all_exp_results[0]), len(all_exp_results)))
for exp_num, exp in enumerate(all_exp_results):
if test_data_counts[exp_num] == 0:
continue
for model_num, model_result in enumerate(exp):
result_matrix[model_num, exp_num] = \
model_result[0][coverage_cell_index]/test_data_counts[exp_num]
for row_num, row in enumerate(result_matrix):
ax.plot(test_data_dates, row + (results_rate_offset * row_num) )
names_for_legend.append(all_exp_results[0][row_num][1])
#ax.legend(names_for_legend)
ax.tick_params(axis='x', rotation=90)
"""
def main():
datadir = os.path.join("..", "..", "Data")
#results_fname = "results_190515_Chicago_160101_1M_1D.csv"
#results_fname = "results_190517_Chicago_020101_1Y_1D.csv"
#results_fname = "results_190517_Chicago_020101_1Y_3D.csv"
#results_fname = "results_190522_Chicago_020101_1Y_7D.csv"
#results_fname = "results_190621_Chicago_160301_1M_1D.csv"
#results_fname = "results_190621_Chicago_160301_9M_1D.csv"
#results_fname = "temp_results_190621_Chicago_010301_17Y_1D.csv"
#results_fname = "results_190621_Chicago_010301_17Y_1D.csv"
results_fname = "results_190628_Chicago_130101_5Y_1D.csv"
# Only include results of tests later OR EQUAL to this date
earliest_eval_date = np.datetime64("2013-01-01")
# Only include results of tests earlier BUT NOT EQUAL to this date
latest_eval_date = None
results_full_path = os.path.join(datadir, results_fname)
# Keep track of dates seen in the output data
dates_seen = set()
datadicts_by_cov_rate = getDataByCovRate(results_full_path)
# Determine the number of evaluation dates in the data
# We expect this to equal the number of instances of random/naive/ideal
# experiments, and also the number of phs experiments when multiplied by
# the number of phs parameter combinations.
num_dates = len(dates_seen)
print(num_dates)
earliest_date_seen =sorted(dates_seen)[0]
latest_date_seen =sorted(dates_seen)[-1]
print(earliest_date_seen)
print(latest_date_seen)
phsdicts_by_cov_rate = dict([(cov, d["phs"]) for cov, d in datadicts_by_cov_rate.items()])
naivedicts_by_cov_rate = dict([(cov, d["naive"]) for cov, d in datadicts_by_cov_rate.items()])
create_naive_csv_summary = True
if create_naive_csv_summary:
timespan = "1M"
date_today = datetime.date.today()
date_today_str = getSixDigitDate(date_today)
earliest_date_str = getSixDigitDate(earliest_date_seen)
latest_date_str = getSixDigitDate(latest_date_seen)
sumcsv_base = f"ratesummary_xsr_nai_{date_today_str}_{earliest_date_str}_{latest_date_str}_{timespan}.csv"
sumcsvname = os.path.join(datadir, sumcsv_base)
writeModelSummaryCsv(naivedicts_by_cov_rate, timespan, "naive", csvname=sumcsvname)
sys.exit(0)
create_phs_csv_summary = False
if create_phs_csv_summary:
timespan = "1M"
date_today = datetime.date.today()
date_today_str = getSixDigitDate(date_today)
earliest_date_str = getSixDigitDate(earliest_date_seen)
latest_date_str = getSixDigitDate(latest_date_seen)
phssumcsv_base = f"ratesummary_xsr_phs_{date_today_str}_{earliest_date_str}_{latest_date_str}_{timespan}.csv"
phssumcsvname = os.path.join(datadir, phssumcsv_base)
#writePhsSummaryCsv(phs_list, timespan, csvname=phssumcsvname)
writeModelSummaryCsv(phsdicts_by_cov_rate, timespan, "phs", csvname=phssumcsvname)
sys.exit(0)
create_phs_csv_var = True
if create_phs_csv_var:
| |
ugsu = unique_game[s][u]
mrg = ugsu[-1]
game_problems = {}
for prevreport in ugsu[:-1]:
problems = set()
if mrg.is_inconsistent(prevreport, problems):
game_problems.setdefault(mrg, set()).update(problems)
if not game_problems:
self.gamesxref[mrg] = None
if mrg.section in self.matches:
for umkey in self.matches[mrg.section]:
if (
umkey[0] != mrg.hometeam
or umkey[1] != mrg.awayteam
):
continue
match = self.matches[mrg.section][umkey][-1]
done = False
for game in match.games:
if game.result != tobereported:
continue
if (
game.homeplayer != mrg.homeplayer
or game.awayplayer != mrg.awayplayer
):
continue
if game not in self.gamesxref:
self.gamesxref[game] = mrg
self.gamesxref[mrg] = game
done = True
break
self.gamesxref[mrg] = False
if done:
break
else:
sect = []
if isinstance(mrg.section, (str, bytes)):
sect.append(mrg.section)
else:
for w in mrg.section:
sect.append(w)
self.reports.error.append(
(
" ".join(
(
"Inconsistent reports for",
" ".join(sect),
"game.",
)
),
self.reports,
)
)
mrg.tagger.append_generated_report(
self.reports.error,
" Most recent report:",
)
mrg.tagger.append_generated_report(
self.reports.error,
" ".join(
(
" ",
nullstring(mrg.hometeam),
"-",
nullstring(mrg.awayteam),
" ",
nullstring(mrg.homeplayer),
mrg.get_print_result()[0],
nullstring(mrg.awayplayer),
)
),
)
prevreport.tagger.append_generated_report(
self.reports.error, " Earlier report:"
)
prevreport.tagger.append_generated_report(
self.reports.error,
" ".join(
(
" ",
nullstring(prevreport.hometeam),
"-",
nullstring(prevreport.awayteam),
" ",
nullstring(prevreport.homeplayer),
prevreport.get_print_result()[0],
nullstring(prevreport.awayplayer),
)
),
)
self.reports.error.append(("", self.reports))
# get_finished_games copied from slcollation.
# The pdlcollation version is identical.
def get_finished_games(self):
"""Return list of finished games"""
finished = []
for section in self.finishedgames:
for ugkey in self.finishedgames[section]:
finished.append(self.finishedgames[section][ugkey][-1])
return finished
# The methods from here on are copied from Collation.
# Added later: I think this means 'from gameobjects.Collation' which has
# been deleted: see comment at bottom of gameobjects.py
# Changed to populate er_results from er_matchresults
def collate_matches(self, reports, schedule):
"""Collate results in matchrecords with expected results in schedule
Match score inconsistent with game scores is reported as an error when
the condition occurs on an earlier report: the condition is accepted on
the most recent report and noted in the validation report.
There are several distinct steps:
Collect match report by teams in match taking a source dependent tag
into account to deal with possible duplicate reports.
Check that any duplicate reports are consistent.
Cross-refernce reports with the schedule.
Produce report of errors and inconsistencies that may, or may not, be
deemed errors.
"""
def nullstring(s):
if isinstance(s, str):
return s
else:
return ""
matchrecords = reports.er_matchresults
self.matches.clear()
self.matchesxref.clear()
unique_match = self.matches
for e, f, match in sorted(
[(m.order, m.source, m) for m in matchrecords]
):
if match.competition not in unique_match:
unique_match[match.competition] = dict()
umkey = (match.hometeam, match.awayteam, match.source)
if umkey not in unique_match[match.competition]:
unique_match[match.competition][umkey] = [match]
else:
unique_match[match.competition][umkey].append(match)
# Assume fixtures have a date and match reports either have a date or
# or the matches are reported in fixture list date order.
# MatchFixture is unorderable so decorate to sort.
fixtures = sorted(
[(f.date, e, f) for e, f in enumerate(schedule.es_fixtures)]
)
for s in unique_match:
teamalias = schedule.es_team_alias.get(s, {})
for u in sorted(unique_match[s]):
umsu = unique_match[s][u]
mrm = umsu[-1]
authorizor = _MatchAuthorization(mrm)
authorizor.authorize_match_report(mrm)
match_problems = {}
# This condition is reported later, as a warning, when earlier
# reports are present.
if len(umsu) == 1:
if not mrm.get_unfinished_games_and_score_consistency()[1]:
match_problems.setdefault(ONLY_REPORT)
for pmr in umsu[:-1]:
authorizor.authorize_match_report(pmr)
# Not really sure if this should be reported as an error
# for earlier reports because the consistency of each game
# with the most recent report is enough: but changing a
# match score without getting an error may be a surprise.
if not pmr.get_unfinished_games_and_score_consistency()[1]:
match_problems.setdefault(MATCH_SCORE)
if len(pmr.games) != len(mrm.games):
match_problems.setdefault(GAME_COUNT)
continue
for mrmg, prg in zip(mrm.games, pmr.games):
problems = set()
mrmg.is_inconsistent(prg, problems)
if problems:
match_problems.setdefault(mrmg, set()).update(
problems
)
if not authorizor.is_match_authorized():
match_problems.setdefault(AUTHORIZATION)
if not match_problems:
self.matchesxref[mrm] = None
hometeam = teamalias.get(mrm.hometeam, {mrm.hometeam: {}})
awayteam = teamalias.get(mrm.awayteam, {mrm.awayteam: {}})
for df, ef, fixture in fixtures:
if mrm.competition == fixture.competition:
if fixture.hometeam in hometeam:
if fixture.awayteam in awayteam:
if fixture not in self.matchesxref:
self.matchesxref[fixture] = mrm
self.matchesxref[mrm] = fixture
if not mrm.date:
mrm.date = fixture.date
break
self.matchesxref[mrm] = False
self.games[(s, u)] = mrm
# Add matches which are consistent to er_results
reports.set_match_result(mrm)
else:
rep = ["Inconsistent reports for"]
if isinstance(mrm.competition, str):
rep.append(mrm.competition)
else:
sect = []
for e in mrm.competition:
if isinstance(e, str):
sect.append(e)
else:
for w in e:
sect.append(w)
rep.append(" ".join(sect))
rnd = nullstring(mrm.round)
if rnd:
rep.append("Round")
rep.append(rnd)
rep.append("match:")
self.reports.error.append((" ".join(rep), self.reports))
self.reports.error.append(
(
" ".join(
(
" ",
mrm.hometeam,
"".join(
(
nullstring(mrm.homescore),
"-",
nullstring(mrm.awayscore),
)
),
mrm.awayteam,
" ",
mrm.source,
)
),
self.reports,
)
)
self.reports.error.append(
(" Error detail:", self.reports)
)
mp = {
k: v
for k, v in match_problems.items()
if not isinstance(k, MatchGame)
}
if mp:
for k in mp:
match_problems.pop(k, None)
self.reports.error.append(
(
" ".join(
(
" ",
", ".join([e for e in sorted(mp)]),
)
),
self.reports,
)
)
for g, d in match_problems.items():
self.reports.error.append(
(
" ".join(
(
" ",
nullstring(g.board),
nullstring(g.homeplayer.name),
g.get_print_result()[0],
nullstring(g.awayplayer.name),
" **",
", ".join([e for e in sorted(d)]),
)
),
self.reports,
)
)
mrm.tagger.append_generated_report(
self.reports.error, " Most recent report:"
)
for g in mrm.games:
g.tagger.append_generated_report(
self.reports.error,
" ".join(
(
" ",
nullstring(g.board),
nullstring(g.homeplayer.name),
g.get_print_result()[0],
nullstring(g.awayplayer.name),
)
),
)
for m in umsu[:-1]:
games = m.games
m.tagger.append_generated_report(
self.reports.error, " Earlier report:"
)
for g in games:
g.tagger.append_generated_report(
self.reports.error,
" ".join(
(
" ",
nullstring(g.board),
nullstring(g.homeplayer.name),
g.get_print_result()[0],
nullstring(g.awayplayer.name),
)
),
)
self.reports.error.append(("", self.reports))
fnp = [
(
f.competition,
len(f.tagger.datatag),
f.tagger.datatag,
f.tagger.teamone,
f.tagger.teamtwo,
e,
f,
)
for e, f in enumerate(schedule.es_fixtures)
if f not in self.matchesxref
]
self.fixturesnotplayed = [f[-1] for f in sorted(fnp)]
def collate_players(self, schedule):
"""Unify and complete player references used in games.
For each unique player identity there is likely to be several Player
instances used in Game instances. Pick one of the Player instances
and map all Game references to it.
Event and club details were not available when the Player instances
were created. Amend the instances still referenced by Game instances.
Add each Player instance to the dictionary of player identities with
games in this event.
Generate data for player reports.
"""
players = dict()
teamclub = dict()
identities = dict()
# pick one of the player instances for an identity and use it in
# all places for that identity
for section in self.matches:
for umkey in self.matches[section]:
for match in self.matches[section][umkey]:
if match.hometeam not in teamclub:
teamclub[match.hometeam] = schedule.get_club_team(
section, match.hometeam
)
if match.awayteam not in teamclub:
teamclub[match.awayteam] = schedule.get_club_team(
section, match.awayteam
)
for game in match.games:
for player in (game.homeplayer, game.awayplayer):
if player:
identity = (
player.name,
player.event,
schedule.es_startdate,
schedule.es_enddate,
teamclub[player.club],
)
if identity not in players:
players[identity] = player
gpi = player.get_player_identity()
if gpi not in identities:
identities[gpi] = identity
if player is not players[identity]:
if player is game.homeplayer:
game.homeplayer = players[
identities[gpi]
]
elif player is game.awayplayer:
game.awayplayer = players[
identities[gpi]
]
# complete the player identities by adding in event and club details
for p in players:
player = players[p]
player.startdate = schedule.es_startdate
player.enddate = schedule.es_enddate
player.club = teamclub[player.club]
player.affiliation = player.club
player.__dict__["_identity"] = (
player.name,
player.event,
player.startdate,
player.enddate,
player.club,
)
self.set_player(player)
# Generate data for player reports.
for section in self.matches:
for umkey in self.matches[section]:
match = self.matches[section][umkey][-1]
homet = match.hometeam
if homet not in self.teamplayers:
self.teamplayers[homet] = dict()
tph = self.teamplayers[homet]
homec = schedule.get_club_team(section, homet)
if homec not in self.clubplayers:
self.clubplayers[homec] = dict()
cph = self.clubplayers[homec]
awayt = match.awayteam
if awayt not in self.teamplayers:
self.teamplayers[awayt] = dict()
tpa = self.teamplayers[awayt]
awayc = schedule.get_club_team(section, awayt)
if awayc not in self.clubplayers:
self.clubplayers[awayc] = dict()
cpa = self.clubplayers[awayc]
for game in match.games:
if game.homeplayer:
homep = game.homeplayer.get_player_identity()
if homep not in tph:
tph[homep] = [match]
else:
tph[homep].append(match)
if homep not in cph:
cph[homep] = set(game.homeplayer.reported_codes)
else:
cph[homep].update(game.homeplayer.reported_codes)
if game.awayplayer:
awayp = game.awayplayer.get_player_identity()
if awayp not in tpa:
tpa[awayp] = [match]
else:
tpa[awayp].append(match)
if awayp not in cpa:
cpa[awayp] = set(game.awayplayer.reported_codes)
else:
cpa[awayp].update(game.awayplayer.reported_codes)
def get_fixtures_not_played(self):
"""Return list of fixtures not played"""
return self.fixturesnotplayed
def get_fixtures_played(self):
"""Return list of fixtures played"""
return [f for f in self.matchesxref if isinstance(f, MatchFixture)]
def get_non_fixtures_played(self):
"""Return list of matches played that | |
88.2),
('b', 1, 23.3),
('c', 8, 42.0),
('d', 7, 100.9),
('c', 2))
actual = rowselect(table, lambda rec: rec['foo'] == 'a')
expect = (('foo', 'bar', 'baz'),
('a', 4, 9.3),
('a', 2, 88.2))
ieq(expect, actual)
ieq(expect, actual) # check can iterate twice
def test_selectre():
table = (('foo', 'bar', 'baz'),
('aa', 4, 9.3),
('aaa', 2, 88.2),
('b', 1, 23.3),
('ccc', 8, 42.0),
('bb', 7, 100.9),
('c', 2))
actual = selectre(table, 'foo', '[ab]{2}')
expect = (('foo', 'bar', 'baz'),
('aa', 4, 9.3),
('aaa', 2, 88.2),
('bb', 7, 100.9))
ieq(expect, actual)
ieq(expect, actual)
def test_fieldmap():
table = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, 'female', 17, 1.78, 74.4),
(4, 'male', 21, 1.33, 45.2),
(5, '-', 25, 1.65, 51.9))
mappings = OrderedDict()
mappings['subject_id'] = 'id'
mappings['gender'] = 'sex', {'male': 'M', 'female': 'F'}
mappings['age_months'] = 'age', lambda v: v * 12
mappings['bmi'] = lambda rec: rec['weight'] / rec['height']**2
actual = fieldmap(table, mappings)
expect = (('subject_id', 'gender', 'age_months', 'bmi'),
(1, 'M', 16*12, 62.0/1.45**2),
(2, 'F', 19*12, 55.4/1.34**2),
(3, 'F', 17*12, 74.4/1.78**2),
(4, 'M', 21*12, 45.2/1.33**2),
(5, '-', 25*12, 51.9/1.65**2))
ieq(expect, actual)
ieq(expect, actual) # can iteratate twice?
# do it with suffix
actual = fieldmap(table)
actual['subject_id'] = 'id'
actual['gender'] = 'sex', {'male': 'M', 'female': 'F'}
actual['age_months'] = 'age', lambda v: v * 12
actual['bmi'] = '{weight} / {height}**2'
ieq(expect, actual)
# test short rows
table2 = (('id', 'sex', 'age', 'height', 'weight'),
(1, 'male', 16, 1.45, 62.0),
(2, 'female', 19, 1.34, 55.4),
(3, 'female', 17, 1.78, 74.4),
(4, 'male', 21, 1.33, 45.2),
(5, '-', 25, 1.65))
expect = (('subject_id', 'gender', 'age_months', 'bmi'),
(1, 'M', 16*12, 62.0/1.45**2),
(2, 'F', 19*12, 55.4/1.34**2),
(3, 'F', 17*12, 74.4/1.78**2),
(4, 'M', 21*12, 45.2/1.33**2),
(5, '-', 25*12, None))
actual = fieldmap(table2, mappings)
ieq(expect, actual)
def test_fieldmap_empty():
table = (('foo', 'bar'),)
expect = (('foo', 'baz'),)
mappings = OrderedDict()
mappings['foo'] = 'foo'
mappings['baz'] = 'bar', lambda v: v*2
actual = fieldmap(table, mappings)
ieq(expect, actual)
def test_facet():
table = (('foo', 'bar', 'baz'),
('a', 4, 9.3),
('a', 2, 88.2),
('b', 1, 23.3),
('c', 8, 42.0),
('d', 7, 100.9),
('c', 2))
fct = facet(table, 'foo')
assert set(fct.keys()) == set(['a', 'b', 'c', 'd'])
expect_fcta = (('foo', 'bar', 'baz'),
('a', 4, 9.3),
('a', 2, 88.2))
ieq(fct['a'], expect_fcta)
ieq(fct['a'], expect_fcta) # check can iterate twice
expect_fctc = (('foo', 'bar', 'baz'),
('c', 8, 42.0),
('c', 2))
ieq(fct['c'], expect_fctc)
ieq(fct['c'], expect_fctc) # check can iterate twice
def test_facet_2():
table = (('foo', 'bar', 'baz'),
('aa', 4, 9.3),
('aa', 2, 88.2),
('bb', 1, 23.3),
('cc', 8, 42.0),
('dd', 7, 100.9),
('cc', 2))
fct = facet(table, 'foo')
assert set(fct.keys()) == set(['aa', 'bb', 'cc', 'dd'])
expect_fcta = (('foo', 'bar', 'baz'),
('aa', 4, 9.3),
('aa', 2, 88.2))
ieq(fct['aa'], expect_fcta)
ieq(fct['aa'], expect_fcta) # check can iterate twice
expect_fctc = (('foo', 'bar', 'baz'),
('cc', 8, 42.0),
('cc', 2))
ieq(fct['cc'], expect_fctc)
ieq(fct['cc'], expect_fctc) # check can iterate twice
def test_facet_empty():
table = (('foo', 'bar'),)
actual = facet(table, 'foo')
eq_(list(), list(actual.keys()))
def test_rangefacet():
table1 = (('foo', 'bar'),
('a', 3),
('a', 7),
('b', 2),
('b', 1),
('b', 9),
('c', 4),
('d', 3))
rf = rangefacet(table1, 'bar', 2)
eq_([(1, 3), (3, 5), (5, 7), (7, 9)], list(rf.keys()))
expect_13 = (('foo', 'bar'),
('b', 2),
('b', 1)) # N.B., it get's sorted
ieq(expect_13, rf[(1, 3)])
ieq(expect_13, rf[(1, 3)])
expect_79 = (('foo', 'bar'),
('a', 7),
('b', 9))
ieq(expect_79, rf[(7, 9)])
def test_rowreduce():
table1 = (('foo', 'bar'),
('a', 3),
('a', 7),
('b', 2),
('b', 1),
('b', 9),
('c', 4))
def sumbar(key, rows):
return [key, sum(row[1] for row in rows)]
table2 = rowreduce(table1, key='foo', reducer=sumbar, fields=['foo', 'barsum'])
expect2 = (('foo', 'barsum'),
('a', 10),
('b', 12),
('c', 4))
ieq(expect2, table2)
def test_rowreduce_fieldnameaccess():
table1 = (('foo', 'bar'),
('a', 3),
('a', 7),
('b', 2),
('b', 1),
('b', 9),
('c', 4))
def sumbar(key, records):
return [key, sum([rec['bar'] for rec in records])]
table2 = rowreduce(table1, key='foo', reducer=sumbar, fields=['foo', 'barsum'])
expect2 = (('foo', 'barsum'),
('a', 10),
('b', 12),
('c', 4))
ieq(expect2, table2)
def test_rowreduce_more():
table1 = (('foo', 'bar'),
('aa', 3),
('aa', 7),
('bb', 2),
('bb', 1),
('bb', 9),
('cc', 4))
def sumbar(key, records):
return [key, sum(rec['bar'] for rec in records)]
table2 = rowreduce(table1, key='foo', reducer=sumbar, fields=['foo', 'barsum'])
expect2 = (('foo', 'barsum'),
('aa', 10),
('bb', 12),
('cc', 4))
ieq(expect2, table2)
def test_rowreduce_empty():
table = (('foo', 'bar'),)
expect = (('foo', 'bar'),)
reducer = lambda key, rows: (key, [r[0] for r in rows])
actual = rowreduce(table, key='foo', reducer=reducer, fields=('foo', 'bar'))
ieq(expect, actual)
def test_rangerowreduce():
table1 = (('foo', 'bar'),
('a', 3),
('a', 7),
('b', 2),
('b', 1),
('b', 9),
('c', 4))
def redu(key, rows):
return [key[0], key[1], ''.join([row[0] for row in rows])]
table2 = rangerowreduce(table1, 'bar', 2, reducer=redu,
fields=['minbar', 'maxbar', 'foos'])
expect2 = (('minbar', 'maxbar', 'foos'),
(1, 3, 'bb'),
(3, 5, 'ac'),
(5, 7, ''),
(7, 9, 'a'),
(9, 11, 'b'))
ieq(expect2, table2)
ieq(expect2, table2)
def test_rangerowreduce_fieldnameaccess():
table1 = (('foo', 'bar'),
('a', 3),
('a', 7),
('b', 2),
('b', 1),
('b', 9),
('c', 4))
def redu(key, recs):
return [key[0], key[1], ''.join([rec['foo'] for rec in recs])]
table2 = rangerowreduce(table1, 'bar', 2, reducer=redu,
fields=['minbar', 'maxbar', 'foos'])
expect2 = (('minbar', 'maxbar', 'foos'),
(1, 3, 'bb'),
(3, 5, 'ac'),
(5, 7, ''),
(7, 9, 'a'),
(9, 11, 'b'))
ieq(expect2, table2)
ieq(expect2, table2)
def test_aggregate_simple():
table1 = (('foo', 'bar', 'baz'),
('a', 3, True),
('a', 7, False),
('b', 2, True),
('b', 2, False),
('b', 9, False),
('c', 4, True))
# simplest signature - aggregate whole rows
table2 = aggregate(table1, 'foo', len)
expect2 = (('key', 'value'),
('a', 2),
('b', 3),
('c', 1))
ieq(expect2, table2)
ieq(expect2, table2)
# next simplest signature - aggregate single field
table3 = aggregate(table1, 'foo', sum, 'bar')
expect3 = (('key', 'value'),
('a', 10),
('b', 13),
('c', 4))
ieq(expect3, table3)
ieq(expect3, table3)
# alternative signature for simple aggregation
table4 = aggregate(table1, key=('foo', 'bar'), aggregation=list, value=('bar', 'baz'))
expect4 = (('key', 'value'),
(('a', 3), [(3, True)]),
(('a', 7), [(7, False)]),
(('b', 2), [(2, True), (2, False)]),
(('b', 9), [(9, False)]),
(('c', 4), [(4, True)]))
ieq(expect4, table4)
ieq(expect4, table4)
def test_aggregate_multifield():
table1 = (('foo', 'bar'),
('a', 3),
('a', 7),
('b', 2),
('b', 1),
('b', 9),
('c', 4))
# dict arg
aggregators = OrderedDict()
aggregators['count'] = len
aggregators['minbar'] = 'bar', min
aggregators['maxbar'] = 'bar', max
aggregators['sumbar'] = 'bar', sum
aggregators['listbar'] = 'bar', list
aggregators['bars'] = 'bar', strjoin(', ')
table2 = aggregate(table1, 'foo', aggregators)
expect2 = (('key', 'count', 'minbar', 'maxbar', 'sumbar', 'listbar', 'bars'),
('a', 2, 3, 7, 10, [3, 7], '3, 7'),
('b', 3, 1, 9, 12, [2, 1, 9], '2, 1, 9'),
('c', 1, 4, 4, 4, [4], '4'))
ieq(expect2, table2)
ieq(expect2, table2) # check can iterate twice
# use suffix notation
table3 = aggregate(table1, 'foo')
table3['count'] = len
table3['minbar'] = 'bar', min
table3['maxbar'] = 'bar', max
table3['sumbar'] = 'bar', sum
table3['listbar'] = 'bar' # default aggregation is list
table3['bars'] = 'bar', strjoin(', ')
ieq(expect2, table3)
# list arg
aggregators = [('count', len),
('minbar', 'bar', min),
('maxbar', 'bar', max),
('sumbar', 'bar', sum),
('listbar', 'bar', list),
('bars', 'bar', strjoin(', '))]
table4 = aggregate(table1, 'foo', aggregators)
ieq(expect2, table4)
ieq(expect2, table4) # check can iterate twice
def test_aggregate_more():
table1 = (('foo', 'bar'),
('aa', 3),
('aa', 7),
('bb', 2),
('bb', 1),
('bb', 9),
('cc', 4),
('dd', 3))
aggregators = OrderedDict()
aggregators['minbar'] = 'bar', min
aggregators['maxbar'] = 'bar', max
aggregators['sumbar'] = 'bar', sum
aggregators['listbar'] = 'bar' # default aggregation is list
aggregators['bars'] = 'bar', strjoin(', ')
table2 = aggregate(table1, 'foo', aggregators)
expect2 = (('key', 'minbar', 'maxbar', 'sumbar', 'listbar', 'bars'),
('aa', 3, 7, 10, [3, 7], '3, 7'),
('bb', 1, 9, 12, [2, 1, 9], '2, 1, 9'),
('cc', 4, 4, 4, [4], '4'),
('dd', 3, 3, 3, [3], '3'))
ieq(expect2, table2)
ieq(expect2, table2) # check can iterate twice
table3 = aggregate(table1, 'foo')
table3['minbar'] = 'bar', min
table3['maxbar'] = 'bar', max
table3['sumbar'] = 'bar', | |
= pose_bones
for i, pmx_bone in sorted(enumerate(pmxModel.bones), key=lambda x: x[1].transform_order):
b_bone = pose_bones[i]
mmd_bone = b_bone.mmd_bone
mmd_bone.name_j = b_bone.name #pmx_bone.name
mmd_bone.name_e = pmx_bone.name_e
mmd_bone.is_controllable = pmx_bone.isControllable
mmd_bone.transform_order = pmx_bone.transform_order
mmd_bone.transform_after_dynamics = pmx_bone.transAfterPhis
if pmx_bone.displayConnection == -1 or pmx_bone.displayConnection == [0.0, 0.0, 0.0]:
mmd_bone.is_tip = True
logging.debug('bone %s is a tip bone', pmx_bone.name)
elif b_bone.name in specialTipBones:
mmd_bone.is_tip = True
logging.debug('bone %s is a special tip bone. DisplayConnection: %s', pmx_bone.name, str(pmx_bone.displayConnection))
elif not isinstance(pmx_bone.displayConnection, int):
logging.debug('bone %s is using a vector tail', pmx_bone.name)
else:
logging.debug('bone %s is not using a vector tail and is not a tip bone. DisplayConnection: %s',
pmx_bone.name, str(pmx_bone.displayConnection))
b_bone.bone.hide = not pmx_bone.visible #or mmd_bone.is_tip
if not pmx_bone.isRotatable:
b_bone.lock_rotation = [True, True, True]
if not pmx_bone.isMovable:
b_bone.lock_location = [True, True, True]
if pmx_bone.isIK:
if pmx_bone.target != -1:
self.__applyIk(i, pmx_bone, pose_bones)
if pmx_bone.hasAdditionalRotate or pmx_bone.hasAdditionalLocation:
bone_index, influ = pmx_bone.additionalTransform
mmd_bone.has_additional_rotation = pmx_bone.hasAdditionalRotate
mmd_bone.has_additional_location = pmx_bone.hasAdditionalLocation
mmd_bone.additional_transform_influence = influ
if 0 <= bone_index < len(pose_bones):
mmd_bone.additional_transform_bone = pose_bones[bone_index].name
if pmx_bone.localCoordinate is not None:
mmd_bone.enabled_local_axes = True
mmd_bone.local_axis_x = pmx_bone.localCoordinate.x_axis
mmd_bone.local_axis_z = pmx_bone.localCoordinate.z_axis
if pmx_bone.axis is not None:
mmd_bone.enabled_fixed_axis = True
mmd_bone.fixed_axis = pmx_bone.axis
if not self.__apply_bone_fixed_axis and mmd_bone.is_tip:
b_bone.lock_rotation = [True, False, True]
b_bone.lock_location = [True, True, True]
b_bone.lock_scale = [True, True, True]
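# Import PMX rigid bodies: locations are swizzled to .xzy and scaled, rotations are
# swizzled and negated to match Blender's axis convention, and the 16-bit PMX
# collision mask is unpacked into a list of 16 booleans (True where the bit is 0).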
def __importRigids(self):
start_time = time.time()
self.__rigidTable = {}
rigid_pool = self.__rig.createRigidBodyPool(len(self.__model.rigids))
for i, (rigid, rigid_obj) in enumerate(zip(self.__model.rigids, rigid_pool)):
loc = Vector(rigid.location).xzy * self.__scale
rot = Vector(rigid.rotation).xzy * -1
size = Vector(rigid.size).xzy if rigid.type == pmx.Rigid.TYPE_BOX else Vector(rigid.size)
obj = self.__rig.createRigidBody(
obj = rigid_obj,
name = rigid.name,
name_e = rigid.name_e,
shape_type = rigid.type,
dynamics_type = rigid.mode,
location = loc,
rotation = rot,
size = size * self.__scale,
collision_group_number = rigid.collision_group_number,
collision_group_mask = [rigid.collision_group_mask & (1<<i) == 0 for i in range(16)],
arm_obj = self.__armObj,
mass=rigid.mass,
friction = rigid.friction,
angular_damping = rigid.rotation_attenuation,
linear_damping = rigid.velocity_attenuation,
bounce = rigid.bounce,
bone = None if rigid.bone == -1 or rigid.bone is None else self.__boneTable[rigid.bone].name,
)
obj.hide = True
MoveObject.set_index(obj, i)
self.__rigidTable[i] = obj
logging.debug('Finished importing rigid bodies in %f seconds.', time.time() - start_time)
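# Import PMX joints (constraints between rigid bodies). Locations and limits get the
# same .xzy swizzle and scaling as the rigid bodies; because rotations are negated,
# the PMX minimum/maximum rotation limits are intentionally swapped when passed to
# createJoint.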
def __importJoints(self):
start_time = time.time()
joint_pool = self.__rig.createJointPool(len(self.__model.joints))
for i, (joint, joint_obj) in enumerate(zip(self.__model.joints, joint_pool)):
loc = Vector(joint.location).xzy * self.__scale
rot = Vector(joint.rotation).xzy * -1
obj = self.__rig.createJoint(
obj = joint_obj,
name = joint.name,
name_e = joint.name_e,
location = loc,
rotation = rot,
rigid_a = self.__rigidTable.get(joint.src_rigid, None),
rigid_b = self.__rigidTable.get(joint.dest_rigid, None),
maximum_location = Vector(joint.maximum_location).xzy * self.__scale,
minimum_location = Vector(joint.minimum_location).xzy * self.__scale,
maximum_rotation = Vector(joint.minimum_rotation).xzy * -1,
minimum_rotation = Vector(joint.maximum_rotation).xzy * -1,
spring_linear = Vector(joint.spring_constant).xzy,
spring_angular = Vector(joint.spring_rotation_constant).xzy,
)
obj.hide = True
MoveObject.set_index(obj, i)
logging.debug('Finished importing joints in %f seconds.', time.time() - start_time)
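# Import PMX materials: MMD-specific settings (ambient/diffuse/specular, edge, shadow
# flags, sphere/toon textures, comment) are stored on mat.mmd_material, the per-material
# face count is recorded for later face-to-material assignment, and textures are
# attached through FnMaterial.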
def __importMaterials(self):
self.__importTextures()
pmxModel = self.__model
self.__materialFaceCountTable = []
for i in pmxModel.materials:
mat = bpy.data.materials.new(name=i.name)
self.__materialTable.append(mat)
mmd_mat = mat.mmd_material
mmd_mat.name_j = i.name
mmd_mat.name_e = i.name_e
mmd_mat.ambient_color = i.ambient
mmd_mat.diffuse_color = i.diffuse[0:3]
mmd_mat.alpha = i.diffuse[3]
mmd_mat.specular_color = i.specular
mmd_mat.shininess = i.shininess
mmd_mat.is_double_sided = i.is_double_sided
mmd_mat.enabled_drop_shadow = i.enabled_drop_shadow
mmd_mat.enabled_self_shadow_map = i.enabled_self_shadow_map
mmd_mat.enabled_self_shadow = i.enabled_self_shadow
mmd_mat.enabled_toon_edge = i.enabled_toon_edge
mmd_mat.edge_color = i.edge_color
mmd_mat.edge_weight = i.edge_size
mmd_mat.sphere_texture_type = str(i.sphere_texture_mode)
if i.is_shared_toon_texture:
mmd_mat.is_shared_toon_texture = True
mmd_mat.shared_toon_texture = i.toon_texture
else:
mmd_mat.is_shared_toon_texture = False
if i.toon_texture >= 0:
mmd_mat.toon_texture = self.__textureTable[i.toon_texture]
else:
mmd_mat.toon_texture = ''
mmd_mat.comment = i.comment
self.__materialFaceCountTable.append(int(i.vertex_count/3))
self.__meshObj.data.materials.append(mat)
fnMat = FnMaterial(mat)
if i.texture != -1:
texture_slot = fnMat.create_texture(self.__textureTable[i.texture])
texture_slot.texture.use_mipmap = self.__use_mipmap
self.__imageTable[len(self.__materialTable)-1] = texture_slot.texture.image
if i.sphere_texture_mode == 2:
amount = self.__spa_blend_factor
else:
amount = self.__sph_blend_factor
if i.sphere_texture != -1 and amount != 0.0:
texture_slot = fnMat.create_sphere_texture(self.__textureTable[i.sphere_texture])
texture_slot.diffuse_color_factor = amount
if i.sphere_texture_mode == 3 and getattr(pmxModel.header, 'additional_uvs', 0):
texture_slot.uv_layer = 'UV1' # for SubTexture
mmd_mat.sphere_texture_type = mmd_mat.sphere_texture_type # re-update
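# Build the mesh topology in bulk with foreach_set: triangles, per-face material
# indices derived from __materialFaceCountTable, and UVs flipped in V. Additional PMX
# UV channels become extra UV layers ('UV1', 'UV2', ...); their zw components, when
# non-zero, are stored in layers prefixed with '_'.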
def __importFaces(self):
pmxModel = self.__model
mesh = self.__meshObj.data
vertex_map = self.__vertex_map
loop_indices_orig = tuple(i for f in pmxModel.faces for i in f)
loop_indices = tuple(vertex_map[i][1] for i in loop_indices_orig) if vertex_map else loop_indices_orig
material_indices = tuple(i for i, c in enumerate(self.__materialFaceCountTable) for x in range(c))
mesh.loops.add(len(pmxModel.faces)*3)
mesh.loops.foreach_set('vertex_index', loop_indices)
mesh.polygons.add(len(pmxModel.faces))
mesh.polygons.foreach_set('loop_start', tuple(range(0, len(mesh.loops), 3)))
mesh.polygons.foreach_set('loop_total', (3,)*len(pmxModel.faces))
mesh.polygons.foreach_set('use_smooth', (True,)*len(pmxModel.faces))
mesh.polygons.foreach_set('material_index', material_indices)
uv_textures, uv_layers = getattr(mesh, 'uv_textures', mesh.uv_layers), mesh.uv_layers
uv_tex = uv_textures.new()
uv_layer = uv_layers[uv_tex.name]
uv_table = {vi:self.flipUV_V(v.uv) for vi, v in enumerate(pmxModel.vertices)}
uv_layer.data.foreach_set('uv', tuple(v for i in loop_indices_orig for v in uv_table[i]))
if uv_textures is not uv_layers:
for bf, mi in zip(uv_tex.data, material_indices):
bf.image = self.__imageTable.get(mi, None)
if pmxModel.header and pmxModel.header.additional_uvs:
logging.info('Importing %d additional uvs', pmxModel.header.additional_uvs)
zw_data_map = collections.OrderedDict()
split_uvzw = lambda uvi: (self.flipUV_V(uvi[:2]), uvi[2:])
for i in range(pmxModel.header.additional_uvs):
add_uv = uv_layers[uv_textures.new(name='UV'+str(i+1)).name]
logging.info(' - %s...(uv channels)', add_uv.name)
uv_table = {vi:split_uvzw(v.additional_uvs[i]) for vi, v in enumerate(pmxModel.vertices)}
add_uv.data.foreach_set('uv', tuple(v for i in loop_indices_orig for v in uv_table[i][0]))
if not any(any(s[1]) for s in uv_table.values()):
logging.info('\t- zw are all zeros: %s', add_uv.name)
else:
zw_data_map['_'+add_uv.name] = {k:self.flipUV_V(v[1]) for k, v in uv_table.items()}
for name, zw_table in zw_data_map.items():
logging.info(' - %s...(zw channels of %s)', name, name[1:])
add_zw = uv_textures.new(name=name)
if add_zw is None:
logging.warning('\t* Lost zw channels')
continue
add_zw = uv_layers[add_zw.name]
add_zw.data.foreach_set('uv', tuple(v for i in loop_indices_orig for v in zw_table[i]))
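# Vertex morphs become shape keys: a basis key is created first, then each offset is
# swizzled to .xzy, scaled, and added to the corresponding point of the new key, while
# the morph is registered in mmd_root.vertex_morphs with its category.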
def __importVertexMorphs(self):
mmd_root = self.__root.mmd_root
categories = self.CATEGORIES
self.__createBasisShapeKey()
for morph in (x for x in self.__model.morphs if isinstance(x, pmx.VertexMorph)):
shapeKey = self.__meshObj.shape_key_add(name=morph.name)
vtx_morph = mmd_root.vertex_morphs.add()
vtx_morph.name = morph.name
vtx_morph.name_e = morph.name_e
vtx_morph.category = categories.get(morph.category, 'OTHER')
for md in morph.offsets:
shapeKeyPoint = shapeKey.data[md.index]
shapeKeyPoint.co += Vector(md.offset).xzy * self.__scale
def __importMaterialMorphs(self):
mmd_root = self.__root.mmd_root
categories = self.CATEGORIES
for morph in (x for x in self.__model.morphs if isinstance(x, pmx.MaterialMorph)):
mat_morph = mmd_root.material_morphs.add()
mat_morph.name = morph.name
mat_morph.name_e = morph.name_e
mat_morph.category = categories.get(morph.category, 'OTHER')
for morph_data in morph.offsets:
data = mat_morph.data.add()
data.related_mesh = self.__meshObj.data.name
if 0 <= morph_data.index < len(self.__materialTable):
data.material = self.__materialTable[morph_data.index].name
data.offset_type = ['MULT', 'ADD'][morph_data.offset_type]
data.diffuse_color = morph_data.diffuse_offset
data.specular_color = morph_data.specular_offset
data.shininess = morph_data.shininess_offset
data.ambient_color = morph_data.ambient_offset
data.edge_color = morph_data.edge_color_offset
data.edge_weight = morph_data.edge_size_offset
data.texture_factor = morph_data.texture_factor
data.sphere_texture_factor = morph_data.sphere_texture_factor
data.toon_texture_factor = morph_data.toon_texture_factor
def __importBoneMorphs(self):
mmd_root = self.__root.mmd_root
categories = self.CATEGORIES
for morph in (x for x in self.__model.morphs if isinstance(x, pmx.BoneMorph)):
bone_morph = mmd_root.bone_morphs.add()
bone_morph.name = morph.name
bone_morph.name_e = morph.name_e
bone_morph.category = categories.get(morph.category, 'OTHER')
for morph_data in morph.offsets:
if not (0 <= morph_data.index < len(self.__boneTable)):
continue
data = bone_morph.data.add()
bl_bone = self.__boneTable[morph_data.index]
data.bone = bl_bone.name
converter = BoneConverter(bl_bone, self.__scale)
data.location = converter.convert_location(morph_data.location_offset)
data.rotation = converter.convert_rotation(morph_data.rotation_offset)
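# UV morph offsets are stored as vertex-group data via FnMorph.store_uv_morph_data
# rather than as shape keys; the second and fourth components of each 4-vector offset
# are negated to match the flipped V orientation.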
def __importUVMorphs(self):
mmd_root = self.__root.mmd_root
categories = self.CATEGORIES
__OffsetData = collections.namedtuple('OffsetData', 'index, offset')
__convert_offset = lambda x: (x[0], -x[1], x[2], -x[3])
for morph in (x for x in self.__model.morphs if isinstance(x, pmx.UVMorph)):
uv_morph = mmd_root.uv_morphs.add()
uv_morph.name = morph.name
uv_morph.name_e = morph.name_e
uv_morph.category = categories.get(morph.category, 'OTHER')
uv_morph.uv_index = morph.uv_index
offsets = (__OffsetData(d.index, __convert_offset(d.offset)) for d in morph.offsets)
FnMorph.store_uv_morph_data(self.__meshObj, uv_morph, offsets, '')
uv_morph.data_type = 'VERTEX_GROUP'
def __importGroupMorphs(self):
mmd_root = self.__root.mmd_root
categories = self.CATEGORIES
morph_types = self.MORPH_TYPES
pmx_morphs = self.__model.morphs
for morph in (x for x in pmx_morphs if isinstance(x, pmx.GroupMorph)):
group_morph = mmd_root.group_morphs.add()
group_morph.name = morph.name
group_morph.name_e = morph.name_e
group_morph.category = categories.get(morph.category, 'OTHER')
for morph_data in morph.offsets:
if not (0 <= morph_data.morph < len(pmx_morphs)):
continue
data = group_morph.data.add()
m = pmx_morphs[morph_data.morph]
data.name = m.name
data.morph_type = morph_types[m.type_index()]
data.factor = morph_data.factor
def __importDisplayFrames(self):
pmxModel = self.__model
root = self.__root
morph_types = self.MORPH_TYPES
for i in pmxModel.display:
frame = root.mmd_root.display_item_frames.add()
frame.name = i.name
frame.name_e = i.name_e
frame.is_special = i.isSpecial
for disp_type, index in i.data:
item = frame.data.add()
if disp_type == 0:
item.type = 'BONE'
item.name = self.__boneTable[index].name
elif disp_type == 1:
item.type = 'MORPH'
morph = pmxModel.morphs[index]
item.name = morph.name
item.morph_type = morph_types[morph.type_index()]
else:
raise Exception('Unknown display item type.')
DisplayItemQuickSetup.apply_bone_groups(root.mmd_root, self.__armObj)
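# Attach the armature to the mesh with an Armature modifier bound to vertex groups.
# The modifier is renamed to 'mmd_bone_order_override', which mmd_tools appears to use
# as a marker for bone-order handling, and it is disabled when the mesh has no vertices.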
def __addArmatureModifier(self, meshObj, armObj):
armModifier = meshObj.modifiers.new(name='Armature', type='ARMATURE')
armModifier.object = armObj
armModifier.use_vertex_groups = True
armModifier.name = 'mmd_bone_order_override'
armModifier.show_render = armModifier.show_viewport = (len(meshObj.data.vertices) > 0)
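# Apply the PMX vertex normals as custom split normals (swizzled to .xzy and
# normalized) and enable auto smooth; per-loop normals are used when a vertex remap
# table exists, and the step is skipped with a log message on Blender builds without
# custom-normal support.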
def __assignCustomNormals(self):
mesh = self.__meshObj.data
if not hasattr(mesh, 'has_custom_normals'):
logging.info(' * No support for custom normals!!')
return
logging.info('Setting custom normals...')
if self.__vertex_map:
verts, faces = self.__model.vertices, self.__model.faces
custom_normals = [(Vector(verts[i].normal).xzy).normalized() for f in faces for i in f]
mesh.normals_split_custom_set(custom_normals)
else:
custom_normals = [(Vector(v.normal).xzy).normalized() for v in self.__model.vertices]
mesh.normals_split_custom_set_from_vertices(custom_normals)
mesh.use_auto_smooth = True
logging.info(' - Done!!')
def __renameLRBones(self, use_underscore):
pose_bones = self.__armObj.pose.bones
for i in pose_bones:
self.__rig.renameBone(i.name, utils.convertNameToLR(i.name, use_underscore))
# self.__meshObj.vertex_groups[i.mmd_bone.name_j].name = i.name
def __translateBoneNames(self):
pose_bones | |
<gh_stars>0
#! /usr/bin/env python
import FWCore.ParameterSet.Config as cms
import sys
import os
import math
import re
import Validation.RecoTau.RecoTauValidation_cfi as validation
from optparse import OptionParser
from ROOT import *
__author__ = "<NAME> (<EMAIL>) and <NAME> (<EMAIL>)"
__doc__ = """Script to plot the content of a Validation .root file and compare it to a different file:\n\n
Usage: MultipleCompare.py -T testFile -R refFile [options] [search strings used to select the plots; '*' is supported as a wildcard]"""
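# Parse the command line with optparse; returns [options, toPlot], where toPlot is the
# list of positional search strings used to select histograms from the input files.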
def LoadCommandlineOptions(argv):
sys.argv = argv
parser = OptionParser(description=__doc__)
parser.add_option('--myhelp',metavar='', action="store_true",help='prints this output message',dest='help',default = False)
parser.add_option('--TestFile','-T',metavar='testFile', type=str,help='Sets the test file',dest='test',default = '')
parser.add_option('--RefFile','-R',metavar='refFile', type=str,help='Sets the reference file',dest='ref',default = None)
parser.add_option('--output','-o',metavar='outputFile', type=str,help='Sets the output file',dest='out',default = 'MultipleCompare.png')
parser.add_option('--logScaleY',action="store_true", dest="logScaleY", default=False, help="Sets the log scale in the plot (Y axis)")
parser.add_option('--logScaleX',action="store_true", dest="logScaleX", default=False, help="Sets the log scale in the plot (X axis)")
parser.add_option('--fakeRate','-f',action="store_true", dest="fakeRate", default=False, help="Sets the fake rate options and puts the correct label (implies --logScale)")
parser.add_option('--testLabel','-t',metavar='testLabel', type=str,help='Sets the label to put in the plots for test file',dest='testLabel',default = None)
parser.add_option('--refLabel','-r',metavar='refLabel', type=str,help='Sets the label to put in the plots for ref file',dest='refLabel',default = None)
parser.add_option('--sampleLabel','-s',metavar='sampleLabel', type=str,help='Sets the label to indicate the sample used',dest='sampleLabel',default = None)
parser.add_option('--maxLogX',metavar='number', type=float,help='Sets the maximum of the scale in log scale both in the main and in the sub pad (requires --logScale or -f to work)',dest='maxLogX',default = 100)
parser.add_option('--minLogX',metavar='number', type=float,help='Sets the minimum of the scale in log scale (requires --logScale or -f to work)',dest='minLogX',default = 0.001)
parser.add_option('--minLogY',metavar='number', type=float,help='Sets the minimum of the scale in log scale (requires --logScale or -f to work)',dest='minLogY',default = 0.0001)
parser.add_option('--maxLogY',metavar='number', type=float,help='Sets the maximum of the scale in log scale (requires --logScale or -f to work)',dest='maxLogY',default = 3)
parser.add_option('--minYR',metavar='number', type=float,help='Sets the minimum of the scale in sub pad',dest='minYR',default = 0)
parser.add_option('--maxYR',metavar='number', type=float,help='Sets the maximum of the scale in sub pad',dest='maxYR',default = 1.2)
# parser.add_option('--minDivY',metavar='number', type=float,help='Sets the minimum of the scale in the ratio pad',dest='minDivY',default = 0.)
# parser.add_option('--maxDivY',metavar='number', type=float,help='Sets the maximum of the scale in the ratio pad',dest='maxDivY',default = 2)
# parser.add_option('--minDivX',metavar='number', type=float,help='Sets the minimum of the scale in the ratio pad',dest='minDivX',default = 0.)
# parser.add_option('--maxDivX',metavar='number', type=float,help='Sets the maximum of the scale in the ratio pad',dest='maxDivX',default = 2)
parser.add_option('--logDiv',action="store_true", dest="logDiv", default=False, help="Sets the log scale in the plot")
parser.add_option('--normalize',action="store_true", dest="normalize", default=False, help="plot normalized")
parser.add_option('--maxRange',metavar='number',type=float, dest="maxRange", default=1.6, help="Sets the maximum range in linear plots")
parser.add_option('--maxXaxis',metavar='number',type=float, dest="maxXaxis", default=800, help="Sets the maximum range on x axis in the main pad")
parser.add_option('--minXaxis',metavar='number',type=float,help="Sets the minimum range on x axis in the main pad",dest="minXaxis", default=-3)
parser.add_option('--maxYaxis',metavar='number',type=float, dest="maxYaxis", default=2, help="Sets the maximum range on Y axis in the main pad")
parser.add_option('--minYaxis',metavar='number',type=float, dest="minYaxis", default=0, help="Sets the minimum range on Y axis in the main pad")
parser.add_option('--rebin', dest="rebin", type=int, default=-1, help="Sets the rebinning scale")
parser.add_option('--branding','-b',metavar='branding', type=str,help='Define a branding to label the plots (in the top right corner)',dest='branding',default = None)
#parser.add_option('--search,-s',metavar='searchStrings', type=str,help='Sets the label to put in the plots for ref file',dest='testLabel',default = None) No idea how to tell python to consume all the strings before the next option, so this was moved from an option to a positional argument (which may be empty)
(options,toPlot) = parser.parse_args()
if options.help:
parser.print_help()
sys.exit(0)
return [options, toPlot]
def GetContent(dir):
tempList = dir.GetListOfKeys()
retList = []
for it in range(0,tempList.GetSize()):
retList.append(tempList.At(it).ReadObj())
return retList
def MapDirStructure( directory, dirName, objectList ):
dirContent = GetContent(directory)
for entry in dirContent:
if type(entry) is TDirectory or type(entry) is TDirectoryFile:
subdirName = os.path.join(dirName,entry.GetName())
MapDirStructure(entry, subdirName,objectList)
else:
pathname = os.path.join(dirName,entry.GetName())
objectList.append(pathname)
def Match(required, got):
for part in required.split('*'):
if got.find(part) == -1:
return False
return True
def Divide(hNum,hDen):
ret = hNum.Clone('Division')
ret.GetYaxis().SetTitle('Ratio')
for binI in range(hNum.GetNbinsX()+1):
denVal = hDen.GetBinContent(binI)
denErr = hDen.GetBinError(binI)
numErr = hNum.GetBinError(binI)
numVal = hNum.GetBinContent(binI)
if denVal == 0:
ret.SetBinContent(binI,0)
ret.SetBinError(binI,0)
else:
ret.SetBinContent(binI,numVal/denVal)
if numVal==0:
ret.SetBinError(binI,1)
else:
ret.SetBinError(binI,(numVal/denVal)*math.sqrt(math.pow(numErr/numVal,2) + math.pow(denErr/denVal,2) ) )
return ret
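# Note on Divide(): each ratio bin carries the usual uncorrelated error
# propagation, err = (num/den) * sqrt((numErr/num)^2 + (denErr/den)^2).
# Illustrative example (hypothetical numbers): num = 50 +/- 7, den = 100 +/- 10
# gives a ratio of 0.5 with error 0.5*sqrt((7/50)^2 + (10/100)^2) ~= 0.086.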
def DetermineHistType(name):
#automatically derive all plot types in the future?
type = ''
label = ''
prefix = ''
#assuming plots name like: tauType_plotType_xAxis or tauType_plotType_selection
matches = re.match(r'.*/(.*)_(.*)_(.*)', name)
if matches:
prefix = matches.group(1)
label = matches.group(3)
knowntypes = (['pTRatio','SumPt','Size'])
for knowntype in knowntypes:
if matches.group(2) == knowntype:
type = knowntype
if not type: #there are plots labelled ..._vs_...
type = 'Eff'
else:
type = 'Eff'
prefixParts = prefix.partition('Discrimination')
if prefixParts[2] != '':
prefix = prefixParts[2]
prefixParts = prefix.partition('By')
if prefixParts[2] != '':
prefix = prefixParts[2]
#print 'type is ' + type
return [type, label, prefix]
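# Hypothetical example of DetermineHistType(): a histogram named
# 'SomeDir/hpsPFTauDiscriminationByLooseIsolation_Eff_pt' matches the
# tauType_plotType_xAxis pattern with group(2) == 'Eff' (not in knowntypes),
# so the function returns ['Eff', 'pt', 'LooseIsolation'].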
def DrawTitle(text):
title = TLatex()
title.SetNDC()
    title.SetTextAlign(12)# align = 10*horizontal + vertical; 12 = left, vertically centered
title.SetTextSize(.035)
leftMargin = gStyle.GetPadLeftMargin()
topMargin = 1 - 0.5*gStyle.GetPadTopMargin()
title.DrawLatex(leftMargin, topMargin, text)
def DrawBranding(options, label=''):
if options.branding != None or label != '':
text = TLatex()
text.SetNDC();
        text.SetTextAlign(11)# align = 10*horizontal + vertical; 11 = left, bottom
text.SetTextSize(.025)
text.SetTextColor(13)
if options.out.find(".eps")!=-1:
text.SetTextAngle(-91.0)#eps BUG
else:
text.SetTextAngle(-90.0)
rightMargin = 1 - gStyle.GetPadRightMargin()
topMargin = 1 - gStyle.GetPadTopMargin()
if label!='':
label += ': '
text.DrawLatex(rightMargin+.01, topMargin+0.025, label+options.branding);
def FindParents(histoPath):
root = histoPath[:histoPath.find('_')]
par = histoPath[histoPath.find('Eff')+3:]
validationPlots = validation.proc.efficiencies.plots._Parameterizable__parameterNames
found =0
num = ''
den = ''
for efficiency in validationPlots:
effpset = getattr(validation.proc.efficiencies.plots,efficiency)
effName = effpset.efficiency.value()
effNameCut = effName[effName.find('_'):effName.find('#')]
if effNameCut in histoPath:
if found == 1:
                print 'More than one pair of parents found for ' + histoPath + ':'
assert(False)
num = root + effpset.numerator.value()[effName.find('_'):].replace('#PAR#',par)
den = root + effpset.denominator.value()[effName.find('_'):].replace('#PAR#',par)
found += 1
return [num,den]
def Rebin(tfile, histoPath, rebinVal):
parents = FindParents(histoPath)
num = tfile.Get(parents[0])
if type(num) != TH1F:
        print 'Looking for ' + parents[0]
        print 'Plot not found! What the hell are you doing? Exiting...'
sys.exit()
denSingle = tfile.Get(parents[1])
if type(denSingle) != TH1F:
        print 'Looking for ' + parents[1]
        print 'Plot not found! What the hell are you doing? Exiting...'
sys.exit()
num.Rebin(rebinVal)
den = denSingle.Rebin(rebinVal,'denClone')
retVal = num.Clone(histoPath+'Rebin%s'%rebinVal)
#print 'Num : ' + parents[0]
#print 'Den : ' +parents[1]
#print "NumBins: %s DenBins: %s" % (num.GetNbinsX(), den.GetNbinsX() )
retVal.Divide(num,den,1,1,'B')
return retVal
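# Rebin() recomputes the efficiency from its rebinned numerator/denominator
# parents rather than rebinning the ratio itself; TH1.Divide(..., 'B') asks
# ROOT for binomial errors, which is appropriate because the numerator entries
# are a subset of the denominator entries.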
def findRange(hists, min0=-1, max0=-1):
if len(hists) < 1:
return
#auto ranges if no user value provided
min = min0
max = max0
if min0 == -1 or max0 == -1:
for hist in hists:
if min0 == -1:
#Divide() sets bin to zero if division not possible. Ignore these bins.
minTmp = getMinimumIncludingErrors(hist)
if minTmp < min or min == -1:
min = minTmp
if max0 == -1:
maxTmp = getMaximumIncludingErrors(hist)
if maxTmp > max or max == -1:
max = maxTmp
return [min, max]
def optimizeRangeMainPad(argv, pad, hists, maxLogX_, minX_, maxX_, maxLogY_, minY_, maxY_):
pad.Update()
if pad.GetLogy():
if maxLogY_ > 0:
maxLogY = maxLogY_
else:
maxLogY = -1
minY, maxY = findRange(hists, -1, maxLogY)
else:
minY, maxY = findRange(hists, minY_, maxY_)
if pad.GetLogy():
if minY == 0:
minY = 0.001
else:
if minY < 0.7:
            minY = 0 #start from zero if possible
        if maxY <= 1.1 and maxY > 0.7:
            maxY = 1.2 #prefer fixed range for easy comparison
hists[0].SetAxisRange(minY, maxY, "Y")
if pad.GetLogx():
if maxLogX_ > 0:
maxLogX = maxLogX_
else:
maxLogX = -1
minX, maxX = findRange(hists, -1, maxLogX)
else:
minX, maxX = findRange(hists, minX_, maxX_)
if pad.GetLogx():
if minX == 0:
minX = 0.001
else:
if minX < 0.7:
            minX = 0 #start from zero if possible
        if maxX <= 1.1 and maxX > 0.7:
            maxX = 1.2 #prefer fixed range for easy comparison
hists[0].SetAxisRange(minX, maxX, "X")
def optimizeRangeSubPad(argv, pad, hists, maxLogX_, minX_, maxX_, minYRatio_, maxYRatio_):
pad.Update()
if pad.GetLogx():
if maxLogX_ > 0:
maxLogX = maxLogX_
else:
maxLogX = -1
minX, maxX = findRange(hists, -1, maxLogX)
else:
minX, maxX = findRange(hists, minX_, maxX_)
if pad.GetLogx():
if minX == 0:
minX = 0.001
else:
if minX < 0.7:
            minX = 0 #start from zero if possible
        if maxX <= 1.1 and maxX > 0.7:
            maxX = 1.2 #prefer fixed range for easy comparison
hists[0].SetAxisRange(minX, maxX, "X")
min = -1
max = -1
if minYRatio_ > 0:
min = minYRatio_
if maxYRatio_ > 0:
max = maxYRatio_
min, max = findRange(hists, min, max)
if max > 2:
max = 2 #maximal bound
hists[0].SetAxisRange(min, max, "Y")
def getMaximumIncludingErrors(hist):
#find maximum considering also the errors
distance = 1.
max = -1
pos = 0
    for i in range(1, hist.GetNbinsX() + 1):
if hist.GetBinContent(i) > max:#ignore errors here
max = hist.GetBinContent(i)
pos = i
return max + distance*hist.GetBinError(pos)
def getMinimumIncludingErrors(hist):
#find minimum considering also the errors
    #ignoring zero
if self.is_name_exists(
new_engine_id_name):
sg.Popup(
'{} is existing. Please '
'modify the name! You can '
'modify the config later thru '
'Engine->Manage->Edit'.format(
new_engine_id_name),
title=button_title,
icon=ico_path[platform]['pecg'])
continue
break
else:
sg.Popup('Please input engine id '
'name, or press Get Id Name '
'button.',
title=button_title,
icon=ico_path[platform]['pecg'])
except Exception:
logging.exception('Failed to get engine '
'path and file')
# Outside add window while loop
add_win.Close()
install_win.Enable()
# Save the new configured engine to pecg_engines.json.
if not is_cancel_add_win:
que = queue.Queue()
t = threading.Thread(
target=self.add_engine_to_config_file,
args=(new_engine_path_file,
new_engine_id_name, que,), daemon=True)
t.start()
while True:
try:
msg = que.get_nowait()
break
except Exception:
continue
t.join()
if msg == 'Failure':
sg.Popup('Failed to add {} in config '
'file!'.format(new_engine_id_name),
title=button_title,
icon=ico_path[platform]['pecg'])
self.engine_id_name_list = \
self.get_engine_id_name_list()
break
install_win.Close()
window.Enable()
# Define default engine opponent and adviser
if engine_id_name is None:
engine_id_name = self.get_default_engine_opponent()
if self.adviser_id_name is None:
self.set_default_adviser_engine()
self.update_labels_and_game_tags(window, human=self.username)
continue
# Mode: Neutral
if button == 'Edit':
button_title = 'Engine/Manage/' + button
opt_name = []
ret_opt_name = []
engine_path_file, engine_id_name = None, None
edit_layout = [
[sg.Text('Current configured engine names')],
[sg.Listbox(values=self.engine_id_name_list,
size=(48,10),
key='engine_id_name_k')],
[sg.Button('Modify'), sg.Button('Cancel')]
]
window.Disable()
edit_win = sg.Window(button_title, layout=edit_layout,
icon=ico_path[platform]['pecg'])
is_cancel_edit_win = False
while True:
e, v = edit_win.Read(timeout=100)
if e is None or e == 'Cancel':
is_cancel_edit_win = True
break
if e == 'Modify':
option_layout, option_layout2 = [], []
button_title += '/' + e
try:
orig_idname = engine_id_name = v['engine_id_name_k'][0]
except Exception:
sg.Popup('Please select an engine to modify.',
title='/Edit/Modify',
icon=ico_path[platform]['pecg'])
continue
# Read engine config file
with open(self.engine_config_file, 'r') as json_file:
data = json.load(json_file)
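                    # Assumed (hypothetical example) layout of pecg_engines.json, based on
                    # the keys read below: a list of engine entries such as
                    #   {"name": "Stockfish", "workingDirectory": "...", "command": "...",
                    #    "options": [{"name": "Hash", "value": 64, "type": "spin",
                    #                 "min": 1, "max": 1024}, ...]}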
# First option that can be set is the config name
option_layout.append(
[sg.Text('name', size=(4, 1)),
sg.Input(engine_id_name, size=(38, 1),
key='string_name_k')])
opt_name.append(['name', 'string_name_k'])
for p in data:
name = p['name']
path = p['workingDirectory']
file = p['command']
engine_path_file = Path(path, file)
option = p['options']
if name == engine_id_name:
num_opt = len(option)
opt_cnt = 0
for o in option:
opt_cnt += 1
name = o['name']
value = o['value']
type_ = o['type']
if type_ == 'spin':
min_ = o['min']
max_ = o['max']
key_name = type_ + '_' + name.lower() + '_k'
opt_name.append([name, key_name])
ttip = 'min {} max {}'.format(min_, max_)
spin_layout = \
[sg.Text(name, size=(16, 1)),
sg.Input(value, size=(8, 1),
key=key_name,
tooltip=ttip)]
if num_opt > 10 and opt_cnt > num_opt//2:
option_layout2.append(spin_layout)
else:
option_layout.append(spin_layout)
elif type_ == 'check':
key_name = type_ + '_' + name.lower() + '_k'
opt_name.append([name, key_name])
check_layout = \
[sg.Text(name, size=(16, 1)),
sg.Checkbox('', key=key_name,
default=value)]
if num_opt > 10 and opt_cnt > num_opt//2:
option_layout2.append(check_layout)
else:
option_layout.append(check_layout)
elif type_ == 'string':
key_name = type_ + '_' + name + '_k'
opt_name.append([name, key_name])
# Use FolderBrowse()
if 'syzygypath' in name.lower():
sy_layout = \
[sg.Text(name, size=(16, 1)),
sg.Input(value,
size=(12, 1),
key=key_name),
sg.FolderBrowse()]
if num_opt > 10 and opt_cnt > num_opt//2:
option_layout2.append(sy_layout)
else:
option_layout.append(sy_layout)
# Use FileBrowse()
elif 'weightsfile' in name.lower():
weight_layout = \
[sg.Text(name, size=(16, 1)),
sg.Input(value,
size=(12, 1),
key=key_name),
sg.FileBrowse()]
if num_opt > 10 and opt_cnt > num_opt//2:
option_layout2.append(
weight_layout)
else:
option_layout.append(
weight_layout)
else:
str_layout = \
[sg.Text(name, size=(16, 1)),
sg.Input(value, size=(16, 1),
key=key_name)]
if num_opt > 10 and opt_cnt > num_opt//2:
option_layout2.append(
str_layout)
else:
option_layout.append(
str_layout)
elif type_ == 'combo':
key_name = type_ + '_' + name + '_k'
opt_name.append([name, key_name])
var = o['choices']
combo_layout = [
sg.Text(name, size=(16, 1)),
sg.Combo(var, default_value=value,
size=(12, 1),
key=key_name)]
if num_opt > 10 and opt_cnt > num_opt//2:
option_layout2.append(combo_layout)
else:
option_layout.append(combo_layout)
break
option_layout.append([sg.OK(), sg.Cancel()])
if len(option_layout2) > 1:
tab1 = [[sg.Column(option_layout)]]
tab2 = [[sg.Column(option_layout2)]]
modify_layout = [[sg.Column(tab1), sg.Column(tab2)]]
else:
modify_layout = option_layout
edit_win.Disable()
modify_win = sg.Window(button_title,
layout=modify_layout,
icon=ico_path[platform]['pecg'])
is_cancel_modify_win = False
while True:
e1, v1 = modify_win.Read(timeout=100)
if e1 is None or e1 == 'Cancel':
is_cancel_modify_win = True
break
if e1 == 'OK':
engine_id_name = v1['string_name_k']
for o in opt_name:
d = {o[0]: v1[o[1]]}
ret_opt_name.append(d)
break
edit_win.Enable()
modify_win.Close()
break # Get out of edit_win loop
# Outside edit_win while loop
# Save the new configured engine to pecg_engines.json file
if not is_cancel_edit_win and not is_cancel_modify_win:
self.update_engine_to_config_file(
engine_path_file, engine_id_name,
orig_idname, ret_opt_name)
self.engine_id_name_list = self.get_engine_id_name_list()
edit_win.Close()
window.Enable()
continue
# Mode: Neutral
if button == 'Delete':
button_title = 'Engine/Manage/' + button
delete_layout = [
[sg.Text('Current configured engine names')],
[sg.Listbox(values=self.engine_id_name_list, size=(48, 10),
key='engine_id_name_k')],
[sg.Button('Delete'), sg.Cancel()]
]
window.Disable()
delete_win = sg.Window(button_title, layout=delete_layout,
icon=ico_path[platform]['pecg'])
is_cancel = False
while True:
e, v = delete_win.Read(timeout=100)
if e is None or e == 'Cancel':
is_cancel = True
break
if e == 'Delete':
try:
engine_id_name = v['engine_id_name_k'][0]
except Exception:
sg.Popup('Please select an engine to delete.',
title=button_title,
icon=ico_path[platform]['pecg'])
continue
with open(self.engine_config_file, 'r') as json_file:
data = json.load(json_file)
for i in range(len(data)):
if data[i]['name'] == engine_id_name:
logging.info('{} is found for deletion.'.format(
engine_id_name))
data.pop(i)
break
# Save data to pecg_engines.json
with open(self.engine_config_file, 'w') as h:
json.dump(data, h, indent=4)
break
# Save the new configured engine to pecg_engines.json file
if not is_cancel:
self.engine_id_name_list = self.get_engine_id_name_list()
delete_win.Close()
window.Enable()
continue
# Mode: Neutral, Allow user to change opponent engine settings
if button == 'Set Engine Opponent':
current_engine_file = self.opp_file
current_engine_id_name = self.opp_id_name
logging.info('Backup current engine list and file.')
logging.info('Current engine file: {}'.format(
current_engine_file))
layout = [
[sg.T('Current Opponent: {}'.format(self.opp_id_name),
size=(40,1))],
[sg.Listbox(values=self.engine_id_name_list, size=(48,10),
key='engine_id_k')],
[sg.OK(), sg.Cancel()]
]
# Create new window and disable the main window
w = sg.Window(BOX_TITLE + '/Select opponent', layout,
icon=ico_path[platform]['enemy'])
window.Disable()
while True:
e, v = w.Read(timeout=10)
if e is None or e == 'Cancel':
# Restore current engine list and file
logging.info('User cancels engine selection. ' +
'We restore the current engine data.')
self.opp_file = current_engine_file
logging.info('Current engine data were restored.')
logging.info('current engine file: {}'.format(
self.opp_file))
break
if e == 'OK':
# We use try/except because user can press OK without
# selecting an engine
try:
engine_id_name = self.opp_id_name = v['engine_id_k'][0]
self.opp_file, self.opp_path_and_file = self.get_engine_file(
engine_id_name)
except IndexError:
logging.info('User presses OK but did not select '
'an engine.')
except Exception:
logging.exception('Failed to set engine.')
finally:
if current_engine_id_name != self.opp_id_name:
logging.info('User selected a new opponent {'
'}.'.format(self.opp_id_name))
break
window.Enable()
w.Close()
# Update the player box in main window
self.update_labels_and_game_tags(window, human=self.username)
continue
# Mode: Neutral, Set Adviser engine
if button == 'Set Engine Adviser':
current_adviser_engine_file = self.adviser_file
current_adviser_path_and_file = self.adviser_path_and_file
layout = [
[sg.T('Current Adviser: {}'.format(self.adviser_id_name),
size=(40,1))],
[sg.Listbox(values=self.engine_id_name_list, size=(48,10),
key='adviser_id_name_k')],
[sg.T('Movetime (sec)', size=(12, 1)),
sg.Spin([t for t in range(1, 3600, 1)],
initial_value=self.adviser_movetime_sec,
size=(8, 1), key='adviser_movetime_k')],
[sg.OK(), sg.Cancel()]
]
# Create new window and disable the main window
w = sg.Window(BOX_TITLE + '/Select Adviser', layout,
icon=ico_path[platform]['adviser'])
window.Disable()
while True:
e, v = w.Read(timeout=10)
if e is None or e == 'Cancel':
self.adviser_file = current_adviser_engine_file
self.adviser_path_and_file = current_adviser_path_and_file
break
if e == 'OK':
movetime_sec = int(v['adviser_movetime_k'])
self.adviser_movetime_sec = min(3600, max(1, movetime_sec))
# We use try/except because user can press OK without selecting an engine
try:
adviser_eng_id_name = self.adviser_id_name = v['adviser_id_name_k'][0]
self.adviser_file, self.adviser_path_and_file = self.get_engine_file(
adviser_eng_id_name)
except IndexError:
logging.info('User presses OK but did not select an engine')
except Exception:
logging.exception('Failed to set engine.')
break
window.Enable()
w.Close()
continue
# Mode: Neutral
if button == 'Set Depth':
self.set_depth_limit()
continue
# Mode: Neutral, Allow user to change book settings
if button == 'Set Book::book_set_k':
# Backup current values, we will restore these value in case
# the user presses cancel or X button
current_is_use_gui_book = self.is_use_gui_book
current_is_random_book = self.is_random_book
current_max_book_ply = self.max_book_ply
layout = [
[sg.Text('This is the book used by your '
'engine opponent.')],
[sg.T('Book File', size=(8, 1)),
sg.T(self.gui_book_file, size=(36, 1), relief='sunken')],
[sg.T('Max Ply', size=(8, 1)),
sg.Spin([t for t in range(1, 33, 1)],
initial_value=self.max_book_ply,
size=(6, 1), key='book_ply_k')],
[sg.CBox('Use book', key = 'use_gui_book_k',
default=self.is_use_gui_book)],
[sg.Radio('Best move', 'Book Radio',
default = False if self.is_random_book else True),
sg.Radio('Random move', 'Book Radio',
key='random_move_k',
default = True if self.is_random_book else False)],
[sg.OK(), sg.Cancel()],
]
w = sg.Window(BOX_TITLE + '/Set Book', layout,
icon=ico_path[platform]['pecg'])
window.Disable()
while True:
e, v = w.Read(timeout=10)
# If user presses X button
if e is None:
self.is_use_gui_book = current_is_use_gui_book
self.is_random_book = current_is_random_book
self.max_book_ply = current_max_book_ply
logging.info('Book setting is exited.')
break
if e == 'Cancel':
self.is_use_gui_book = current_is_use_gui_book
self.is_random_book = current_is_random_book
self.max_book_ply = current_max_book_ply
logging.info('Book setting is cancelled.')
break
if e == 'OK':
                        self.max_book_ply = int(v['book_ply_k'])
None
if axis is None:
self.result = counts
elif axis == 'x':
if not raw_sum:
self.result = counts[[0], :]
else:
self.result = np.nansum(counts[1:, :], axis=0, keepdims=True)
elif axis == 'y':
if not raw_sum:
self.result = counts[:, [0]]
else:
if self.x == '@' or self.y == '@':
self.result = counts[:, [0]]
else:
self.result = np.nansum(counts[:, 1:], axis=1, keepdims=True)
self._organize_margins(margin)
if as_df:
self.to_df()
self.unweight()
return self
def _empty_result(self):
if self._res_is_stat() or self.current_agg == 'summary':
self.factorized = 'x'
xdim = 1 if self._res_is_stat() else 8
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
else:
if self.xdef is not None:
if len(self.xdef) == 0:
xdim = 2
else:
xdim = len(self.xdef) + 1
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
elif self.xdef is None:
xdim = 2
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
return np.zeros((xdim, ydim))
def _effective_n(self, axis=None, margin=True):
self.weight()
effective = (np.nansum(self.matrix, axis=0)**2 /
np.nansum(self.matrix**2, axis=0))
self.unweight()
start_on = 0 if margin else 1
if axis is None:
return effective[start_on:, start_on:]
elif axis == 'x':
return effective[[0], start_on:]
else:
return effective[start_on:, [0]]
def summarize(self, stat='summary', axis='x', margin=True, as_df=True):
"""
Calculate distribution statistics across the given axis.
Parameters
----------
        stat : {'summary', 'mean', 'median', 'var', 'stddev', 'sem', 'varcoeff',
'min', 'lower_q', 'upper_q', 'max'}, default 'summary'
The measure to calculate. Defaults to a summary output of the most
important sample statistics.
axis : {'x', 'y'}, default 'x'
The axis which is reduced in the aggregation, e.g. column vs. row
means.
margin : bool, default True
Controls whether statistic(s) of the marginal distribution are
shown.
as_df : bool, default True
Controls whether the aggregation is transformed into a Quantipy-
multiindexed (following the Question/Values convention)
pandas.DataFrame or will be left in its numpy.array format.
Returns
-------
self
Passes a pandas.DataFrame or numpy.array of the descriptive (summary)
statistic(s) to the ``result`` property.
"""
self.current_agg = stat
if self.is_empty:
self.result = self._empty_result()
else:
self._autodrop_stats_missings()
if stat == 'summary':
stddev, mean, base = self._dispersion(axis, measure='sd',
_return_mean=True,
_return_base=True)
self.result = np.concatenate([
base, mean, stddev,
self._min(axis),
self._percentile(perc=0.25),
self._percentile(perc=0.50),
self._percentile(perc=0.75),
self._max(axis)
], axis=0)
elif stat == 'mean':
self.result = self._means(axis)
elif stat == 'var':
self.result = self._dispersion(axis, measure='var')
elif stat == 'stddev':
self.result = self._dispersion(axis, measure='sd')
elif stat == 'sem':
self.result = self._dispersion(axis, measure='sem')
elif stat == 'varcoeff':
self.result = self._dispersion(axis, measure='varcoeff')
elif stat == 'min':
self.result = self._min(axis)
elif stat == 'lower_q':
self.result = self._percentile(perc=0.25)
elif stat == 'median':
self.result = self._percentile(perc=0.5)
elif stat == 'upper_q':
self.result = self._percentile(perc=0.75)
elif stat == 'max':
self.result = self._max(axis)
self._organize_margins(margin)
if as_df:
self.to_df()
return self
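    # Usage sketch (hypothetical instance name `q`): q.summarize() passes the
    # 8-row summary block (base, mean, stddev, min, lower/median/upper quartile,
    # max) to q.result, while e.g. q.summarize(stat='mean', axis='x') yields the
    # column means only.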
def _factorize(self, axis='x', inplace=True):
self.factorized = axis
if inplace:
factorized = self
else:
factorized = self._copy()
if axis == 'y':
factorized._switch_axes()
np.copyto(factorized.matrix[:, 1:, :],
np.atleast_3d(factorized.xdef),
where=factorized.matrix[:, 1:, :]>0)
if not inplace:
return factorized
def _means(self, axis, _return_base=False):
fact = self._factorize(axis=axis, inplace=False)
if not self.w == '@1':
fact.weight()
fact_prod = np.nansum(fact.matrix, axis=0)
fact_prod_sum = np.nansum(fact_prod[1:, :], axis=0, keepdims=True)
bases = fact_prod[[0], :]
means = fact_prod_sum/bases
if axis == 'y':
self._switch_axes()
means = means.T
bases = bases.T
if _return_base:
return means, bases
else:
return means
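    # _means() computes the weighted arithmetic mean per column of the
    # factorized matrix: mean = sum_i(w_i * x_i) / sum_i(w_i), where the x_i are
    # the answer codes written in by _factorize() and the w_i the case weights.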
def _dispersion(self, axis='x', measure='sd', _return_mean=False,
_return_base=False):
"""
Extracts measures of dispersion from the incoming distribution of
        X vs. Y. Can return the arithmetic mean on request as well. Dispersion
        measures supported are standard deviation, variance, coefficient of
        variation, and standard error of the mean.
"""
means, bases = self._means(axis, _return_base=True)
unbiased_n = bases - 1
self.unweight()
factorized = self._factorize(axis, inplace=False)
factorized.matrix[:, 1:] -= means
factorized.matrix[:, 1:] *= factorized.matrix[:, 1:, :]
if not self.w == '@1':
factorized.weight()
diff_sqrt = np.nansum(factorized.matrix[:, 1:], axis=1)
disp = np.nansum(diff_sqrt/unbiased_n, axis=0, keepdims=True)
disp[disp <= 0] = np.NaN
disp[np.isinf(disp)] = np.NaN
if measure == 'sd':
disp = np.sqrt(disp)
elif measure == 'sem':
disp = np.sqrt(disp) / np.sqrt((unbiased_n + 1))
elif measure == 'varcoeff':
disp = np.sqrt(disp) / means
self.unweight()
if _return_mean and _return_base:
return disp, means, bases
elif _return_mean:
return disp, means
elif _return_base:
return disp, bases
else:
return disp
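    # Formulas implemented above (w = case weights, N_w = weighted base):
    #   var      = sum_i w_i * (x_i - mean)^2 / (N_w - 1)
    #   sd       = sqrt(var)
    #   sem      = sd / sqrt(N_w)
    #   varcoeff = sd / mean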
def _max(self, axis='x'):
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(factorized.matrix[:, 1:, :], axis=1)
return np.nanmax(vals, axis=0, keepdims=True)
def _min(self, axis='x'):
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(factorized.matrix[:, 1:, :], axis=1)
if 0 not in factorized.xdef: np.place(vals, vals == 0, np.inf)
return np.nanmin(vals, axis=0, keepdims=True)
def _percentile(self, axis='x', perc=0.5):
"""
        Computes percentiles from the incoming distribution of X vs. Y and the
        requested percentile value. The implementation mirrors the algorithm
        used in SPSS Dimensions and the EXAMINE procedure in SPSS Statistics.
        It is based on percentile definition #6 (adjusted for survey weights)
        in:
        Hyndman, R. J. and Fan, Y. (1996) -
"Sample Quantiles in Statistical Packages",
The American Statistician, 50, No. 4, 361-365.
Parameters
----------
axis : {'x', 'y'}, default 'x'
The axis which is reduced in the aggregation, i.e. column vs. row
medians.
perc : float, default 0.5
Defines the percentile to be computed. Defaults to 0.5,
the sample median.
Returns
-------
percs : np.array
Numpy array storing percentile values.
"""
percs = []
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(np.nansum(factorized.matrix[:, 1:, :], axis=1,
keepdims=True), axis=1)
weights = (vals/vals)*self.wv
for shape_i in range(0, vals.shape[1]):
iter_weights = weights[:, shape_i]
iter_vals = vals[:, shape_i]
mask = ~np.isnan(iter_weights)
iter_weights = iter_weights[mask]
iter_vals = iter_vals[mask]
sorter = np.argsort(iter_vals)
iter_vals = np.take(iter_vals, sorter)
iter_weights = np.take(iter_weights, sorter)
iter_wsum = np.nansum(iter_weights, axis=0)
iter_wcsum = np.cumsum(iter_weights, axis=0)
k = (iter_wsum + 1.0) * perc
if iter_vals.shape[0] == 0:
percs.append(0.00)
elif iter_vals.shape[0] == 1:
percs.append(iter_vals[0])
elif iter_wcsum[0] > k:
wcsum_k = iter_wcsum[0]
percs.append(iter_vals[0])
elif iter_wcsum[-1] <= k:
percs.append(iter_vals[-1])
else:
wcsum_k = iter_wcsum[iter_wcsum <= k][-1]
p_k_idx = np.searchsorted(np.ndarray.flatten(iter_wcsum), wcsum_k)
p_k = iter_vals[p_k_idx]
p_k1 = iter_vals[p_k_idx+1]
w_k1 = iter_weights[p_k_idx+1]
excess = k - wcsum_k
if excess >= 1.0:
percs.append(p_k1)
else:
if w_k1 >= 1.0:
percs.append((1.0-excess)*p_k + excess*p_k1)
else:
percs.append((1.0-(excess/w_k1))*p_k +
(excess/w_k1)*p_k1)
return np.array(percs)[None, :]
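    # Worked example of the weighted definition-6 rule above (hypothetical data):
    # values [1, 2, 3, 4] with unit weights and perc=0.5 give W=4, k=(4+1)*0.5=2.5,
    # cumulative weights [1, 2, 3, 4]; the largest cumulative weight <= k is 2, so
    # p_k=2, p_k1=3, excess=0.5 and the median is 0.5*2 + 0.5*3 = 2.5.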
def _organize_margins(self, margin):
if self._res_is_stat():
if self.type == 'array' or self.y == '@' or self.x == '@':
self._has_y_margin = self._has_x_margin = False
else:
if self.factorized == 'x':
if not margin:
self._has_x_margin = False
self._has_y_margin = False
self.result = self.result[:, 1:]
else:
self._has_x_margin = False
self._has_y_margin = True
else:
if not margin:
self._has_x_margin = False
self._has_y_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
if self._res_is_margin():
if self.y == '@' or self.x == '@':
if self.current_agg in ['cbase', 'x_sum']:
self._has_y_margin = self._has_x_margin = False
if self.current_agg in ['rbase', 'y_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
else:
if self.current_agg in ['cbase', 'x_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[:, 1:]
else:
self._has_x_margin = False
self._has_y_margin = True
if self.current_agg in ['rbase', 'y_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
elif self.current_agg in ['freq', 'summary', 'calc']:
if self.type == 'array' or self.y == '@' or self.x == '@':
if not margin:
self.result = self.result[1:, :]
self._has_x_margin = False
self._has_y_margin = False
else:
self._has_x_margin = True
self._has_y_margin = False
else:
if not margin:
self.result = self.result[1:, 1:]
self._has_x_margin = False
self._has_y_margin = False
else:
self._has_x_margin = True
self._has_y_margin = True
else:
pass
def _sort_indexer_as_codes(self, indexer, codes):
mapping = sorted(zip(indexer, codes), key=lambda l: l[1])
return [i[0] for i in mapping]
def _get_y_indexers(self):
if self._squeezed or self.type in ['simple', 'nested']:
if self.ydef is not None:
idxs = list(range(1, len(self.ydef)+1))
return self._sort_indexer_as_codes(idxs, self.ydef)
else:
return [1]
else:
y_indexers = []
xdef_len = len(self.xdef)
zero_based_ys = [idx for idx in range(0, xdef_len)]
for y_no in range(0, len(self.ydef)):
if y_no == 0:
y_indexers.append(zero_based_ys)
else:
y_indexers.append([idx + y_no * xdef_len
for idx in zero_based_ys])
            return y_indexers
from gromet import * # never do this :)
# -----------------------------------------------------------------------------
# GroMEt instance
# -----------------------------------------------------------------------------
def generate_gromet() -> Gromet:
# ----- Metadata -----
# -- code span reference metadata
file_simple_sir_py_uid = UidCodeFileReference("simple_sir_code")
code_s_in = \
CodeSpanReference(uid=UidMetadatum("code_s_in"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=31,
line_end=None,
col_begin=9,
col_end=None)
code_s_out = \
CodeSpanReference(uid=UidMetadatum("code_s_out"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=53,
line_end=None,
col_begin=13,
col_end=None)
code_i_in = \
CodeSpanReference(uid=UidMetadatum("code_i_in"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=31,
line_end=None,
col_begin=19,
col_end=None)
code_i_out = \
CodeSpanReference(uid=UidMetadatum("code_i_out"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=53,
line_end=None,
col_begin=16,
col_end=None)
code_r_in = \
CodeSpanReference(uid=UidMetadatum("code_r_in"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=31,
line_end=None,
col_begin=29,
col_end=None)
code_r_out = \
CodeSpanReference(uid=UidMetadatum("code_r_out"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=53,
line_end=None,
col_begin=19,
col_end=None)
code_beta_in = \
CodeSpanReference(uid=UidMetadatum("code_beta_in"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=31,
line_end=None,
col_begin=39,
col_end=42)
code_gamma_in = \
CodeSpanReference(uid=UidMetadatum("code_gamma_in"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=31,
line_end=None,
col_begin=52,
col_end=56)
code_dt_in = \
        CodeSpanReference(uid=UidMetadatum("code_dt_in"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=31,
line_end=None,
col_begin=66,
col_end=67)
code_infected_id = \
CodeSpanReference(uid=UidMetadatum("code_infected_id"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=46,
line_end=None,
col_begin=5,
col_end=12)
code_recovered_id = \
        CodeSpanReference(uid=UidMetadatum("code_recovered_id"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='IDENTIFIER',
file_id=file_simple_sir_py_uid,
line_begin=47,
line_end=None,
col_begin=5,
col_end=13)
code_infected_exp = \
CodeSpanReference(uid=UidMetadatum("code_infected_exp"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='CODE_BLOCK',
file_id=file_simple_sir_py_uid,
line_begin=46,
line_end=None,
col_begin=5,
col_end=50)
code_recovered_exp = \
CodeSpanReference(uid=UidMetadatum("code_recovered_exp"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='CODE_BLOCK',
file_id=file_simple_sir_py_uid,
line_begin=47,
line_end=None,
col_begin=5,
col_end=32)
code_s_update_exp = \
CodeSpanReference(uid=UidMetadatum("code_s_update_exp"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='CODE_BLOCK',
file_id=file_simple_sir_py_uid,
line_begin=49,
line_end=None,
col_begin=5,
col_end=21)
code_i_update_exp = \
CodeSpanReference(uid=UidMetadatum("code_i_update_exp"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='CODE_BLOCK',
file_id=file_simple_sir_py_uid,
line_begin=50,
line_end=None,
col_begin=5,
col_end=33)
code_r_update_exp = \
CodeSpanReference(uid=UidMetadatum("code_r_update_exp"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='CODE_BLOCK',
file_id=file_simple_sir_py_uid,
line_begin=51,
line_end=None,
col_begin=5,
col_end=22)
code_sir_fn = \
CodeSpanReference(uid=UidMetadatum("code_sir_fn"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
code_type='CODE_BLOCK',
file_id=file_simple_sir_py_uid,
line_begin=31,
line_end=53,
col_begin=None,
col_end=None)
# -- model interface metadata
simple_sir_model_interface = \
ModelInterface(uid=UidMetadatum("simple_sir_model_interface"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
variables=[UidVariable("S"), UidVariable("I"), UidVariable("R"),
UidVariable("S_2"), UidVariable("I_2"), UidVariable("R_2"),
UidVariable("beta"), UidVariable("gamma"),
UidVariable("dt"), UidVariable("infected"), UidVariable("recovered")],
parameters=[UidVariable("beta"), UidVariable("gamma"),
UidVariable("dt")],
initial_conditions=[UidVariable("S"), UidVariable("I"), UidVariable("R")])
# -- code collection reference metadata
file_simple_sir_py_code_file_reference = \
CodeFileReference(uid=file_simple_sir_py_uid,
name="Simple_SIR",
path="SIR-simple.py")
askeid_simple_sir_code = \
GlobalReferenceId(type='aske_id',
id='fa2a6b75-2dfd-4124-99b3-e6a8587a7f55')
metadatum_code_collection_ref = \
CodeCollectionReference(uid=UidMetadatum("simple_sir_code_collection_ref"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
global_reference_id=askeid_simple_sir_code,
file_ids=[file_simple_sir_py_code_file_reference])
# -- textual document reference set
askeid_simple_sir_doc_wiki = \
GlobalReferenceId(type='aske_id',
id='4b429087-7e7c-4623-80fd-64fb934a8be6')
text_doc_simple_sir_wiki = \
TextualDocumentReference(uid=UidDocumentReference("text_doc_simple_sir_wiki"),
global_reference_id=askeid_simple_sir_doc_wiki,
cosmos_id="COSMOS",
cosmos_version_number="3.0",
automates_id="AutoMATES-TR",
automates_version_number="2.0",
bibjson=Bibjson(title="The SIR Model Without Vital Dynamics - Wikipedia",
author=[BibjsonAuthor(name="Wikimedia Foundation")],
type="wikipedia",
website={"url": "https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SIR_model_without_vital_dynamics"},
timestamp="2021-01-21T20:13",
file="ideal_sir_model_without_vital_dynamics.pdf",
file_url="https://drive.google.com/file/d/1lexWCycLLTZq6FtQZ4AtBw30Bjeo5hRD/view?usp=sharing",
identifier=[{"type":"aske_id","id":"4b429087-7e7c-4623-80fd-64fb934a8be6"}]))
metadatum_textual_document_reference_set = \
TextualDocumentReferenceSet(uid=UidMetadatum("simple_sir_textual_document_ref_set"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
documents=[text_doc_simple_sir_wiki])
# -- Variable text definition metadata
# NOTE: The TextExtracton coordinates are made up here,
# just to show examples of the kinds of values that will appear.
variable_S_text_definition_extraction = \
TextExtraction(document_reference_uid=UidDocumentReference("text_doc_simple_sir_wiki"),
page=0,
block=1,
char_begin=1,
char_end=7)
variable_S_text_definition = \
TextDefinition(uid=UidMetadatum("S_text_definition"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
text_extraction=variable_S_text_definition_extraction,
variable_identifier="S",
variable_definition="the stock of susceptible population")
variable_I_text_definition_extraction = \
TextExtraction(document_reference_uid=UidDocumentReference("text_doc_simple_sir_wiki"),
page=0,
block=1,
char_begin=8,
char_end=13)
variable_I_text_definition = \
TextDefinition(uid=UidMetadatum("I_text_definition"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
text_extraction=variable_I_text_definition_extraction,
variable_identifier="I",
variable_definition="stock of infected")
variable_R_text_definition_extraction = \
TextExtraction(document_reference_uid=UidDocumentReference("text_doc_simple_sir_wiki"),
page=0,
block=1,
char_begin=15,
char_end=21)
variable_R_text_definition = \
TextDefinition(uid=UidMetadatum("R_text_definition"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
text_extraction=variable_R_text_definition_extraction,
variable_identifier="R",
variable_definition="the stock of recovered population")
variable_beta_text_definition_extraction = \
TextExtraction(document_reference_uid=UidDocumentReference("text_doc_simple_sir_wiki"),
page=0,
block=2,
char_begin=32,
char_end=45)
variable_beta_text_definition = \
TextDefinition(uid=UidMetadatum("beta_text_definition"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
text_extraction=variable_beta_text_definition_extraction,
variable_identifier="β",
variable_definition="Rate of transmission via contact")
variable_gamma_text_definition_extraction = \
TextExtraction(document_reference_uid=UidDocumentReference("text_doc_simple_sir_wiki"),
page=0,
block=2,
char_begin=52,
char_end=65)
variable_gamma_text_definition = \
TextDefinition(uid=UidMetadatum("gamma_text_definition"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
text_extraction=variable_gamma_text_definition_extraction,
variable_identifier="γ",
variable_definition="Rate of recovery from infection")
variable_dt_text_definition_extraction = \
TextExtraction(document_reference_uid=UidDocumentReference("text_doc_simple_sir_wiki"),
page=1,
block=4,
char_begin=22,
char_end=32)
variable_dt_text_definition = \
TextDefinition(uid=UidMetadatum("dt_text_definition"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
text_extraction=variable_dt_text_definition_extraction,
variable_identifier="dt",
variable_definition="Next inter-event time")
# -- Variable text parameter metadata
# NOTE: This is made up (the text_doc_simple_sir_wiki does not include parameter mentions)
# This is just an example of what a TextParameter metadata might look like
beta_text_parameter_extraction = \
TextExtraction(document_reference_uid=UidDocumentReference("text_doc_simple_sir_wiki"),
page=0,
block=5,
char_begin=67,
char_end=79)
beta_text_parameter = \
TextParameter(uid=UidMetadatum("example_beta_text_parameter"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
text_extraction=beta_text_parameter_extraction,
variable_identifier="β",
value="0.0000001019")
# -- Equation definition metadata
s_diff_equation_extraction = \
EquationExtraction(document_reference_uid=UidDocumentReference("text_doc_simple_sir_wiki"),
equation_number=0,
                           equation_source_latex=r"\frac{dS}{dt} = - \frac{\beta I S}{N}",
equation_source_mml='<?xml version="1.0" encoding="utf-8" standalone="no"?> <math xmlns="http://www.w3.org/1998/Math/MathML" display="block" title="\frac{dS}{dt} = - \frac{\beta I S}{N} "> <mrow> <mfrac> <mrow> <mi>d</mi> <mi>S</mi> </mrow> <mrow> <mi>d</mi> <mi>t</mi> </mrow> </mfrac> <mo>=</mo> <mo>-</mo> <mfrac> <mrow> <mi>β</mi> <mi>I</mi> <mi>S</mi> </mrow> <mrow> <mi>N</mi> </mrow> </mfrac> </mrow> </math>')
s_diff_equation_definition = \
EquationDefinition(uid=UidMetadatum("s_diff_equation_definition"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
equation_extraction=s_diff_equation_extraction)
i_diff_equation_extraction = \
EquationExtraction(document_reference_uid=UidDocumentReference("text_doc_simple_sir_wiki"),
equation_number=1,
                           equation_source_latex=r"\frac{dI}{dt} = \frac{\beta I S}{N} - \gamma I",
equation_source_mml='<?xml version="1.0" encoding="utf-8" standalone="no"?> <math xmlns="http://www.w3.org/1998/Math/MathML" display="block" title="\frac{dI}{dt} = \frac{\beta I S}{N} - \gamma I "> <mrow> <mfrac> <mrow> <mi>d</mi> <mi>I</mi> </mrow> <mrow> <mi>d</mi> <mi>t</mi> </mrow> </mfrac> <mo>=</mo> <mfrac> <mrow> <mi>β</mi> <mi>I</mi> <mi>S</mi> </mrow> <mrow> <mi>N</mi> </mrow> </mfrac> <mo>-</mo> <mi>γ</mi> <mi>I</mi> </mrow> </math>')
i_diff_equation_definition = \
EquationDefinition(uid=UidMetadatum("i_diff_equation_definition"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
equation_extraction=i_diff_equation_extraction)
r_diff_equation_extraction = \
EquationExtraction(document_reference_uid=UidDocumentReference("text_doc_simple_sir_wiki"),
equation_number=2,
                           equation_source_latex=r"\frac{dR}{dt} = \gamma I",
equation_source_mml='<?xml version="1.0" encoding="utf-8" standalone="no"?> <math xmlns="http://www.w3.org/1998/Math/MathML" display="block" title="\frac{dR}{dt} = \gamma I "> <mrow> <mfrac> <mrow> <mi>d</mi> <mi>R</mi> </mrow> <mrow> <mi>d</mi> <mi>t</mi> </mrow> </mfrac> <mo>=</mo> <mi>γ</mi> <mi>I</mi> </mrow> </math>')
r_diff_equation_definition = \
EquationDefinition(uid=UidMetadatum("r_diff_equation_definition"),
provenance=Provenance(method=MetadatumMethod('Manual_claytonm@az'),
timestamp=get_current_datetime()),
equation_extraction=r_diff_equation_extraction)
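    # The three extracted equations above encode the standard SIR dynamics:
    #   dS/dt = -beta*I*S/N,  dI/dt = beta*I*S/N - gamma*I,  dR/dt = gamma*I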
# ----- Model component definitions -----
variables = [
# state input
Variable(uid=UidVariable("S"), name="S", type=UidType("Float"),
states=[UidPort("P:sir.in.S"),
# UidWire("W:S1.1"), UidWire("W:S1.2"),
# UidPort("P:infected_exp.in.S"),
# UidPort("P:S_update_exp.in.S")
],
metadata=[variable_S_text_definition]),
Variable(uid=UidVariable("I"), name="I", type=UidType("Float"),
states=[UidPort("P:sir.in.I"),
# UidWire("W:I1.1"), UidWire("W:I1.2"), UidWire("W:I1.3"),
# UidPort("P:infected_exp.in.I"),
# UidPort("P:recovered_exp.in.I"),
# UidPort("P:I_update_exp.in.I")
],
metadata=[variable_I_text_definition]),
Variable(uid=UidVariable("R"), name="R", type=UidType("Float"),
states=[UidPort("P:sir.in.R"),
# UidWire("W:R1.1"), UidWire("W:R1.2"),
# UidPort("P:infected_exp.in.R"),
# UidPort("P:R_update_exp.in.R")
],
metadata=[variable_R_text_definition]),
# state output
Variable(uid=UidVariable("S_2"), name="S", type=UidType("Float"),
states=[UidPort("P:sir.out.S"), # out
# UidWire("W:S1.1"), UidWire("W:S1.2"),
# UidWire("W:S2"),
# UidPort("P:S_update_exp.out.S")
],
metadata=[variable_S_text_definition]),
Variable(uid=UidVariable("I_2"), name="I", type=UidType("Float"),
states=[UidPort("P:sir.out.I"),
# UidWire("W:I1.1"), UidWire("W:I1.2"), UidWire("W:I1.3"),
# UidWire("W:I2"),
# UidPort("P:I_update_exp.out.I")
],
metadata=[variable_I_text_definition]),
Variable(uid=UidVariable("R_2"), name="R", type=UidType("Float"),
states=[UidPort("P:sir.out.R"),
# UidWire("W:R1.1"), UidWire("W:R1.2"),
# UidWire("W:R2"),
# UidPort("P:R_update_exp.out.R")
],
metadata=[variable_R_text_definition]),
# parameters
Variable(uid=UidVariable("beta"), name="beta", type=UidType("Float"),
states=[UidPort("P:sir.in.beta"),
# UidWire("W:beta"),
# UidPort("P:infected_exp.in.beta")
],
metadata=[variable_beta_text_definition,
beta_text_parameter]),
Variable(uid=UidVariable("gamma"), name="gamma", type=UidType("Float"),
states=[UidPort("P:sir.in.gamma"),
# UidWire("W:gamma"),
# UidPort("P:recovered_exp.in.gamma")
],
metadata=[variable_gamma_text_definition]),
Variable(uid=UidVariable("dt"), name="dt", type=UidType("Float"),
states=[UidPort("P:sir.in.dt"),
# UidWire("W:dt.1"), UidWire("W:dt.2"),
# UidPort("P:infected_exp.in.dt"),
# UidPort("P:recovered_exp.in.dt")
],
metadata=[variable_dt_text_definition]),
# internal
Variable(uid=UidVariable("infected"), name="infected", type=UidType("Float"),
states=[UidPort("P:infected_exp.out.infected"),
# UidWire("W:infected.1"), UidWire("W:infected.2"),
# UidPort("P:S_update_exp.in.infected"),
# UidPort("P:I_update_exp.in.infected")
],
metadata=None),
Variable(uid=UidVariable("recovered"), name="recovered", type=UidType("Float"),
states=[UidPort("P:recovered_exp.out.recovered"),
# UidWire("W:recovered.1"), UidWire("W:recovered.2"),
# UidPort("P:I_update_exp.in.recovered"),
# UidPort("P:R_update_exp.in.recovered")
],
metadata=None),
]
wires = [
# Var "S"
Wire(uid=UidWire("W:S1.1"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.S"),
tgt=UidPort("P:infected_exp.in.S")),
Wire(uid=UidWire("W:S1.2"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.S"),
tgt=UidPort("P:S_update_exp.in.S")),
# Var "I"
Wire(uid=UidWire("W:I1.1"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.I"),
tgt=UidPort("P:infected_exp.in.I")),
Wire(uid=UidWire("W:I1.2"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.I"),
tgt=UidPort("P:recovered_exp.in.I")),
Wire(uid=UidWire("W:I1.3"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.I"),
tgt=UidPort("P:I_update_exp.in.I")),
# Var "R"
Wire(uid=UidWire("W:R1.1"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.R"),
tgt=UidPort("P:infected_exp.in.R")),
Wire(uid=UidWire("W:R1.2"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.R"),
tgt=UidPort("P:R_update_exp.in.R")),
# Var "beta"
Wire(uid=UidWire("W:beta"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.beta"),
tgt=UidPort("P:infected_exp.in.beta")),
# Var "gamma"
Wire(uid=UidWire("W:gamma"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.gamma"),
tgt=UidPort("P:recovered_exp.in.gamma")),
# Var "dt"
Wire(uid=UidWire("W:dt.1"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.dt"),
tgt=UidPort("P:infected_exp.in.dt")),
Wire(uid=UidWire("W:dt.2"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:sir.in.dt"),
tgt=UidPort("P:recovered_exp.in.dt")),
# Wire for Var "infected"
Wire(uid=UidWire("W:infected.1"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:infected_exp.out.infected"),
tgt=UidPort("P:S_update_exp.in.infected")),
Wire(uid=UidWire("W:infected.2"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:infected_exp.out.infected"),
tgt=UidPort("P:I_update_exp.in.infected")),
# Wire for Var "recovered"
Wire(uid=UidWire("W:recovered.1"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:recovered_exp.out.recovered"),
tgt=UidPort("P:I_update_exp.in.recovered")),
Wire(uid=UidWire("W:recovered.2"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:recovered_exp.out.recovered"),
tgt=UidPort("P:R_update_exp.in.recovered")),
# part of Var "S"
Wire(uid=UidWire("W:S2"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:S_update_exp.out.S"),
tgt=UidPort("P:sir.out.S")),
# part of Var "I"
Wire(uid=UidWire("W:I2"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:I_update_exp.out.I"),
tgt=UidPort("P:sir.out.I")),
# part of Var "R"
Wire(uid=UidWire("W:R2"),
type=None,
value_type=UidType("Float"),
name=None, value=None, metadata=None,
src=UidPort("P:R_update_exp.out.R"),
tgt=UidPort("P:sir.out.R")),
]
ports = [
# The input ports to the 'sir' outer/parent Function
Port(uid=UidPort("P:sir.in.S"), box=UidBox("B:sir"),
type=UidType("PortInput"),
value_type=UidType("Float"),
name="S", value=None,
metadata=[code_s_in]),
Port(uid=UidPort("P:sir.in.I"), box=UidBox("B:sir"),
type=UidType("PortInput"),
value_type=UidType("Float"),
name="I", value=None,
metadata=[code_i_in]),
Port(uid=UidPort("P:sir.in.R"), box=UidBox("B:sir"),
type=UidType("PortInput"),
value_type=UidType("Float"),
name="R", value=None,
metadata=[code_r_in]),
Port(uid=UidPort("P:sir.in.beta"), box=UidBox("B:sir"),
type=UidType("PortInput"),
value_type=UidType("Float"),
name="beta", value=None,
metadata=[code_beta_in]),
Port(uid=UidPort("P:sir.in.gamma"), box=UidBox("B:sir"),
type=UidType("PortInput"),
value_type=UidType("Float"),
name="gamma", value=None,
metadata=[code_gamma_in]),
Port(uid=UidPort("P:sir.in.dt"), box=UidBox("B:sir"),
type=UidType("PortInput"),
value_type=UidType("Float"),
name="dt", value=None,
metadata=[code_dt_in]),
# The output ports to the 'sir' outer/parent Function
Port(uid=UidPort("P:sir.out.S"), box=UidBox("B:sir"),
type=UidType("PortOutput"),
value_type=UidType("Float"),
name="S", value=None,
metadata=[code_s_out]),
Port(uid=UidPort("P:sir.out.I"), box=UidBox("B:sir"),
type=UidType("PortOutput"),
value_type=UidType("Float"),
name="I", value=None,
metadata=[code_i_out]),
Port(uid=UidPort("P:sir.out.R"), box=UidBox("B:sir"),
type=UidType("PortOutput"),
value_type=UidType("Float"),
name="R", value=None,
metadata=[code_r_out]),
# The input ports to the 'infected_exp' anonymous assignment Expression
Port(uid=UidPort("P:infected_exp.in.S"), box=UidBox("B:infected_exp"),
type=UidType("PortInput"),
value_type=UidType("Float"), name="S", value=None, metadata=None),
Port(uid=UidPort("P:infected_exp.in.I"), box=UidBox("B:infected_exp"),
type=UidType("PortInput"),
value_type=UidType("Float"), name="I", value=None, metadata=None),
Port(uid=UidPort("P:infected_exp.in.R"), box=UidBox("B:infected_exp"),
type=UidType("PortInput"),
value_type=UidType("Float"), name="R", value=None, metadata=None),
Port(uid=UidPort("P:infected_exp.in.beta"), box=UidBox("B:infected_exp"),
type=UidType("PortInput"),
value_type=UidType("Float"), name="beta", value=None, metadata=None),
Port(uid=UidPort("P:infected_exp.in.dt"), box=UidBox("B:infected_exp"),
type=UidType("PortInput"),
value_type=UidType("Float"), name="dt", value=None, metadata=None),
# The output ports to the 'infected_exp' anonymous assignment Expression
Port(uid=UidPort("P:infected_exp.out.infected"), box=UidBox("B:infected_exp"),
type=UidType("PortOutput"),
value_type=UidType("Float"), name="infected", value=None,
metadata=[code_infected_id]),
# The input ports to the 'recovered_exp' anonymous assignment Expression
Port(uid=UidPort("P:recovered_exp.in.I"), box=UidBox("B:recovered_exp"),
type=UidType("PortInput"),
value_type=UidType("Float"), name="I", value=None, metadata=None),
Port(uid=UidPort("P:recovered_exp.in.gamma"), box=UidBox("B:recovered_exp"),
type=UidType("PortInput"),
value_type=UidType("Float"), name="gamma", value=None, metadata=None),
Port(uid=UidPort("P:recovered_exp.in.dt"), box=UidBox("B:recovered_exp"),
type=UidType("PortInput"),
value_type=UidType("Float"), name="dt", value=None, metadata=None),
# The output ports to the 'recovered_exp' anonymous assignment Expression
Port(uid=UidPort("P:recovered_exp.out.recovered"), box=UidBox("B:recovered_exp"),
type=UidType("PortOutput"),
value_type=UidType("Float"), name="recovered", value=None,
metadata=[code_recovered_id]),
# The input ports to the 'S_update_exp' anonymous assignment Expression
Port(uid=UidPort("P:S_update_exp.in.S"), box=UidBox("B:S_update_exp"),
type=UidType("PortInput"),
value_type=UidType("Float"), name="S", value=None, metadata=None),
Port(uid=UidPort("P:S_update_exp.in.infected"), box=UidBox("B:S_update_exp"),
type=UidType("PortInput"),
value_type=UidType("Float"), name="infected", value=None, metadata=None),
        # The output ports to the 'S_update_exp' anonymous assignment Expression
<filename>src/ur10_tests.py<gh_stars>0
# Adapted from ur5_reacher_6d.py
# Copyright (c) 2018, The SenseAct Authors.
# All rights reserved.
import time
import copy
import numpy as np
import sys
import baselines.common.tf_util as U
from multiprocessing import Process, Value, Manager, Queue
from baselines.trpo_mpi.trpo_mpi import learn
from baselines.ppo1.mlp_policy import MlpPolicy
from senseact.envs.ur.reacher_env import ReacherEnv
from senseact.utils import tf_set_seeds, NormalizedEnv
from tensorflow.train import Saver
from tensorflow.saved_model import simple_save, loader
#sys.path.append("/home/oli/SenseAct/examples/advanced")
#from helper import create_callback
from callback import create_callback
from run_policy import run_policy
from senseact.devices.ur import ur_utils
from senseact import utils
import builtins
import csv
import os
# An environment that allows points to be selected on an x_points x y_points x z_points grid within the end-effector bounds and tests each point num_test times.
class GridTestEnv(ReacherEnv):
def __init__(self,
setup,
host=None,
dof=6,
control_type='position',
derivative_type='none',
reset_type='random',
reward_type='linear',
deriv_action_max=10,
first_deriv_max=10, # used only with second derivative control
vel_penalty=0,
obs_history=1,
actuation_sync_period=1,
episode_length_time=None,
episode_length_step=None,
rllab_box = False,
servoj_t=ur_utils.COMMANDS['SERVOJ']['default']['t'],
servoj_gain=ur_utils.COMMANDS['SERVOJ']['default']['gain'],
speedj_a=ur_utils.COMMANDS['SPEEDJ']['default']['a'],
speedj_t_min=ur_utils.COMMANDS['SPEEDJ']['default']['t_min'],
movej_t=2, # used for resetting
accel_max=None,
speed_max=None,
dt=0.008,
delay=0.0, # to simulate extra delay in the system
x_points=10,
y_points=10,
z_points=10,
num_test=10,
**kwargs):
        assert(x_points > 0)
        assert(y_points > 0)
        assert(z_points > 0)
        assert(num_test > 0)
self._x_points = x_points
self._y_points = y_points
self._z_points = z_points
self._num_test = num_test
self._target_generator_ = self._target_generator_()
super(GridTestEnv, self).__init__(setup=setup,
host=host,
dof=dof,
control_type=control_type,
derivative_type=derivative_type,
target_type='position',
reset_type=reset_type,
reward_type=reward_type,
deriv_action_max=deriv_action_max,
first_deriv_max=first_deriv_max, # used only with second derivative control
vel_penalty=vel_penalty,
obs_history=obs_history,
actuation_sync_period=actuation_sync_period,
episode_length_time=episode_length_time,
episode_length_step=episode_length_step,
rllab_box = rllab_box,
servoj_t=servoj_t,
servoj_gain=servoj_gain,
speedj_a=speedj_a,
speedj_t_min=speedj_t_min,
movej_t=movej_t, # used for resetting
accel_max=accel_max,
speed_max=speed_max,
dt=dt,
delay=delay, # to simulate extra delay in the system
**kwargs)
def _reset_(self):
"""Resets the environment episode.
Moves the arm to either fixed reference or random position and
generates a new target from _target_generator_.
"""
print("Resetting")
x_target = self._target_generator_.__next__()
np.copyto(self._x_target_, x_target)
self._target_ = self._x_target_[self._end_effector_indices]
self._action_ = self._rand_obj_.uniform(self._action_low, self._action_high)
self._cmd_prev_ = np.zeros(len(self._action_low)) # to be used with derivative control of velocity
if self._reset_type != 'none':
if self._reset_type == 'random':
reset_angles, _ = self._pick_random_angles_()
elif self._reset_type == 'zero':
reset_angles = self._q_ref[self._joint_indices]
self._reset_arm(reset_angles)
rand_state_array_type, rand_state_array_size, rand_state_array = utils.get_random_state_array(
self._rand_obj_.get_state()
)
np.copyto(self._shared_rstate_array_, np.frombuffer(rand_state_array, dtype=rand_state_array_type))
print("Reset done")
def _target_generator_(self):
# increments for each dimension
x_inc = (self._end_effector_high[0] - self._end_effector_low[0]) / (self._x_points+1)
y_inc = (self._end_effector_high[1] - self._end_effector_low[1]) / (self._y_points+1)
z_inc = (self._end_effector_high[2] - self._end_effector_low[2]) / (self._z_points+1)
# lists of x, y, z coords
x_points = [self._end_effector_low[0] + x_inc * x_point for x_point in range(1, self._x_points+2)]
y_points = [self._end_effector_low[1] + y_inc * y_point for y_point in range(1, self._y_points+2)]
z_points = [self._end_effector_low[2] + z_inc * z_point for z_point in range(1, self._z_points+2)]
for x in range(self._x_points):
for y in range(self._y_points):
for z in range(self._z_points):
for test in range(self._num_test):
yield x_points[x], y_points[y], z_points[z]
yield 0, 0, 0 # just here so that final callback can happen without program crashing
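# For example, with x_points = y_points = z_points = 2 and num_test = 3, _target_generator_ yields each of the 8 interior grid points
# 3 times (x varying slowest) and then yields (0, 0, 0) once so the final reset does not crash.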
# callback to use for logging
def grid_test_callback(locals, globals):
shared_returns = globals['__builtins__']['shared_returns']
if locals['iters_so_far'] > 0:
ep_rets = locals['seg']['ep_rets']
ep_lens = locals['seg']['ep_lens']
target = locals['env']._x_target_
if len(ep_rets):
if shared_returns is not None:
shared_returns['write_lock'] = True
shared_returns['episodic_returns'] += ep_rets
shared_returns['episodic_lengths'] += ep_lens
shared_returns['write_lock'] = False
with open('experiment_data/gridtest_trpo03.csv', 'a', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
row = [np.mean(ep_rets), *target]
csvwriter.writerow(row)
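# Each CSV row written by grid_test_callback is the batch's mean episodic return followed by the three target coordinates,
# e.g. "-10.5,0.42,0.10,0.63" (illustrative values only).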
# Loads a policy from policy_path and runs a grid test over an x_points x y_points x z_points grid, testing each point num_test times
def run_grid_test(x_points, y_points, z_points, num_test, policy_path):
# use fixed random state
rand_state = np.random.RandomState(1).get_state()
np.random.set_state(rand_state)
tf_set_seeds(np.random.randint(1, 2**31 - 1))
# set up coordination between eps per iteration and num_test
episode_length_time = 4.0
dt = 0.04
timesteps_per_ep = episode_length_time / dt
timesteps_per_batch = int(timesteps_per_ep * num_test)
total_timesteps = timesteps_per_batch * x_points * y_points * z_points
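# Worked example with the values used here: episode_length_time = 4.0 s and dt = 0.04 s give timesteps_per_ep = 100,
# so timesteps_per_batch = 100 * num_test; a 10x10x10 grid with num_test = 10 therefore needs 1,000,000 total timesteps.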
# Create GridTest environment
env = GridTestEnv(
setup="UR10_6dof",
host=None,
dof=6,
control_type="velocity",
reset_type="zero",
reward_type="precision",
derivative_type="none",
deriv_action_max=5,
first_deriv_max=2,
accel_max=1.4, # was 1.4
speed_max=0.3, # was 0.3
speedj_a=1.4,
episode_length_time=episode_length_time,
episode_length_step=None,
actuation_sync_period=1,
dt=dt,
run_mode="multiprocess",
rllab_box=False,
movej_t=2.0,
delay=0.0,
random_state=rand_state,
x_points=x_points,
y_points=y_points,
z_points=z_points,
num_test=num_test
)
env = NormalizedEnv(env)
# Start environment processes
env.start()
# Create baselines TRPO policy function
sess = U.single_threaded_session()
sess.__enter__()
# Create and start plotting process
plot_running = Value('i', 1)
shared_returns = Manager().dict({"write_lock": False,
"episodic_returns": [],
"episodic_lengths": [], })
builtins.shared_returns = shared_returns
# Spawn plotting process
pp = Process(target=plot_ur5_reacher, args=(env, timesteps_per_batch, shared_returns, plot_running))
pp.start()
# Run TRPO policy
run_policy(network='mlp',
num_layers=2, # these are network_kwargs for the MLP network
num_hidden=64,
env=env,
total_timesteps=total_timesteps, #Originally 200,000
timesteps_per_batch=timesteps_per_batch,
callback=grid_test_callback,
load_path=policy_path
)
# Safely terminate plotter process
plot_running.value = 0 # shutdown plotting process
time.sleep(2)
pp.join()
env.close()
def main():
# use fixed random state
rand_state = np.random.RandomState(1).get_state()
np.random.set_state(rand_state)
tf_set_seeds(np.random.randint(1, 2**31 - 1))
# Create UR5 Reacher2D environment
env = ReacherEnv(
setup="UR10_6dof",
host=None,
dof=6,
control_type="velocity",
target_type="position",
reset_type="zero",
reward_type="precision",
derivative_type="none",
deriv_action_max=5,
first_deriv_max=2,
accel_max=1.4, # was 1.4
speed_max=0.3, # was 0.3
speedj_a=1.4,
episode_length_time=4.0,
episode_length_step=None,
actuation_sync_period=1,
dt=0.04,
run_mode="multiprocess",
rllab_box=False,
movej_t=2.0,
delay=0.0,
random_state=rand_state
)
env = NormalizedEnv(env)
# Start environment processes
env.start()
# Create baselines TRPO policy function
sess = U.single_threaded_session()
sess.__enter__()
# Load previously trained model if it exists
# No longer needed
"""def policy_fn(name, ob_space, ac_space):
return MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=64, num_hid_layers=2)"""
# Create and start plotting process
plot_running = Value('i', 1)
shared_returns = Manager().dict({"write_lock": False,
"episodic_returns": [],
"episodic_lengths": [], })
# Spawn plotting process
pp = Process(target=plot_ur5_reacher, args=(env, 2048, shared_returns, plot_running))
pp.start()
# Create callback function for logging data from baselines TRPO learn
kindred_callback = create_callback(shared_returns)
# Train baselines TRPO
learn(network='mlp',
num_layers=2, # these are network_kwargs for the MLP network
num_hidden=64,
env=env,
total_timesteps=50000, #Originally 200,000
timesteps_per_batch=1000,
max_kl=0.05,
cg_iters=10,
cg_damping=0.1,
vf_iters=5,
vf_stepsize=0.001,
gamma=0.995,
lam=0.995,
callback=kindred_callback,
load_path=None,
save_path='saved_policies/trpo02',
)
# Safely terminate plotter process
plot_running.value = 0 # shutdown plotting process
time.sleep(2)
pp.join()
env.close()
class MovingPointEnv(ReacherEnv):
def __init__(self,
setup,
host=None,
dof=6,
control_type='position',
derivative_type='none',
reset_type='random',
reward_type='linear',
deriv_action_max=10,
first_deriv_max=10, # used only with second derivative control
vel_penalty=0,
obs_history=1,
actuation_sync_period=1,
episode_length_time=None,
episode_length_step=None,
rllab_box = False,
servoj_t=ur_utils.COMMANDS['SERVOJ']['default']['t'],
servoj_gain=ur_utils.COMMANDS['SERVOJ']['default']['gain'],
speedj_a=ur_utils.COMMANDS['SPEEDJ']['default']['a'],
speedj_t_min=ur_utils.COMMANDS['SPEEDJ']['default']['t_min'],
movej_t=2, # used for resetting
accel_max=None,
speed_max=None,
dt=0.008,
delay=0.0, # to simulate extra delay in the system
move_shape='circle', # circle or line
move_vel=0.1, # velocity of moving point in m/s or rad/s
line_midpoint=[0, 0, 0],
line_length=0.5,
line_dir='x', # direction for line to move in
circle_radius=0.3,
circle_plane='xy', # plane which circle is on (xy, yz, xz)
**kwargs):
assert(move_shape == 'circle' or move_shape == 'line')
assert(len(line_midpoint) == 3)
assert(line_length > 0)
assert(circle_radius > 0)
self._move_shape_ = move_shape
self._move_vel_ = move_vel
self._line_length_ = line_length
self._circle_radius_ = circle_radius
# Seems like XYZ points are actually denoted [z, y, x]
dirs = {
'x': 2,
'y': 1,
'z': 0
}
planes = {
'xy': 0,
'xz': 1,
'yz': 2
}
self._line_dir_ = dirs.get(line_dir)
self._circle_plane_ = planes.get(circle_plane)
if(move_shape == 'circle'):
self._move_generator_ = self._circle_generator_(self._circle_plane_)
elif(move_shape == 'line'):
self._move_generator_ = self._line_generator_(self._line_dir_)
super(MovingPointEnv, self).__init__(setup=setup,
host=host,
dof=dof,
control_type=control_type,
derivative_type=derivative_type,
target_type='position',
reset_type=reset_type,
reward_type=reward_type,
deriv_action_max=deriv_action_max,
first_deriv_max=first_deriv_max, # used only with second derivative control
vel_penalty=vel_penalty,
obs_history=obs_history,
actuation_sync_period=actuation_sync_period,
episode_length_time=episode_length_time,
episode_length_step=episode_length_step,
rllab_box = rllab_box,
servoj_t=servoj_t,
servoj_gain=servoj_gain,
speedj_a=speedj_a,
speedj_t_min=speedj_t_min,
movej_t=movej_t, # used for resetting
accel_max=accel_max,
speed_max=speed_max,
dt=dt,
delay=delay, # to simulate extra delay in the system
**kwargs)
self._target_queue_ = Queue()
self._line_midpoint_ = (self._end_effector_high + self._end_effector_low)/2 + np.array(line_midpoint)
self._circle_centrepoint_ = (self._end_effector_high + self._end_effector_low)/2
# overrides start() in rtrl_base_env to allow for queue in process/thread
def start(self):
"""Starts all manager threads and communicator processes."""
self._running = True
# Start the communicator process
for comm in self._all_comms.values():
comm.start()
time.sleep(0.5) # let the communicator buffer have some packets
self._new_obs_time = time.time()
# Create a process/thread to read and write to all communicators
if self._run_mode == 'multithread':
# multithread case we don't need the check, but assigning here
# to keep the polling loop the same
self._parent_pid = os.getppid()
self._polling_loop = Thread(target=self._run_loop_, args=(self._target_queue_, ))
self._polling_loop.start()
elif self._run_mode == 'multiprocess':
self._parent_pid = os.getpid()
self._polling_loop = Process(target=self._run_loop_, args=(self._target_queue_, ))
self._polling_loop.start()
# overrides step function in RTRLBaseEnv to allow for update of target each step
def step(self, action):
"""Optional step function for OpenAI Gym compatibility.
Returns: a tuple (observation, reward, done, {} ('info', for gym compatibility))
"""
# Set the desired action
self.act(action)
# Update target
self._x_target_ = self._move_generator_.__next__()
self._target_queue_.put(self._x_target_)
#print(self._target_)
# Wait for one time-step
next_obs, reward, done = self.sense_wait()
return next_obs, reward, done, {}
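# With the constructor defaults above (move_vel = 0.1 m/s, dt = 0.008 s) the target advances 0.1 * 0.008 = 0.8 mm along its path per control step.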
def _line_generator_(self, line_dir):
point = np.zeros(3)
np.copyto(point, self._line_midpoint_)
direction = 1
yield point
while(True):
point[line_dir] += self._move_vel_ * direction * self._dt
if(abs(point[line_dir] - self._line_midpoint_[line_dir]) > self._line_length_/2):
# Assumed completion of the truncated statement: step back inside the bounds, reverse the direction of travel, and yield the updated point each control step
point[line_dir] -= self._move_vel_ * direction * self._dt
direction *= -1
yield point
predict_res = predict_once(data_fit, model, input_dim, time_step, predict_step)
predict_res = np.squeeze(predict_res)
step_to_train += predict_step
for i in range(predict_step):
data_list.append(predict_res[i])
step_to_train = step_to_train+step_high
if step_to_train <= low_step:
step_to_train = low_step
return step_to_train
# def step_predict_nnls(data,step_in):
def step_predict_twice(data,model,input_dim,predict_step,time_step,div,top_step,low_step,measure):
pre_list = measure.split(" ")
measure_s = pre_list[0] + 'S' + pre_list[-1]
measure_t = pre_list[0] + 'T' + pre_list[-1]
filename = '%s.json' % measure_s
config = load_config(filename)
# config['high'] = step_high
# config['low'] = step_low
# save_config(config, measure)
#
#
# max_loss = config['loss_max']
step_high = config['high']
max_loss_read = config['loss_max']
data_array = np.array(data).astype(float)
data_array = data_array / max_loss_read
data_use = list(data_array)
fit_step = 0 - time_step - 2*predict_step
data_fit = data_use[fit_step:]
data_list = list(data_fit[:])
data_fit_1 = np.array(data_fit[-time_step:]).astype(float)
data_fit_2 = np.array(data_fit[-1*2*time_step:-time_step]).astype(float)
data_fit_1 = np.reshape(data_fit_1,(1,time_step,input_dim))
data_fit_2 = np.reshape(data_fit_2,(1,time_step,input_dim))
# data = np.reshape(data, (1, time_step, input_dim))
predict_res = predict_once_t(data_fit_1,data_fit_2,model,input_dim,time_step,predict_step)
predict_res = np.squeeze(predict_res)
step_to_train = predict_step
tmp_base = 0 - 3*predict_step
for i in range(predict_step):
data_list.append(predict_res[i])
while True:
print(step_to_train)
if step_to_train + step_high >= top_step:
break
data_div_pre = data_list[tmp_base:]
print(data_div_pre)
data_div_base = []
for i in range(1,3*predict_step):
tmp_div = derivation(data_div_pre[i-1],data_div_pre[i])
data_div_base.append(tmp_div)
der_base = np.mean(data_div_base)
print(der_base)
if der_base < div:
break
data_fit = data_list[fit_step:]
data_list = list(data_fit[:])
data_fit_1 = np.array(data_fit[-time_step:]).astype(float)
data_fit_2 = np.array(data_fit[-1 * 2 * time_step:-time_step]).astype(float)
data_fit_1 = np.reshape(data_fit_1, (1, time_step, input_dim))
data_fit_2 = np.reshape(data_fit_2, (1, time_step, input_dim))
# data = np.reshape(data, (1, time_step, input_dim))
predict_res = predict_once_t(data_fit_1, data_fit_2, model, input_dim, time_step, predict_step)
predict_res = np.squeeze(predict_res)
step_to_train += predict_step
for i in range(predict_step):
data_list.append(predict_res[i])
step_to_train = step_to_train+step_high
if step_to_train <= low_step:
step_to_train = low_step
return step_to_train
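# step_predict_twice extends the predicted loss curve predict_step points at a time until either the mean change over the last
# 3*predict_step predicted points (as computed by derivation(), defined elsewhere in this file) drops below div, or
# step_to_train + step_high would reach top_step; step_high is then added and the result is raised to at least low_step.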
def get_ns(v1):
ns_list = []
for i in v1.list_namespace().items:
ns_list.append(i.metadata.name)
return ns_list
def save_config2(config,filename):
config_content = {}
for key,value in config.items():
# if key != 'job' and key != 'ns':
config_content[key] = value
# task_content['task_id'] = tasks['task_id']
fw = open(filename, 'w', encoding='utf-8')
# ensure_ascii defaults to True, which renders any non-ASCII characters in the dict as \uXXXX escapes; setting it to False keeps them human-readable
dic_json = json.dumps(config_content, ensure_ascii=False, indent=4)  # serialize the dict to a JSON string
fw.write(dic_json)
fw.close()
def check_path(name):
#check_path!!!
# train_dir = os.path.join('/tfdata/k8snfs/', name)
train_dir = os.path.join('/tfdata/k8snfs/setad2/', name)
created = False
print(train_dir)
if not os.path.exists(train_dir):
os.makedirs(train_dir)
created = True
return train_dir,created
def step_resource_predict_handle(conn,dictionary,lock,pool_size,connect_try=5,predict_fre=150):
#measure,db="PREDICT",host='192.168.128.10'
aToken = '<KEY>'
aConfiguration = kubernetes.client.Configuration()
aConfiguration.host = "https://192.168.128.10:6443"
aConfiguration.verify_ssl = False
aConfiguration.api_key = {"authorization": "Bearer " + aToken}
aApiClient = kubernetes.client.ApiClient(aConfiguration)
v1 = kubernetes.client.CoreV1Api(aApiClient)
try:
lock.acquire()
# lock.release()
tmp = dictionary['running_number']
tmp = tmp + 1
dictionary['running_number'] = tmp
lock.release()
except Exception as e:
print(e)
lock.release()
print("now running number is: %d" % tmp)
influx_client = influxdb.InfluxDBClient(host='192.168.128.10',port=8086,username='admin',password='<PASSWORD>',database="PREDICT")
try_times = 1
legal_pattern = r'\w+ \d+'
msg_from_client = conn.recv(4096)
matched = None
while True:
if try_times > connect_try:
break
msg_from_client_str = str(msg_from_client.decode('utf-8'))
print(msg_from_client_str+" "+"try_time: "+str(try_times))
# try_times = try_times + 1
matched = re.match(legal_pattern,msg_from_client_str)
if matched is not None:
break
if not msg_from_client:
break
response = "403 "+"Message-error!"
conn.send(bytes(response, 'utf-8'))
msg_from_client = conn.recv(4096)
try_times = try_times + 1
# msg_from_client_str = str(msg_from_client.decode('utf-8'))
if matched is None:
conn.close()
lock.acquire()
# lock.release()
tmp = dictionary['running_number']
tmp = tmp - 1
dictionary['running_number'] = tmp
lock.release()
return
print("connect success!")
measure = matched.group()
pre_list = measure.split(" ")
measure_s = pre_list[0] + 'S' + pre_list[-1]
measure_t = pre_list[0] + 'T' + pre_list[-1]
measure_up = pre_list[0] + 'U' + pre_list[-1]
measure_write = pre_list[0]+'W'+pre_list[-1]
lock.acquire()
# lock.release()
tmp_running = dictionary['running_number']
lock.release()
res_pool = pool_size - tmp_running
print("resuming pool size: %d" % res_pool)
response = "400 "+pre_list[0]+" "+pre_list[-1]+" "+str(res_pool)
conn.send(bytes(response,'utf-8'))
catched_job = pre_list[0]
catched_job = catched_job.lower()
if catched_job == 'xce':
aim_ns = 'xception-' + pre_list[-1] + '-' + pre_list[-1]
else:
aim_ns = catched_job + "-" + pre_list[-1] + "-" + pre_list[-1]
print("this is work for %s" % (aim_ns))
try:
# job_con_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, aim_ns)
job_con_path = "/tfdata/k8snfs/setad2/%s/%s.json" % (aim_ns, aim_ns)
job_config = load_config(job_con_path)
print("load job config success!!")
# allow path!!!
allow_path = '/tfdata/k8snfs/setad2/%s/%s.json' % (aim_ns, measure_t)
# allow_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, measure_t)
except Exception as e:
print(e)
# allow_path2 = "/tfdata/k8snfs/%s/%s_r.json" % (measure_t,measure_t)
allow_p, created = check_path(aim_ns)
print(allow_p)
if created:
allow_read = {}
# allow_readr = {}
allow_read['OK'] = True
allow_read['retry'] = job_config['retry']
save_config2(allow_read,allow_path)
# save_config2(allow_readr,allow_path2)
if not os.path.exists(allow_path):
allow_read = {}
# allow_readr = {}
allow_read['OK'] = True
allow_read['retry'] = job_config['retry']
save_config2(allow_read, allow_path)
ns_list = get_ns(v1)
print(ns_list)
print(aim_ns)
print(aim_ns in ns_list)
ceshi_count = 0
ceshi_in = False
while True:
if ceshi_count > 210:
break
ns_list = get_ns(v1)
write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
key_write = write_ss.keys()
print(key_write[:])
write_inter = write_ss[key_write[0]]
write_items = list(write_inter)
print(write_items[:])
write_now = int(write_items[0]['modulate'])
if aim_ns not in ns_list and (write_now==0):
ceshi_count+=1
time.sleep(2.5)
else:
ceshi_in = True
break
if not ceshi_in:
conn.close()
lock.acquire()
# lock.release()
tmp = dictionary['running_number']
tmp = tmp - 1
dictionary['running_number'] = tmp
lock.release()
print("namespace created error!")
return
result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
key = result.keys()
print(key)
result_inter = result[key[0]]
result_items = list(result_inter)
print(result_items)
trains_step = int(result_items[0]['training_step'])
tmp_item = dict(result_items[0])
key_tmp = list(tmp_item.keys())
if 'retry' not in key_tmp:
retry_now = int(job_config['retry'])
else:
retry_now = int(result_items[0]['retry'])
allow_read = load_config(allow_path)
print("Reload success!!")
allow_read['retry'] = retry_now
# 'ps_replicas': job.ps_replicas,
# 'worker_replicas': job.worker_replicas
if 'ps' not in key_tmp:
ps_now = int(job_config['ps_replicas'])
else:
ps_now = int(result_items[0]['ps'])
if 'worker' not in key_tmp:
worker_now = int(job_config['worker_replicas'])
else:
worker_now = int(result_items[0]['worker'])
allow_read['worker'] = worker_now
allow_read['ps'] = ps_now
save_config2(allow_read,allow_path)
print("save success!!")
result2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
key2 = result2.keys()
print(key2)
result_inter2 = result2[key2[0]]
result_items2 = list(result_inter2)
print(result_items2)
retry_top = int(result_items2[0]['retry'])
print(retry_top)
print(type(retry_top))
print(retry_now)
print(type(retry_now))
if retry_top != retry_now:
new_ps = int(result_items2[0]['ps'])
new_worker = int(result_items2[0]['worker'])
trains_step = math.ceil(trains_step*worker_now/new_worker)
allow_read = load_config(allow_path)
allow_read['retry'] = retry_top
allow_read['ps'] = new_ps
allow_read['worker'] = new_worker
save_config2(allow_read,allow_path)
print("saved successful!!")
print(trains_step)
modekk = 0
if trains_step <= 200:
step_items = [
{
'measurement': measure_t,
'tags': {
'task': int(pre_list[-1]),
'runtimes': int(pre_list[-1]),
'retry': int(retry_top)
},
'fields': {
'training_step': int(trains_step),
'ps': int(allow_read['ps']),
'worker': int(allow_read['worker'])
}
}
]
print("saved in db")
print(trains_step)
influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
print("Writed in db")
# conn.close()
# lock.acquire()
# # lock.release()
# tmp = dictionary['running_number']
# tmp = tmp - 1
# dictionary['running_number'] = tmp
# lock.release()
print("Do not need to predict,return")
modekk = 1
min_steps = math.ceil(trains_step*0.2)
length = math.ceil(min_steps*0.6)
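# e.g. an illustrative trains_step of 1000 gives min_steps = ceil(1000 * 0.2) = 200 and length = ceil(200 * 0.6) = 120.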
print("Initial Config Success!"+"min_steps:"+str(min_steps))
time_start = time.time()
print("start to load data")
loss,max_loss,modekk_z = load_data(min_steps=min_steps,length=length,measure=measure,first=True)
if not loss:
conn.close()
lock.acquire()
# lock.release()
tmp = dictionary['running_number']
tmp = tmp - 1
dictionary['running_number'] = tmp
lock.release()
return
# loss_array = normalization(loss,max_loss)
result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
key = result.keys()
result_inter = result[key[0]]
result_items = list(result_inter)
trains_step = int(result_items[0]['training_step'])
step_to_train = trains_step
if trains_step<=200:
modekk_z = 1
if modekk_z!=1:
print("Get data first time")
data_x, data_y, data_twice_x, data_twice_y = make_dataset(loss[:], max_loss, 20, 10, 1)
data_x_lstm = reshape_for_lstm(data_x[:])
# data_y_lstm = reshape_for_lstm(data_y[:])
# data_twice_x_1 = data_twice_x[:,1,:]
# data_twice_x_2 = data_twice_x[:,0,:]
# # data_twice_y = reshape_for_lstm(data_twice_y[:])
# data_twice_x_1_lstm = reshape_for_lstm(data_twice_x_1[:])
# data_twice_x_2_lstm = reshape_for_lstm(data_twice_x_2[:])
print("Make dataset first time")
# model = load_model('save_model/31122019-031018-e10.h5')
if os.path.exists("save_model/%s.h5" % measure_s):
model = load_model('save_model/%s.h5' % measure_s)
else:
model = build_lstm_model(time_step=20, predict_step=10, input_dim=1)
print("Start to train")
history, model = train(x=data_x_lstm, y=data_y, epochs=100, batch_size=64, save_dir='save_model', model=model,
measure=measure)
step_to_train = step_predict(data=loss[:], model=model, input_dim=1, predict_step=10, time_step=20, div=0.01,
top_step=trains_step, low_step=math.ceil(trains_step * 0.5), measure=measure)
else:
step_to_train = trains_step
res1 = influx_client.query("select * from "+measure_up+" order by desc limit 1")
key1 = res1.keys()
res1_inter = res1[key1[0]]
res1_items = list(res1_inter)
retry = int(res1_items[0]['retry'])
allow_read = load_config(allow_path)
retry_now = int(allow_read['retry'])
if retry_now != retry:
new_ps = int(res1_items[0]['ps'])
new_worker = int(res1_items[0]['worker'])
step_to_train = math.ceil(step_to_train*int(allow_read['worker'])/new_worker)
allow_read['retry'] = retry
allow_read['ps'] = new_ps
allow_read['worker'] = new_worker
save_config2(allow_read,allow_path)
step_items = [
{
'measurement': measure_t,
'tags': {
'task': int(pre_list[-1]),
'runtimes': int(pre_list[-1]),
'retry': int(retry)
},
'fields': {
'training_step': step_to_train,
'ps': int(allow_read['ps']),
'worker': int(allow_read['worker'])
}
}
]
print("saved in db")
print(step_to_train)
influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
print("Writed in db")
print("First prdict cost time: "+str(time.time() - time_start))
iftrain = 0
time_total = 0
if modekk != 1:
modekk = modekk_z
countt00 = 0
# iikk =0
# tmp_panduan_key = -1
while True:
if modekk == 1:
break
# selected_node = select_node(influx_client,measure_s)
res1 = influx_client.query("select * from " + measure_s + " order by desc limit 1")
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=relative-import
"""Generate template values for attributes.
Extends IdlType with property |constructor_type_name|.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import idl_types
from idl_types import inherits_interface
from v8_globals import includes
import v8_types
import v8_utilities
from v8_utilities import (cpp_name_or_partial, capitalize, cpp_name, has_extended_attribute,
has_extended_attribute_value, scoped_name, strip_suffix,
uncapitalize, extended_attribute_value_as_list, is_unforgeable,
is_legacy_interface_type_checking)
def attribute_context(interface, attribute, interfaces):
"""Creates a Jinja template context for an attribute of an interface.
Args:
interface: An interface which |attribute| belongs to
attribute: An attribute to create the context for
interfaces: A dict which maps an interface name to the definition
which can be referred if needed
Returns:
A Jinja template context for |attribute|
"""
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
idl_type.add_includes_for_type(extended_attributes)
if idl_type.enum_values:
includes.add('core/inspector/console_message.h')
# [CheckSecurity]
is_cross_origin = 'CrossOrigin' in extended_attributes
is_check_security_for_receiver = (
has_extended_attribute_value(interface, 'CheckSecurity', 'Receiver') and
is_cross_origin)
is_check_security_for_return_value = (
has_extended_attribute_value(attribute, 'CheckSecurity', 'ReturnValue'))
if is_check_security_for_receiver or is_check_security_for_return_value:
includes.add('bindings/core/v8/binding_security.h')
if is_check_security_for_return_value:
includes.add('core/frame/use_counter.h')
# [CrossOrigin]
if has_extended_attribute_value(attribute, 'CrossOrigin', 'Setter'):
includes.add('bindings/core/v8/v8_cross_origin_setter_info.h')
# [Constructor]
# TODO(yukishiino): Constructors are much like methods although constructors
# are not methods. Constructors must be data-type properties, and we can
# support them as a kind of methods.
constructor_type = idl_type.constructor_type_name if is_constructor_attribute(attribute) else None
# [CEReactions]
is_ce_reactions = 'CEReactions' in extended_attributes
if is_ce_reactions:
includes.add('core/html/custom/ce_reactions_scope.h')
# [CustomElementCallbacks], [Reflect]
is_custom_element_callbacks = 'CustomElementCallbacks' in extended_attributes
is_reflect = 'Reflect' in extended_attributes
if is_custom_element_callbacks or is_reflect:
includes.add('core/html/custom/v0_custom_element_processing_stack.h')
# [PerWorldBindings]
if 'PerWorldBindings' in extended_attributes:
assert idl_type.is_wrapper_type or 'LogActivity' in extended_attributes, '[PerWorldBindings] should only be used with wrapper types: %s.%s' % (interface.name, attribute.name)
# [SaveSameObject]
is_save_same_object = (
'SameObject' in attribute.extended_attributes and
'SaveSameObject' in attribute.extended_attributes)
if is_save_same_object:
includes.add('platform/bindings/v8_private_property.h')
if (base_idl_type == 'EventHandler' and
interface.name in ['Window', 'WorkerGlobalScope'] and
attribute.name == 'onerror'):
includes.add('bindings/core/v8/v8_error_handler.h')
cached_attribute_validation_method = extended_attributes.get('CachedAttribute')
keep_alive_for_gc = is_keep_alive_for_gc(interface, attribute)
if cached_attribute_validation_method or keep_alive_for_gc:
includes.add('platform/bindings/v8_private_property.h')
# [CachedAccessor]
is_cached_accessor = 'CachedAccessor' in extended_attributes
if is_cached_accessor:
includes.add('platform/bindings/v8_private_property.h')
context = {
'activity_logging_world_list_for_getter': v8_utilities.activity_logging_world_list(attribute, 'Getter'), # [ActivityLogging]
'activity_logging_world_list_for_setter': v8_utilities.activity_logging_world_list(attribute, 'Setter'), # [ActivityLogging]
'activity_logging_world_check': v8_utilities.activity_logging_world_check(attribute), # [ActivityLogging]
'cached_attribute_validation_method': cached_attribute_validation_method,
'constructor_type': constructor_type,
'context_enabled_feature_name': v8_utilities.context_enabled_feature_name(attribute),
'cpp_name': cpp_name(attribute),
'cpp_type': idl_type.cpp_type,
'cpp_type_initializer': idl_type.cpp_type_initializer,
'deprecate_as': v8_utilities.deprecate_as(attribute), # [DeprecateAs]
'enum_type': idl_type.enum_type,
'enum_values': idl_type.enum_values,
'exposed_test': v8_utilities.exposed(attribute, interface), # [Exposed]
'getter_has_no_side_effect': has_extended_attribute_value(attribute, 'Affects', 'Nothing'),
'has_cross_origin_getter':
has_extended_attribute_value(attribute, 'CrossOrigin', None) or
has_extended_attribute_value(attribute, 'CrossOrigin', 'Getter'),
'has_cross_origin_setter': has_extended_attribute_value(attribute, 'CrossOrigin', 'Setter'),
'has_custom_getter': has_custom_getter(attribute),
'has_custom_setter': has_custom_setter(attribute),
'has_promise_type': idl_type.name == 'Promise',
'has_setter': has_setter(interface, attribute),
'idl_type': str(idl_type),
'is_cached_accessor': is_cached_accessor,
'is_call_with_execution_context': has_extended_attribute_value(attribute, 'CallWith', 'ExecutionContext'),
'is_call_with_script_state': has_extended_attribute_value(attribute, 'CallWith', 'ScriptState'),
'is_ce_reactions': is_ce_reactions,
'is_check_security_for_receiver': is_check_security_for_receiver,
'is_check_security_for_return_value': is_check_security_for_return_value,
'is_custom_element_callbacks': is_custom_element_callbacks,
# TODO(yukishiino): Make all DOM attributes accessor-type properties.
'is_data_type_property': is_data_type_property(interface, attribute),
'is_getter_raises_exception': # [RaisesException]
'RaisesException' in extended_attributes and
extended_attributes['RaisesException'] in (None, 'Getter'),
'is_keep_alive_for_gc': keep_alive_for_gc,
'is_lenient_this': 'LenientThis' in extended_attributes,
'is_nullable': idl_type.is_nullable,
'is_explicit_nullable': idl_type.is_explicit_nullable,
'is_named_constructor': is_named_constructor_attribute(attribute),
'is_partial_interface_member':
'PartialInterfaceImplementedAs' in extended_attributes,
'is_per_world_bindings': 'PerWorldBindings' in extended_attributes,
'is_put_forwards': 'PutForwards' in extended_attributes,
'is_read_only': attribute.is_read_only,
'is_reflect': is_reflect,
'is_replaceable': 'Replaceable' in attribute.extended_attributes,
'is_save_same_object': is_save_same_object,
'is_static': attribute.is_static,
'is_url': 'URL' in extended_attributes,
'is_unforgeable': is_unforgeable(interface, attribute),
'on_instance': v8_utilities.on_instance(interface, attribute),
'on_interface': v8_utilities.on_interface(interface, attribute),
'on_prototype': v8_utilities.on_prototype(interface, attribute),
'origin_trial_feature_name': v8_utilities.origin_trial_feature_name(attribute), # [OriginTrialEnabled]
'use_output_parameter_for_result': idl_type.use_output_parameter_for_result,
'measure_as': v8_utilities.measure_as(attribute, interface), # [MeasureAs]
'name': attribute.name,
'property_attributes': property_attributes(interface, attribute),
'reflect_empty': extended_attributes.get('ReflectEmpty'),
'reflect_invalid': extended_attributes.get('ReflectInvalid', ''),
'reflect_missing': extended_attributes.get('ReflectMissing'),
'reflect_only': extended_attribute_value_as_list(attribute, 'ReflectOnly'),
'runtime_enabled_feature_name': v8_utilities.runtime_enabled_feature_name(attribute), # [RuntimeEnabled]
'secure_context_test': v8_utilities.secure_context(attribute, interface), # [SecureContext]
'cached_accessor_name': '%s%sCachedAccessor' % (interface.name, attribute.name.capitalize()),
'world_suffixes': (
['', 'ForMainWorld']
if 'PerWorldBindings' in extended_attributes
else ['']), # [PerWorldBindings]
}
if not has_custom_getter(attribute):
getter_context(interface, attribute, context)
if not has_custom_setter(attribute) and has_setter(interface, attribute):
setter_context(interface, attribute, interfaces, context)
# [RuntimeCallStatsCounter]
runtime_call_stats_context(interface, attribute, context)
# [CrossOrigin] is incompatible with a number of other attributes, so check
# for them here.
if is_cross_origin:
if context['has_cross_origin_setter'] and context['has_custom_setter']:
raise Exception('[CrossOrigin] and [Custom] are incompatible on the same setter: %s.%s', interface.name, attribute.name)
if context['is_per_world_bindings']:
raise Exception('[CrossOrigin] and [PerWorldBindings] are incompatible: %s.%s', interface.name, attribute.name)
if context['constructor_type']:
raise Exception('[CrossOrigin] cannot be used for constructors: %s.%s', interface.name, attribute.name)
return context
def runtime_call_stats_context(interface, attribute, context):
includes.add('platform/bindings/runtime_call_stats.h')
generic_counter_name = 'Blink_' + v8_utilities.cpp_name(interface) + '_' + attribute.name
(counter, extended_attribute_defined) = v8_utilities.rcs_counter_name(attribute, generic_counter_name)
runtime_call_stats = {
'extended_attribute_defined': extended_attribute_defined,
'getter_counter': '%s_Getter' % counter,
'setter_counter': '%s_Setter' % counter,
'constructor_getter_callback_counter': '%s_ConstructorGetterCallback' % generic_counter_name,
}
context.update({
'runtime_call_stats': runtime_call_stats
})
def is_origin_trial_enabled(attribute):
return bool(attribute['origin_trial_feature_name'])
def is_secure_context(attribute):
return bool(attribute['secure_context_test'])
def filter_accessors(attributes):
return [attribute for attribute in attributes if
not (attribute['exposed_test'] or
is_secure_context(attribute) or
attribute['context_enabled_feature_name'] or
is_origin_trial_enabled(attribute) or
attribute['runtime_enabled_feature_name']) and
not attribute['is_data_type_property']]
def is_data_attribute(attribute):
return (not (attribute['exposed_test'] or
is_secure_context(attribute) or
attribute['context_enabled_feature_name'] or
is_origin_trial_enabled(attribute) or
attribute['runtime_enabled_feature_name']) and
attribute['is_data_type_property'])
def is_lazy_data_attribute(attribute):
return ((attribute['constructor_type'] and not
(attribute['measure_as'] or attribute['deprecate_as'])) or
(attribute['idl_type'] == 'Window' and attribute['name'] == 'frames') or
(attribute['idl_type'] == 'Window' and attribute['name'] == 'self') or
(attribute['idl_type'] == 'Window' and attribute['name'] == 'window'))
def filter_data_attributes(attributes):
return [attribute for attribute in attributes if is_data_attribute(attribute) and not is_lazy_data_attribute(attribute)]
def filter_lazy_data_attributes(attributes):
return [attribute for attribute in attributes if is_data_attribute(attribute) and is_lazy_data_attribute(attribute)]
def filter_runtime_enabled(attributes):
return [attribute for attribute in attributes if
not (attribute['exposed_test'] or
is_secure_context(attribute)) and
attribute['runtime_enabled_feature_name']]
def filter_conditionally_enabled(attributes):
return [attribute for attribute in attributes if
attribute['exposed_test'] or
(is_secure_context(attribute) and
not is_origin_trial_enabled(attribute))]
################################################################################
# Getter
################################################################################
def getter_context(interface, attribute, context):
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
cpp_value = getter_expression(interface, attribute, context)
# Normally we can inline the function call into the return statement to
# avoid the overhead of using a Ref<> temporary, but for some cases
# (nullable types, EventHandler, [CachedAttribute], or if there are
# exceptions), we need to use a local variable.
# FIXME: check if compilers are smart enough to inline this, and if so,
# always use a local variable (for readability and CG simplicity).
if (idl_type.is_explicit_nullable or
base_idl_type == 'EventHandler' or
'CachedAttribute' in extended_attributes or
'ReflectOnly' in extended_attributes or
context['is_keep_alive_for_gc'] or
context['is_getter_raises_exception']):
context['cpp_value_original'] = cpp_value
cpp_value = 'cppValue'
def v8_set_return_value_statement(for_main_world=False):
if context['is_keep_alive_for_gc'] or 'CachedAttribute' in extended_attributes:
return 'V8SetReturnValue(info, v8Value)'
return idl_type.v8_set_return_value(
cpp_value, extended_attributes=extended_attributes, script_wrappable='impl',
for_main_world=for_main_world, is_static=attribute.is_static)
cpp_value_to_script_wrappable = cpp_value
if idl_type.is_array_buffer_view_or_typed_array:
cpp_value_to_script_wrappable += '.View()'
context.update({
'cpp_value': cpp_value,
'cpp_value_to_script_wrappable': cpp_value_to_script_wrappable,
'cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
cpp_value=cpp_value, creation_context='holder',
extended_attributes=extended_attributes),
'v8_set_return_value_for_main_world': v8_set_return_value_statement(for_main_world=True),
'v8_set_return_value': v8_set_return_value_statement(),
})
def getter_expression(interface, attribute, context):
arguments = []
this_getter_base_name = getter_base_name(interface, attribute, arguments)
getter_name = scoped_name(interface, attribute, this_getter_base_name)
arguments.extend(v8_utilities.call_with_arguments(
attribute.extended_attributes.get('CallWith')))
# Members of IDL partial interface definitions are implemented in C++ as
# static member functions, which for instance members (non-static members)
# take *impl as their first argument
if ('PartialInterfaceImplementedAs' in attribute.extended_attributes and
not attribute.is_static):
arguments.append('*impl')
if attribute.idl_type.is_explicit_nullable:
arguments.append('isNull')
if context['is_getter_raises_exception']:
arguments.append('exceptionState')
if attribute.idl_type.use_output_parameter_for_result:
arguments.append('result')
expression = '%s(%s)' % (getter_name, ', '.join(arguments))
# Needed to handle getter expressions returning Type& as the
# use site for |expression| expects Type*.
if (attribute.idl_type.is_interface_type and len(arguments) == 0 and
not attribute.idl_type.is_array_buffer_view_or_typed_array):
return 'WTF::GetPtr(%s)' % expression
return expression
CONTENT_ATTRIBUTE_GETTER_NAMES = {
'boolean': 'FastHasAttribute',
'long': 'GetIntegralAttribute',
'unsigned long': 'GetUnsignedIntegralAttribute',
}
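# For example, a [Reflect]-ed IDL attribute of type "boolean" is read via FastHasAttribute(), while one of type
# "unsigned long" uses GetUnsignedIntegralAttribute(); see getter_base_name below.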
def getter_base_name(interface, attribute, arguments):
extended_attributes = attribute.extended_attributes
if 'Reflect' not in extended_attributes:
name = cpp_name(attribute)
return name if 'ImplementedAs' in extended_attributes \
else uncapitalize(name)
content_attribute_name = extended_attributes['Reflect'] or attribute.name.lower()
if content_attribute_name in ['class', 'id', 'name']:
# Special-case for performance optimization.
return 'Get%sAttribute' % content_attribute_name.capitalize()
arguments.append(scoped_content_attribute_name(interface, attribute))
base_idl_type = attribute.idl_type.base_type
if base_idl_type in CONTENT_ATTRIBUTE_GETTER_NAMES:
return CONTENT_ATTRIBUTE_GETTER_NAMES[base_idl_type]
"""Objects representing distributions that can be sampled from."""
import collections
import itertools
import random
import math
import typing
import warnings
import numpy
import decorator
from scenic.core.lazy_eval import (LazilyEvaluable,
requiredProperties, needsLazyEvaluation, valueInContext, makeDelayedFunctionCall)
from scenic.core.utils import DefaultIdentityDict, argsToString, areEquivalent, cached, sqrt2
from scenic.core.errors import RuntimeParseError
## Misc
def dependencies(thing):
"""Dependencies which must be sampled before this value."""
return getattr(thing, '_dependencies', ())
def needsSampling(thing):
"""Whether this value requires sampling."""
return isinstance(thing, Distribution) or dependencies(thing)
def supportInterval(thing):
"""Lower and upper bounds on this value, if known."""
if hasattr(thing, 'supportInterval'):
return thing.supportInterval()
elif isinstance(thing, (int, float)):
return thing, thing
else:
return None, None
def underlyingFunction(thing):
"""Original function underlying a distribution wrapper."""
func = getattr(thing, '__wrapped__', thing)
return getattr(func, '__func__', func)
def canUnpackDistributions(func):
"""Whether the function supports iterable unpacking of distributions."""
return getattr(func, '_canUnpackDistributions', False)
def unpacksDistributions(func):
"""Decorator indicating the function supports iterable unpacking of distributions."""
func._canUnpackDistributions = True
return func
class RejectionException(Exception):
"""Exception used to signal that the sample currently being generated must be rejected."""
pass
## Abstract distributions
class Samplable(LazilyEvaluable):
"""Abstract class for values which can be sampled, possibly depending on other values.
Samplables may specify a proxy object which must have the same distribution as the
original after conditioning on the scenario's requirements. This allows transparent
conditioning without modifying Samplable fields of immutable objects.
Args:
dependencies: sequence of values that this value may depend on (formally, objects
for which sampled values must be provided to `sampleGiven`). It is legal to
include values which are not instances of `Samplable`, e.g. integers.
Attributes:
_conditioned: proxy object as described above; set using `conditionTo`.
_dependencies: tuple of other samplables which must be sampled before this one;
set by the initializer and subsequently immutable.
"""
def __init__(self, dependencies):
deps = []
props = set()
for dep in dependencies:
if needsSampling(dep) or needsLazyEvaluation(dep):
deps.append(dep)
props.update(requiredProperties(dep))
super().__init__(props)
self._dependencies = tuple(deps) # fixed order for reproducibility
self._conditioned = self # version (partially) conditioned on requirements
@staticmethod
def sampleAll(quantities):
"""Sample all the given Samplables, which may have dependencies in common.
Reproducibility note: the order in which the quantities are given can affect the
order in which calls to random are made, affecting the final result.
"""
subsamples = DefaultIdentityDict()
for q in quantities:
if q not in subsamples:
subsamples[q] = q.sample(subsamples) if isinstance(q, Samplable) else q
return subsamples
def sample(self, subsamples=None):
"""Sample this value, optionally given some values already sampled."""
if subsamples is None:
subsamples = DefaultIdentityDict()
for child in self._conditioned._dependencies:
if child not in subsamples:
subsamples[child] = child.sample(subsamples)
return self._conditioned.sampleGiven(subsamples)
def sampleGiven(self, value):
"""Sample this value, given values for all its dependencies.
The default implementation simply returns a dictionary of dependency values.
Subclasses must override this method to specify how actual sampling is done.
Args:
value (DefaultIdentityDict): dictionary mapping objects to their sampled
values. Guaranteed to provide values for all objects given in the set of
dependencies when this `Samplable` was created.
"""
return DefaultIdentityDict({ dep: value[dep] for dep in self._dependencies })
def conditionTo(self, value):
"""Condition this value to another value with the same conditional distribution."""
assert isinstance(value, Samplable)
self._conditioned = value
def evaluateIn(self, context):
"""See `LazilyEvaluable.evaluateIn`."""
value = super().evaluateIn(context)
# Check that all dependencies have been evaluated
assert all(not needsLazyEvaluation(dep) for dep in value._dependencies)
return value
def dependencyTree(self):
"""Debugging method to print the dependency tree of a Samplable."""
l = [str(self)]
for dep in dependencies(self):
for line in dep.dependencyTree():
l.append(' ' + line)
return l
class ConstantSamplable(Samplable):
"""A samplable which always evaluates to a constant value.
Only for internal use.
"""
def __init__(self, value):
assert not needsSampling(value)
assert not needsLazyEvaluation(value)
self.value = value
super().__init__(())
def sampleGiven(self, value):
return self.value
class Distribution(Samplable):
"""Abstract class for distributions.
.. note::
When called during dynamic simulations (vs. scenario compilation), constructors
for distributions return *actual sampled values*, not `Distribution` objects.
Args:
dependencies: values which this distribution may depend on (see `Samplable`).
valueType: **_valueType** to use (see below), or `None` for the default.
Attributes:
_valueType: type of the values sampled from this distribution, or `object` if the
type is not known.
"""
#: Default valueType for distributions of this class, when not otherwise specified.
_defaultValueType = object
def __new__(cls, *args, **kwargs):
dist = super().__new__(cls)
# at runtime, return a sample from the distribution immediately
import scenic.syntax.veneer as veneer
if veneer.simulationInProgress():
dist.__init__(*args, **kwargs)
return dist.sample()
else:
return dist
def __init__(self, *dependencies, valueType=None):
super().__init__(dependencies)
if valueType is None:
valueType = self._defaultValueType
self._valueType = valueType
def clone(self):
"""Construct an independent copy of this Distribution."""
raise NotImplementedError('clone() not supported by this distribution')
@property
@cached
def isPrimitive(self):
"""Whether this is a primitive Distribution."""
try:
self.clone()
return True
except NotImplementedError:
return False
def bucket(self, buckets=None):
"""Construct a bucketed approximation of this Distribution.
This function factors a given Distribution into a discrete distribution over
buckets together with a distribution for each bucket. The argument *buckets*
controls how many buckets the domain of the original Distribution is split into.
Since the result is an independent distribution, the original must support
`clone`.
"""
raise NotImplementedError('bucket() not supported by this distribution')
def supportInterval(self):
"""Compute lower and upper bounds on the value of this Distribution.
By default returns :samp:`(None, None)` indicating that no lower or upper bounds
are known. Subclasses may override this method to provide more accurate results.
"""
return None, None
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'): # ignore special attributes
return object.__getattribute__(self, name)
return AttributeDistribution(name, self)
def __call__(self, *args):
return OperatorDistribution('__call__', self, args)
def __iter__(self):
raise TypeError(f'distribution {self} is not iterable')
def _comparisonError(self, other):
raise RuntimeParseError('random values cannot be compared '
'(and control flow cannot depend on them)')
__lt__ = _comparisonError
__le__ = _comparisonError
__gt__ = _comparisonError
__ge__ = _comparisonError
__eq__ = _comparisonError
__ne__ = _comparisonError
def __hash__(self): # need to explicitly define since we overrode __eq__
return id(self)
def __len__(self):
raise RuntimeParseError('cannot take the len of a random value')
def __bool__(self):
raise RuntimeParseError('control flow cannot depend on a random value')
## Derived distributions
class CustomDistribution(Distribution):
"""Distribution with a custom sampler given by an arbitrary function"""
def __init__(self, sampler, *dependencies, name='CustomDistribution', evaluator=None):
super().__init__(*dependencies)
self.sampler = sampler
self.name = name
self.evaluator = evaluator
def sampleGiven(self, value):
return self.sampler(value)
def evaluateInner(self, context):
if self.evaluator is None:
raise NotImplementedError('evaluateIn() not supported by this distribution')
return self.evaluator(self, context)
def isEquivalentTo(self, other):
if not type(other) is CustomDistribution:
return False
return (self.sampler == other.sampler
and self.name == other.name
and self.evaluator == other.evaluator)
def __str__(self):
return f'{self.name}{argsToString(self.dependencies)}'
class TupleDistribution(Distribution, collections.abc.Sequence):
"""Distributions over tuples (or namedtuples, or lists)."""
def __init__(self, *coordinates, builder=tuple):
super().__init__(*coordinates)
self.coordinates = coordinates
self.builder = builder
def __len__(self):
return len(self.coordinates)
def __getitem__(self, index):
return self.coordinates[index]
def __iter__(self):
yield from self.coordinates
def sampleGiven(self, value):
return self.builder(value[coordinate] for coordinate in self.coordinates)
def evaluateInner(self, context):
coordinates = (valueInContext(coord, context) for coord in self.coordinates)
return TupleDistribution(*coordinates, builder=self.builder)
def isEquivalentTo(self, other):
if not type(other) is TupleDistribution:
return False
return (areEquivalent(self.coordinates, other.coordinates)
and self.builder == other.builder)
def __str__(self):
coords = ', '.join(str(c) for c in self.coordinates)
return f'({coords}, builder={self.builder})'
def toDistribution(val):
"""Wrap Python data types with Distributions, if necessary.
For example, tuples containing Samplables need to be converted into TupleDistributions
in order to keep track of dependencies properly.
"""
if isinstance(val, (tuple, list)):
coords = [toDistribution(c) for c in val]
if any(needsSampling(c) or needsLazyEvaluation(c) for c in coords):
if isinstance(val, tuple) and hasattr(val, '_fields'): # namedtuple
builder = type(val)._make
else:
builder = type(val)
return TupleDistribution(*coords, builder=builder)
return val
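# For example, toDistribution((1, dist)) returns a TupleDistribution whose dependencies include dist (any Distribution instance),
# while toDistribution((1, 2)) returns the plain tuple unchanged.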
class FunctionDistribution(Distribution):
"""Distribution resulting from passing distributions to a function"""
def __init__(self, func, args, kwargs, support=None, valueType=None):
args = tuple(toDistribution(arg) for arg in args)
kwargs = { name: toDistribution(arg) for name, arg in kwargs.items() }
if valueType is None:
valueType = typing.get_type_hints(func).get('return')
super().__init__(*args, *kwargs.values(), valueType=valueType)
self.function = func
self.arguments = args
self.kwargs = kwargs
self.support = support
def sampleGiven(self, value):
args = []
for arg in self.arguments:
if isinstance(arg, StarredDistribution):
val = value[arg]
try:
iter(val)
except TypeError: # TODO improve backtrace
raise TypeError(f"'{type(val).__name__}' object on line {arg.lineno} "
"is not iterable") from None
args.extend(val)
else:
args.append(value[arg])
kwargs = { name: value[arg] for name, arg in self.kwargs.items() }
return self.function(*args, **kwargs)
def evaluateInner(self, context):
function = valueInContext(self.function, context)
arguments = tuple(valueInContext(arg, context) for arg in self.arguments)
kwargs = { name: valueInContext(arg, context) for name, arg in self.kwargs.items() }
return FunctionDistribution(function, arguments, kwargs)
def supportInterval(self):
if self.support is None:
return None, None
subsupports = (supportInterval(arg) for arg in self.arguments)
kwss = { name: supportInterval(arg) for name, arg in self.kwargs.items() }
return self.support(*subsupports, **kwss)
def isEquivalentTo(self, other):
if not type(other) is FunctionDistribution:
return False
return (self.function == other.function
and areEquivalent(self.arguments, other.arguments)
and areEquivalent(self.kwargs, other.kwargs)
and self.support == other.support)
def __str__(self):
args = argsToString(itertools.chain(self.arguments, self.kwargs.items()))
return f'{self.function.__name__}{args}'
def distributionFunction(wrapped=None, *, support=None, valueType=None):
"""Decorator for wrapping a function so that it can take distributions as arguments."""
if wrapped is None: # written without arguments as @distributionFunction
return lambda wrapped: distributionFunction(wrapped,
support=support, valueType=valueType)
def helper(wrapped, *args, **kwargs):
args = tuple(toDistribution(arg) for arg in args)
kwargs = { name: toDistribution(arg) for name, arg in kwargs.items() }
if any(needsSampling(arg) for arg in itertools.chain(args, kwargs.values())):
return FunctionDistribution(wrapped, args, kwargs, support, valueType)
elif any(needsLazyEvaluation(arg)
for arg in itertools.chain(args, kwargs.values())):
# recursively call this helper (not the original function), since the
# delayed arguments may evaluate to distributions, in which case we'll
# have to make a FunctionDistribution
return makeDelayedFunctionCall(helper, (wrapped,) + args, kwargs)
else:
return wrapped(*args, **kwargs)
return unpacksDistributions(decorator.decorate(wrapped, helper, kwsyntax=True))
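# Minimal usage sketch (the decorated function is hypothetical, not part of this module):
#
#   @distributionFunction
#   def clamp(x, lo, hi):
#       return min(max(x, lo), hi)
#
# During scenario compilation, clamp(dist, 0, 1) with a Distribution argument returns a FunctionDistribution that defers the
# call until sampling; called with plain numbers, the original function runs immediately.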
def monotonicDistributionFunction(method, valueType=None):
"""Like distributionFunction, but additionally specifies that the function is monotonic."""
def support(*subsupports, **kwss):
mins, maxes = zip(*subsupports)
kwmins = { name: interval[0] for name, interval in kwss.items() }
kwmaxes = { name: interval[1] for name, interval in kwss.items() }
l = None if None in mins or None in kwmins else method(*mins, **kwmins)
r = None if None in maxes or None in kwmaxes else method(*maxes, **kwmaxes)
return l, r
return distributionFunction(method, support=support, valueType=valueType)
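# For instance, wrapping max() this way would let supportInterval() of the result be computed as
# (max(x_low, y_low), max(x_high, y_high)) from the operands' bounds (illustrative; max is not wrapped here).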
class StarredDistribution(Distribution):
"""A placeholder for the iterable unpacking operator * applied to a distribution."""
def __init__(self, value, lineno):
assert isinstance(value, Distribution)
self.value = value
self.lineno = lineno # for error handling when unpacking fails
super().__init__(value, valueType=value._valueType)
from json import dumps
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login as auth_login
from django.views.generic import TemplateView
from django.http import HttpResponse
from django.shortcuts import redirect
from django.contrib.messages import info
from django.utils.translation import ugettext_lazy as _
from django.db.models import Q
from django.db import transaction
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.core.mail import send_mail
from django.utils.http import int_to_base36
from django.template.response import TemplateResponse
from mezzanine.conf import settings
from mezzanine.generic.views import initial_validation
from mezzanine.utils.views import set_cookie, is_spam
from mezzanine.utils.cache import add_cache_bypass
from mezzanine.utils.email import send_verification_mail, send_approve_mail, subject_template, \
default_token_generator, send_mail_template
from mezzanine.utils.urls import login_redirect, next_url
from mezzanine.accounts.forms import LoginForm
from mezzanine.utils.views import render
from hs_core.views.utils import run_ssh_command
from hs_access_control.models import GroupMembershipRequest
from theme.forms import RatingForm, UserProfileForm, UserForm
from theme.models import UserProfile
from theme.utils import get_quota_message
from .forms import SignupForm
class UserProfileView(TemplateView):
template_name='accounts/profile.html'
def get_context_data(self, **kwargs):
if 'user' in kwargs:
try:
u = User.objects.get(pk=int(kwargs['user']))
except (ValueError, User.DoesNotExist):
u = User.objects.get(username=kwargs['user'])
else:
try:
u = User.objects.get(pk=int(self.request.GET['user']))
except (ValueError, User.DoesNotExist):
u = User.objects.get(username=self.request.GET['user'])
# get all resources the profile user owns
resources = u.uaccess.owned_resources
# get a list of groupmembershiprequests
group_membership_requests = GroupMembershipRequest.objects.filter(invitation_to=u).all()
# if requesting user is not the profile user, then show only resources that the requesting user has access
if self.request.user != u:
if self.request.user.is_authenticated():
if self.request.user.is_superuser:
# admin can see all resources owned by profile user
pass
else:
# filter out any resources the requesting user doesn't have access
resources = resources.filter(Q(pk__in=self.request.user.uaccess.view_resources) |
Q(raccess__public=True) | Q(raccess__discoverable=True))
else:
# for anonymous requesting user show only resources that are either public or discoverable
resources = resources.filter(Q(raccess__public=True) | Q(raccess__discoverable=True))
return {
'profile_user': u,
'resources': resources,
'quota_message': get_quota_message(u),
'group_membership_requests': group_membership_requests,
}
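# For example: an anonymous visitor to another user's profile sees only that user's public or discoverable resources;
# the profile owner and superusers see every owned resource; any other signed-in user additionally sees owned resources
# they have been granted view access to.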
# added by <NAME> to address issue #186 to customize Mezzanine-based rating form and view
def rating(request):
"""
Handle a ``RatingForm`` submission and redirect back to its
related object.
"""
response = initial_validation(request, "rating")
if isinstance(response, HttpResponse):
return response
obj, post_data = response
url = add_cache_bypass(obj.get_absolute_url().split("#")[0])
response = redirect(url + "#rating-%s" % obj.id)
resource_mode = post_data.get('resource-mode', 'view')
request.session['resource-mode'] = resource_mode
rating_form = RatingForm(request, obj, post_data)
if rating_form.is_valid():
rating_form.save()
if request.is_ajax():
# Reload the object and return the rating fields as json.
obj = obj.__class__.objects.get(id=obj.id)
rating_name = obj.get_ratingfield_name()
json = {}
for f in ("average", "count", "sum"):
json["rating_" + f] = getattr(obj, "%s_%s" % (rating_name, f))
response = HttpResponse(dumps(json))
ratings = ",".join(rating_form.previous + [rating_form.current])
set_cookie(response, "mezzanine-rating", ratings)
return response
def signup(request, template="accounts/account_signup.html", extra_context=None):
"""
Signup form. Overriding mezzanine's view function for signup submit
"""
form = SignupForm(request, request.POST, request.FILES)
if request.method == "POST" and form.is_valid():
try:
new_user = form.save()
except ValidationError as e:
messages.error(request, e.message)
return HttpResponseRedirect(request.META['HTTP_REFERER'])
else:
if not new_user.is_active:
if settings.ACCOUNTS_APPROVAL_REQUIRED:
send_approve_mail(request, new_user)
info(request, _("Thanks for signing up! You'll receive "
"an email when your account is activated."))
else:
send_verification_mail(request, new_user, "signup_verify")
info(request, _("A verification email has been sent with "
"a link for activating your account. If you "
"do not receive this email please check your "
"spam folder as sometimes the confirmation email "
"gets flagged as spam. If you entered an incorrect "
"email address, please request an account again."))
return redirect(next_url(request) or "/")
else:
info(request, _("Successfully signed up"))
auth_login(request, new_user)
return login_redirect(request)
# remove the key 'response' from errors as the user would have no idea what it means
form.errors.pop('response', None)
messages.error(request, form.errors)
# TODO: User entered data could be retained only if the following
# render function would work without messing up the css
# context = {
# "form": form,
# "title": _("Sign up"),
# }
# context.update(extra_context or {})
# return render(request, template, context)
# This one keeps the css but not able to retained user entered data.
return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def update_user_profile(request):
user = request.user
old_email = user.email
user_form = UserForm(request.POST, instance=user)
user_profile = UserProfile.objects.filter(user=user).first()
profile_form = UserProfileForm(request.POST, request.FILES, instance=user_profile)
try:
with transaction.atomic():
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
password1 = request.POST['<PASSWORD>']
password2 = request.POST['<PASSWORD>']
if len(password1) > 0:
if password1 == <PASSWORD>:
user.set_password(<PASSWORD>)
user.save()
else:
raise ValidationError("Passwords do not match.")
profile = profile_form.save(commit=False)
profile.user = request.user
profile.save()
messages.success(request, "Your profile has been successfully updated.")
# if email was updated, reset to old email and send confirmation
# email to the user's new email - email will be updated upon confirmation
if old_email != user.email:
new_email = user.email
user.email = old_email
user.save()
# send a confirmation email to the new email address
send_verification_mail_for_email_update(request, user, new_email, "email_verify")
info(request, _("A verification email has been sent to your new email with "
"a link for updating your email. If you "
"do not receive this email please check your "
"spam folder as sometimes the confirmation email "
"gets flagged as spam. If you entered an incorrect "
"email address, please request email update again. "
))
# send an email to the old address notifying the email change
message = """Dear {first_name}
<p>{c_name} received a request to change the email address associated with
{s_name} account {user_name} from {user_email} to {new_email}. You are receiving this email to the old
email address as a precaution. If this is correct you may ignore this email
and click on the link in the email sent to the new address to confirm this change.</p>
<p>If you did not originate this request, there is a danger someone else has
accessed your account. You should log into {s_name}, change your password,
and set the email address to the correct address. If you are unable to do this
contact {support}.</p>
<p>Thank you</p>
<p>The {s_name} Team</p>
""".format(first_name=user.first_name,
user_name=user.username,
user_email=user.email,
new_email=new_email,
c_name=settings.XDCI_SITE_NAME_CAPS,
s_name=settings.XDCI_SITE_NAME_MIXED,
support=settings.XDCI_SUPPORT_EMAIL)
subject = "Change of {s_name} email address.".format(s_name=settings.XDCI_SITE_NAME_MIXED)
send_mail(subject=subject,
message=message,
html_message=message,
from_email=settings.XDCI_FROM_EMAIL, recipient_list=[old_email], fail_silently=True)
else:
errors = {}
if not user_form.is_valid():
errors.update(user_form.errors)
if not profile_form.is_valid():
errors.update(profile_form.errors)
msg = ' '.join([err[0] for err in errors.values()])
messages.error(request, msg)
except Exception as ex:
messages.error(request, ex.message)
return HttpResponseRedirect(request.META['HTTP_REFERER'])
def send_verification_mail_for_email_update(request, user, new_email, verification_type):
"""
Sends an email with a verification link to users when
they update their email. The email is sent to the new email.
The actual update of the email happens only after
the verification link is clicked.
The ``verification_type`` arg is both the name of the urlpattern for
the verification link, as well as the names of the email templates
to use.
"""
verify_url = reverse(verification_type, kwargs={
"uidb36": int_to_base36(user.id),
"token": default_token_generator.make_token(user),
"new_email": new_email
}) + "?next=" + (next_url(request) or "/")
context = {
"request": request,
"user": user,
"new_email": new_email,
"verify_url": verify_url,
}
subject_template_name = "email/%s_subject.txt" % verification_type
subject = subject_template(subject_template_name, context)
send_mail_template(subject, "email/%s" % verification_type,
settings.DEFAULT_FROM_EMAIL, new_email,
context=context)
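# Illustrative sketch (assumption, not part of this module): the reverse() call
# above expects a urlpattern whose *name* matches ``verification_type`` (e.g.
# "email_verify") and which accepts uidb36, token and new_email. A minimal
# hypothetical wiring with a made-up URL regex could look like:
#
#     from django.conf.urls import url
#     from theme import views
#
#     urlpatterns = [
#         url(r'^email_verify/(?P<new_email>[^/]+)/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$',
#             views.email_verify, name='email_verify'),
#     ]
#
# The email templates are resolved from the same name, e.g.
# email/email_verify_subject.txt for the subject and email/email_verify for the body.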
def login(request, template="accounts/account_login.html",
form_class=LoginForm, extra_context=None):
"""
Login form - customized from Mezzanine login form so that quota warning message can be
displayed when the user is logged in.
"""
form = form_class(request.POST or None)
if request.method == "POST" and form.is_valid():
login_msg = "Successfully logged in"
authenticated_user = form.save()
# Comment out for now to hide quota info message until backend script is hooked up
# add_msg = get_quota_message(authenticated_user)
# if add_msg:
# login_msg += add_msg
info(request, _(login_msg))
auth_login(request, authenticated_user)
return login_redirect(request)
context = {"form": form, "title": _("Log in")}
context.update(extra_context or {})
return TemplateResponse(request, template, context)
def email_verify(request, new_email, uidb36=None, token=None):
"""
View for the link in the verification email sent to a user
when they update their email as part of profile update.
The user's email is set to new_email, the user is logged in,
and they are then redirected to their profile page.
"""
user = authenticate(uidb36=uidb36, token=token, is_active=True)
if user is not None:
user.email = new_email
user.save()
auth_login(request, user)
messages.info(request, _("Successfully updated email"))
# redirect to user profile page
return HttpResponseRedirect('/user/{}/'.format(user.id))
else:
messages.error(request, _("The link you clicked is no longer valid."))
return redirect("/")
@login_required
def deactivate_user(request):
user = request.user
user.is_active = False
user.save()
messages.success(request, "Your account has been successfully deactivated.")
return HttpResponseRedirect('/accounts/logout/')
@login_required
def delete_irods_account(request):
if request.method == 'POST':
user = request.user
try:
exec_cmd = "{0} {1}".format(settings.HS_USER_ZONE_PROXY_USER_DELETE_USER_CMD, user.username)
output = run_ssh_command(host=settings.HS_USER_ZONE_HOST, uname=settings.HS_USER_ZONE_PROXY_USER, pwd=settings.HS_USER_ZONE_PROXY_USER_PWD,
exec_cmd=exec_cmd)
if output:
if 'ERROR:' in output.upper():
# there is an error from icommand run, report the error
return HttpResponse(
dumps({"error": 'iRODS server failed to delete this iRODS account {0}. Check the server log for details.'.format(user.username)}),
| |
fr.write('}\n')
except KeyError:
pass
fr.write('}\n')
fr.close() # close .tf file
if cde:
with open(rfilename) as f:
print f.read()
tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
tfim.write('echo "importing ' + str(i) + ' of ' + str(count-1) + '"' + '\n')
tfcomm='terraform import '+tfp+'.'+rg+'__'+rname+' '+id+'\n'
tfim.write(tfcomm)
# end for i loop
tfrm.close()
tfim.close()
#end stub
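# Illustrative example (hypothetical names and IDs): every stub in this file
# appends one "terraform state rm" line and one "terraform import" line per
# discovered resource to the *-staterm.sh and *-stateimp.sh helper scripts.
# For a function app "myapp" in resource group "my-rg" the generated lines
# would look roughly like:
#
#   terraform state rm azurerm_function_app.my-rg__myapp
#   echo "importing 0 of 4"
#   terraform import azurerm_function_app.my-rg__myapp \
#       /subscriptions/<sub>/resourceGroups/my-rg/providers/Microsoft.Web/sites/myapp
#
# where <sub> is the subscription id passed into each stub.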
#
# azurerm_function_app
#
# azurerm_function_app
def azurerm_function_app(crf,cde,crg,headers,requests,sub,json,az2tfmess):
tfp="azurerm_function_app"
tcode="620-"
azr=""
if crf in tfp:
# REST or cli
# print "REST Function App"
url="https://management.azure.com/subscriptions/" + sub + "/providers/Microsoft.Web/sites"
params = {'api-version': '2018-02-01'}
r = requests.get(url, headers=headers, params=params)
azr= r.json()["value"]
tfrmf=tcode+tfp+"-staterm.sh"
tfimf=tcode+tfp+"-stateimp.sh"
tfrm=open(tfrmf, 'a')
tfim=open(tfimf, 'a')
print "# " + tfp,
count=len(azr)
print count
for i in range(0, count):
kind=azr[i]["kind"]
if kind != "functionapp": continue
name=azr[i]["name"]
loc=azr[i]["location"]
id=azr[i]["id"]
rg=id.split("/")[4].replace(".","-").lower()
rgs=id.split("/")[4]
if crg is not None:
if rgs.lower() != crg.lower():
continue # back to for
if cde:
print(json.dumps(azr[i], indent=4, separators=(',', ': ')))
rname=name.replace(".","-")
prefix=tfp+"."+rg+'__'+rname
#print prefix
rfilename=prefix+".tf"
fr=open(rfilename, 'w')
fr.write(az2tfmess)
fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
fr.write('\t name = "' + name + '"\n')
fr.write('\t location = "'+ loc + '"\n')
fr.write('\t resource_group_name = "'+ rgs + '"\n')
https=azr[i]["properties"]["httpsOnly"]
#prg=azr[i]["properties"]["serverFarmId"].split("/")[4].lower()
#pnam=azr[i]["properties"]["serverFarmId"].split("/")[8]
appplid=azr[i]["properties"]["serverFarmId"]
# case issues - so use resource id directly
# fr.write('\t app_service_plan_id = "${azurerm_app_service_plan. + '__' + .id}'"' prg pnam + '"\n')
fr.write('\t app_service_plan_id = "' + appplid + '"\n')
# dummy entry
fr.write('\t https_only = ' + str(https).lower() + '\n')
blog=False
strcon=""
url="https://management.azure.com/" + id + "/config/appsettings/list"
#print url
params = {'api-version': '2018-02-01'}
r = requests.post(url, headers=headers, params=params)
appset= r.json()
#print(json.dumps(appset, indent=4, separators=(',', ': ')))
fr.write('\t app_settings = { \n')
try:
strcon=appset["properties"]["AzureWebJobsStorage"]
except KeyError:
pass
try:
vers=appset["properties"]["FUNCTIONS_EXTENSION_VERSION"]
except KeyError:
pass
try:
aval=appset["properties"]["WEBSITE_NODE_DEFAULT_VERSION"]
fr.write('\t WEBSITE_NODE_DEFAULT_VERSION = "' + aval + '"\n')
except KeyError:
pass
try:
aval=appset["properties"]["FUNCTIONS_WORKER_RUNTIME"]
fr.write('\t FUNCTIONS_WORKER_RUNTIME = "' + aval + '"\n')
except KeyError:
pass
try:
aval=appset["properties"]["APPINSIGHTS_INSTRUMENTATIONKEY"]
fr.write('\t APPINSIGHTS_INSTRUMENTATIONKEY = "' + aval + '"\n')
except KeyError:
pass
try:
aval=appset["properties"]["mykey"]
fr.write('\t mykey = "' + aval + '"\n')
except KeyError:
pass
try:
aval=appset["properties"]["myten"]
fr.write('\t myten = "' + aval + '"\n')
except KeyError:
pass
try:
aval=appset["properties"]["usern"]
fr.write('\t usern = "' + aval + '"\n')
except KeyError:
pass
#if aname == "WEBSITE_CONTENTSHARE" or aname == "WEBSITE_CONTENTAZUREFILECONNECTIONSTRING":
try:
aval=appset["properties"]["AzureWebJobsDashboard"]
if len(aval) > 3:
blog=True
except KeyError:
pass
fr.write('\t }' + '\n')
if len(strcon) >= 3 :
fr.write('\t storage_connection_string = "' + strcon + '" \n')
else:
fr.write('\t storage_connection_string = ""\n')
fr.write('\t version = "' + vers + '"\n')
fr.write('\t enable_builtin_logging = ' + str(blog).lower() + '\n')
# tags block
try:
mtags=azr[i]["tags"]
fr.write('tags = { \n')
for key in mtags.keys():
tval=mtags[key]
fr.write('\t "' + key + '"="' + tval + '"\n')
fr.write('}\n')
except KeyError:
pass
fr.write('}\n')
fr.close() # close .tf file
if cde:
with open(rfilename) as f:
print f.read()
tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
tfim.write('echo "importing ' + str(i) + ' of ' + str(count-1) + '"' + '\n')
tfcomm='terraform import '+tfp+'.'+rg+'__'+rname+' '+id+'\n'
tfim.write(tfcomm)
# end for i loop
tfrm.close()
tfim.close()
#end stub
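# Illustrative example of the .tf file emitted by azurerm_function_app above
# (all values are hypothetical):
#
#   resource azurerm_function_app my-rg__myapp {
#        name = "myapp"
#        location = "westeurope"
#        resource_group_name = "my-rg"
#        app_service_plan_id = "/subscriptions/<sub>/.../serverfarms/my-plan"
#        https_only = false
#        app_settings = {
#        FUNCTIONS_WORKER_RUNTIME = "python"
#        WEBSITE_NODE_DEFAULT_VERSION = "10.14.1"
#        }
#        storage_connection_string = "DefaultEndpointsProtocol=https;..."
#        version = "~2"
#        enable_builtin_logging = false
#   }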
#
# azurerm_logic_app_workflow
#
# azurerm_dns_zone
def azurerm_logic_app_workflow(crf,cde,crg,headers,requests,sub,json,az2tfmess):
tfp="azurerm_logic_app_workflow"
tcode="630-"
azr=""
#cde=False
if crf in tfp:
# REST or cli
# print "REST Managed Disk"
url="https://management.azure.com/subscriptions/" + sub + "/providers/Microsoft.Logic/workflows"
#params = {'api-version': '2016-04-01'}
params = {'api-version': '2016-06-01'}
r = requests.get(url, headers=headers, params=params)
azr= r.json()["value"]
tfrmf=tcode+tfp+"-staterm.sh"
tfimf=tcode+tfp+"-stateimp.sh"
tfrm=open(tfrmf, 'a')
tfim=open(tfimf, 'a')
print "# " + tfp,
count=len(azr)
print count
for i in range(0, count):
name=azr[i]["name"]
loc=azr[i]["location"]
id=azr[i]["id"]
rg=id.split("/")[4].replace(".","-").lower()
rgs=id.split("/")[4]
if crg is not None:
if rgs.lower() != crg.lower():
continue # back to for
if cde:
print(json.dumps(azr[i], indent=4, separators=(',', ': ')))
rname=name.replace(".","-")
prefix=tfp+"."+rg+'__'+rname
#print prefix
rfilename=prefix+".tf"
fr=open(rfilename, 'w')
fr.write(az2tfmess)
fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
fr.write('\t name = "' + name + '"\n')
fr.write('\t location = "'+ loc + '"\n')
fr.write('\t resource_group_name = "'+ rgs + '"\n')
###############
# specific code start
###############
try:
params=azr[i]["properties"]["definition"]["parameters"]
#print params
lp=len(params)
if lp > 0:
fr.write('parameters = { \n')
fr.write('"$connections" = "" \n')
#fr.write(json.dumps(params))
fr.write('}\n')
except KeyError:
pass
fr.write('}\n')
fr.close() # close .tf file
if cde:
with open(rfilename) as f:
print f.read()
tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
tfim.write('echo "importing ' + str(i) + ' of ' + str(count-1) + '"' + '\n')
tfcomm='terraform import '+tfp+'.'+rg+'__'+rname+' '+id+'\n'
tfim.write(tfcomm)
# end for i loop
tfrm.close()
tfim.close()
#end stub
#
# azurerm_logic_app_trigger_http_request
#
# azurerm_dns_zone
def azurerm_logic_app_trigger_http_request(crf,cde,crg,headers,requests,sub,json,az2tfmess):
tfp="azurerm_logic_app_trigger_http_request"
tcode="631-"
azr=""
#cde=False
if crf in tfp:
# REST or cli
# print "REST Managed Disk"
url="https://management.azure.com/subscriptions/" + sub + "/providers/Microsoft.Logic/workflows"
#params = {'api-version': '2016-04-01'}
params = {'api-version': '2016-06-01'}
r = requests.get(url, headers=headers, params=params)
azr= r.json()["value"]
tfrmf=tcode+tfp+"-staterm.sh"
tfimf=tcode+tfp+"-stateimp.sh"
tfrm=open(tfrmf, 'a')
tfim=open(tfimf, 'a')
print "# " + tfp,
count=len(azr)
print count
for i in range(0, count):
try:
ttype=azr[i]["properties"]["definition"]["triggers"]["manual"]["kind"]
if ttype != "Http":
continue
name=azr[i]["name"]
loc=azr[i]["location"]
id=azr[i]["id"]
rg=id.split("/")[4].replace(".","-").lower()
rgs=id.split("/")[4]
if crg is not None:
if rgs.lower() != crg.lower():
continue # back to for
if cde:
print(json.dumps(azr[i], indent=4, separators=(',', ': ')))
rname=name.replace(".","-")
prefix=tfp+"."+rg+'__'+rname
#print prefix
rfilename=prefix+".tf"
fr=open(rfilename, 'w')
fr.write(az2tfmess)
fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
fr.write('\t name = "' + name + '"\n')
fr.write('\t logic_app_id = "${azurerm_logic_app_workflow.' + rg + '__' + rname + '.id}"' + '\n')
###############
# specific code start
###############
try:
params=azr[i]["properties"]["definition"]["triggers"]["manual"]["inputs"]["schema"]
#print(json.dumps(params, indent=4, separators=(',', ': ')))
fr.write('schema = jsonencode(\n')
fr.write(json.dumps(params, indent=4, separators=(',', ': ')))
fr.write(')\n')
except KeyError:
pass
fr.write('}\n')
fr.close() # close .tf file
if cde:
with open(rfilename) as f:
print f.read()
tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
tfim.write('echo "importing ' + str(i) + ' of ' + str(count-1) + '"' + '\n')
tfcomm='terraform import '+tfp+'.'+rg+'__'+rname+' '+id+'/triggers/' + name +'\n'
tfim.write(tfcomm)
except KeyError:
pass
# end for i loop
tfrm.close()
tfim.close()
#end stub
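# Note on the import address used above: unlike the other stubs, the HTTP
# trigger is imported with the parent workflow id plus "/triggers/<name>"
# appended, e.g. (hypothetical):
#
#   terraform import azurerm_logic_app_trigger_http_request.my-rg__my-flow \
#       /subscriptions/<sub>/resourceGroups/my-rg/providers/Microsoft.Logic/workflows/my-flow/triggers/my-flow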
#
# azurerm_monitor_autoscale_setting
#
# azurerm_monitor_autoscale_setting
import ast
def azurerm_monitor_autoscale_setting(crf, cde, crg, headers, requests, sub, json, az2tfmess):
tfp = "azurerm_monitor_autoscale_setting"
tcode = "650-"
azr = ""
if crf in tfp:
# REST or cli
# print "REST monitor autoscale"
url = "https://management.azure.com/subscriptions/" + \
sub + "/providers/microsoft.insights/autoscalesettings"
params = {'api-version': '2015-04-01'}
r = requests.get(url, headers=headers, params=params)
azr = r.json()["value"]
tfrmf = tcode+tfp+"-staterm.sh"
tfimf = tcode+tfp+"-stateimp.sh"
tfrm = open(tfrmf, 'a')
tfim = open(tfimf, 'a')
print "# " + tfp,
count = len(azr)
print count
for i in range(0, count):
name = azr[i]["name"]
loc = azr[i]["location"]
id = azr[i]["id"]
rg = id.split("/")[4].replace(".", "-").lower()
rgs = id.split("/")[4]
if crg is not None:
if rgs.lower() != crg.lower():
continue # back to for
if cde:
print(json.dumps(azr[i], indent=4, separators=(',', ': ')))
rname = name.replace(".", "-")
prefix = tfp+"."+rg+'__'+rname
#print prefix
rfilename = prefix+".tf"
fr = open(rfilename, 'w')
fr.write(az2tfmess)
fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
fr.write('name = "' + name + '"\n')
fr.write('location = "' + loc + '"\n')
fr.write('resource_group_name = "' + rgs + '"\n')
en = azr[i]["properties"]["enabled"]
# basic settings
fr.write('enabled = ' + str(en).lower() + '\n')
try:
triid = azr[i]["properties"]["targetResourceUri"]
parts = triid.split("/")
#print "parts=" + str(len(parts))
trrg = azr[i]["properties"]["targetResourceUri"].split(
"/")[4].replace(".", "-").lower()
trty = azr[i]["properties"]["targetResourceUri"].split(
"/")[6].replace(".", "-")
trid = azr[i]["properties"]["targetResourceUri"].split(
"/")[8].replace(".", "-")
# assume trty = Microsoft.Compute
tftyp = "azurerm_virtual_machine_scale_set"
if trty == "Microsoft-Web":
tftyp = "azurerm_app_service_plan"
# case sensitite so use actual ID
fr.write('target_resource_id = "' + triid + '"\n')
#fr.write('target_resource_id = "${'+ tftyp + '.' + trrg + '__' + trid+'.id}"\n')
except KeyError:
pass
# profiles block
try:
profs = azr[i]["properties"]["profiles"]
icount = len(profs)
if icount > 0:
for j in range(0, icount):
fr.write('profile {\n')
pn = azr[i]["properties"]["profiles"][j]["name"]
pn = pn.replace('"', '\\"')
# pn="dummy"
# pn=pn.replace('{','\{')
cdef = azr[i]["properties"]["profiles"][j]["capacity"]["default"]
cmin = azr[i]["properties"]["profiles"][j]["capacity"]["minimum"]
cmax = azr[i]["properties"]["profiles"][j]["capacity"]["maximum"]
fr.write('\tname = "'+pn + '"\n')
# capacity
fr.write('\tcapacity | |
tableToMarkdown(
"Execute Job:", filtered_results, headers=headers, headerTransform=pascalToSpace
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Rundeck.ExecutedJobs",
outputs=filtered_results,
outputs_key_field="id",
)
def project_list_command(client: Client):
"""
This function returns a list of all existing projects.
:param client: Demisto client
:return: CommandResults object
"""
demisto.info("sending get project list request")
result = client.get_project_list()
demisto.info("finish get project list request")
if not isinstance(result, list):
raise DemistoException(f"Got unexpected output from api: {result}")
filtered_results = filter_results(result, ["url"], ["-"])
headers = [key.replace("_", " ") for key in [*filtered_results[0].keys()]]
readable_output = tableToMarkdown(
"Projects List:",
filtered_results,
headers=headers,
headerTransform=pascalToSpace,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Rundeck.Projects",
outputs=filtered_results,
outputs_key_field="name",
)
def jobs_list_command(client: Client, args: dict):
"""
This function returns a list of all existing jobs.
:param client: Demisto client
:param args: command's arguments
:return: CommandResults object
"""
id_list: list = argToList(args.get("id_list", []))
group_path: str = args.get("group_path", "")
job_filter: str = args.get("job_filter", "")
job_exec_filter: str = args.get("job_exec_filter", "")
group_path_exact: str = args.get("group_path_exact", "")
scheduled_filter: str = args.get("scheduled_filter", "")
server_node_uuid_filter: str = args.get("server_node_uuid_filter", "")
max_results: Optional[int] = convert_str_to_int(
args.get("max_results", ""), "max_results"
)
project_name: str = args.get("project_name", "")
demisto.info("sending get jobs list request")
result = client.get_jobs_list(
id_list,
group_path,
job_filter,
job_exec_filter,
group_path_exact,
scheduled_filter,
server_node_uuid_filter,
project_name,
)
demisto.info("finish sending get jobs list request")
if not isinstance(result, list):
raise DemistoException(f"Got unexpected output from api: {result}")
if result:
max_entries: list = result[:max_results] if max_results else result[
:MAX_RESULTS
]
filtered_results = filter_results(max_entries, ["href", "permalink"], ["-"])
headers = [key.replace("_", " ") for key in [*filtered_results[0].keys()]]
readable_output = tableToMarkdown(
"Jobs List:",
filtered_results,
headers=headers,
headerTransform=pascalToSpace,
)
else:
filtered_results = result
readable_output = "No results were found"
return CommandResults(
readable_output=readable_output,
outputs_prefix="Rundeck.Jobs",
outputs=filtered_results,
outputs_key_field="id",
)
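# Illustrative call (hypothetical argument values): the ``args`` dict arrives
# from the platform with string values, so an invocation such as
#
#     jobs_list_command(client, {
#         "id_list": "3,7",        # argToList -> ["3", "7"]
#         "project_name": "demo",
#         "max_results": "5",      # convert_str_to_int -> 5
#     })
#
# returns at most 5 jobs from the "demo" project; when "max_results" is omitted
# the list is capped at MAX_RESULTS.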
def webhooks_list_command(client: Client, args: dict):
"""
This function returns a list of all existing webhooks.
:param client: Demisto client
:return: CommandResults object
"""
project_name: str = args.get("project_name", "")
max_results: Optional[int] = convert_str_to_int(args.get('max_results', ''), 'max_results')
demisto.info("sending get webhooks list request")
result = client.get_webhooks_list(project_name)
demisto.info("finish sending get webhooks list request")
if not isinstance(result, list):
raise DemistoException(f"Got unexpected output from api: {result}")
returned_results = result[:max_results] if max_results else result[:MAX_RESULTS]
headers = [key.replace("_", " ") for key in [*returned_results[0].keys()]] if returned_results else []
readable_output = tableToMarkdown(
"Webhooks List:", returned_results, headers=headers, headerTransform=pascalToSpace
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Rundeck.Webhooks",
outputs=returned_results,
outputs_key_field="id",
)
def job_execution_query_command(client: Client, args: dict):
"""
This function returns a list of all existing executions.
:param client: Demisto client
:param args: command's arguments
:return: CommandResults object
"""
status_filter: str = args.get("status_filter", "")
aborted_by_filter: str = args.get("aborted_by_filter", "")
user_filter: str = args.get("user_filter", "")
recent_filter: str = args.get("recent_filter", "")
older_filter: str = args.get("older_filter", "")
begin: str = args.get("begin", "")
end: str = args.get("end", "")
adhoc: str = args.get("adhoc", "")
job_id_list_filter: list = argToList(args.get("job_id_list_filter", []))
exclude_job_id_list_filter: list = argToList(
args.get("exclude_job_id_list_filter", [])
)
job_list_filter: list = argToList(args.get("job_list_filter", []))
exclude_job_list_filter: list = argToList(args.get("exclude_job_list_filter", []))
group_path: str = args.get("group_path", "")
group_path_exact: str = args.get("group_path_exact", "")
exclude_group_path_exact: str = args.get("exclude_group_path_exact", "")
job_filter: str = args.get("job_filter", "")
exclude_job_filter: str = args.get("exclude_job_filter", "")
job_exact_filter: str = args.get("job_exact_filter", "")
exclude_job_exact_filter: str = args.get("exclude_job_exact_filter", "")
execution_type_filter: str = args.get("execution_type_filter", "")
max_results: Optional[int] = convert_str_to_int(args.get("max_results"), "max")
offset: Optional[int] = convert_str_to_int(args.get("offset"), "offset")
project_name: str = args.get("project_name", "")
exclude_group_path: str = args.get("exclude_group_path", "")
demisto.info("sending job execution query request")
result = client.job_execution_query(
status_filter,
aborted_by_filter,
user_filter,
recent_filter,
older_filter,
begin,
end,
adhoc,
job_id_list_filter,
exclude_job_id_list_filter,
job_list_filter,
exclude_job_list_filter,
group_path,
group_path_exact,
exclude_group_path,
exclude_group_path_exact,
job_filter,
exclude_job_filter,
job_exact_filter,
exclude_job_exact_filter,
execution_type_filter,
max_results,
offset,
project_name,
)
demisto.info("finish sending job execution query request")
if not isinstance(result, dict):
raise DemistoException(f"got unexpected results from api: {result}")
executions: list = result.get("executions", [])
demisto.info("start filter results from the api")
filtered_executions = filter_results(executions, ["href", "permalink"], ["-"])
demisto.info("finish filter results from the api")
if isinstance(filtered_executions, list):
headers = [key.replace("_", " ") for key in [*filtered_executions[0].keys()]]
else:
raise DemistoException(f"Got unexpected results from the api: {result}")
readable_output = tableToMarkdown(
f'Job Execution Query - got total results: {result.get("paging",{}).get("total")}',
filtered_executions,
headers=headers,
headerTransform=pascalToSpace,
)
result["executions"] = filtered_executions
return CommandResults(
readable_output=readable_output,
outputs_prefix="Rundeck.ExecutionsQuery",
outputs=result,
outputs_key_field="id",
)
def job_execution_output_command(client: Client, args: dict):
"""
This function gets metadata regarding workflow state
:param client: demisto client object
:param args: command's arguments
:return: CommandResults object
"""
execution_id: Optional[int] = convert_str_to_int(
args.get("execution_id"), "execution_id"
)
return_full_output: bool = argToBoolean(args.get("return_full_output", False))
max_results: Optional[int] = convert_str_to_int(
args.get("max_results", ""), "max_results"
)
aggregate_log: bool = argToBoolean(args.get("aggregate_log", False))
demisto.info("sending job execution output request")
result: dict = client.job_execution_output(execution_id) # type: ignore
demisto.info("finish sending job execution output request")
if not isinstance(result, dict):
raise DemistoException(f"Got unexpected response: {result}")
headers_general = [key.replace("_", " ") for key in [*result.keys()]]
readable_output_general = tableToMarkdown(
"Job Execution Output:",
result,
headers=headers_general,
headerTransform=pascalToSpace,
)
if result["entries"]:
result["entries"] = result["entries"][:max_results] if max_results else result["entries"][:MAX_RESULTS]
readable_output_entries = tableToMarkdown(
"Job Execution Entries View:",
result["entries"],
headers=collect_headers(result["entries"]),
headerTransform=pascalToSpace,
)
if aggregate_log:
result["logEntries"] = collect_log_from_output(result["entries"])
human_readable = readable_output_general + readable_output_entries
else:
human_readable = readable_output_general
if return_full_output:
return fileResult(args.get("execution_id"), json.dumps(result))
else:
return CommandResults(
readable_output=human_readable,
outputs_prefix="Rundeck.ExecutionsOutput",
outputs=result,
outputs_key_field="id",
)
def job_execution_abort_command(client: Client, args: dict):
"""
This function aborts an active execution
:param client: demisto client object
:param args: command's arguments
:return: CommandResults object
"""
execution_id: Optional[int] = convert_str_to_int(
args.get("execution_id"), "execution_id"
)
demisto.info("sending job execution abort request")
result = client.job_execution_abort(execution_id) # type: ignore
demisto.info("finish sending job execution abort request")
if not isinstance(result, dict):
raise DemistoException(f"Got unexpected response: {result}")
demisto.info("start filter results from the api")
filtered_results: dict = filter_results(result, ["href", "permalink"], ["-"]) # type: ignore
demisto.info("finish filter results from the api")
headers = [key.replace("_", " ") for key in [*filtered_results.keys()]]
readable_output = tableToMarkdown(
"Job Execution Abort:",
filtered_results,
headers=headers,
headerTransform=pascalToSpace,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Rundeck.Aborted",
outputs=filtered_results,
outputs_key_field="id",
)
def adhoc_run_command(client: Client, args: dict):
project_name: str = args.get("project_name", "")
exec_command: str = args.get("exec_command", "")
node_thread_count: str = args.get("node_thread_count", "")
node_keepgoing: str = args.get("node_keepgoing", "")
as_user: str = args.get("as_user", "")
node_filter: str = args.get("node_filter", "")
demisto.info("sending adhoc run request")
result = client.adhoc_run(
project_name,
exec_command,
node_thread_count,
node_keepgoing,
as_user,
node_filter,
)
demisto.info("finish sending adhoc run request")
if not isinstance(result, dict):
raise DemistoException(f"Got unexpected response: {result}")
demisto.info("start filter results from the api")
filtered_results: dict = filter_results(result, ["href", "permalink"], ["-"]) # type: ignore
demisto.info("finish filter results from the api")
headers = [key.replace("_", " ") for key in [*filtered_results.keys()]]
readable_output = tableToMarkdown(
"Adhoc Run:", filtered_results, headers=headers, headerTransform=pascalToSpace
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Rundeck.ExecuteCommand",
outputs=filtered_results,
outputs_key_field="id",
)
def adhoc_script_run_command(client: Client, args: dict):
project_name: str = args.get("project_name", "")
arg_string: str = args.get("arg_string", "")
node_thread_count: str = args.get("node_thread_count", "")
node_keepgoing: str = args.get("node_keepgoing", "")
as_user: str = args.get("as_user", "")
script_interpreter: str = args.get("script_interpreter", "")
interpreter_args_quoted: str = args.get("interpreter_args_quoted", "")
file_extension: str = args.get("file_extension", "")
node_filter: str = args.get("node_filter", "")
entry_id: str = args.get("entry_id", "")
demisto.info("sending adhoc script run request")
result = client.adhoc_script_run(
project_name,
arg_string,
node_thread_count,
node_keepgoing,
as_user,
node_filter,
script_interpreter,
interpreter_args_quoted,
file_extension,
entry_id,
)
demisto.info("finish sending adhoc script run request")
if not isinstance(result, dict):
raise DemistoException(f"Got unexpected response: {result}")
demisto.info("start filter results from the api")
filtered_results: dict = filter_results(result, ["href", "permalink"], ["-"]) # type: ignore
demisto.info("finish filter results from the api")
headers = [key.replace("_", " ") for key in [*filtered_results.keys()]]
readable_output = tableToMarkdown(
"Adhoc Run Script:",
filtered_results,
headers=headers,
headerTransform=pascalToSpace,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Rundeck.ExecuteScriptFile",
outputs=filtered_results,
outputs_key_field="id",
)
def adhoc_script_run_from_url_command(client: Client, args: dict):
project_name: str = args.get("project_name", "")
script_url: str = args.get("script_url", "")
node_thread_count: str = args.get("node_thread_count", "")
node_keepgoing: str = args.get("node_keepgoing", "")
as_user: str = args.get("as_user", "")
script_interpreter: str = args.get("script_interpreter", "")
interpreter_args_quoted: str = args.get("interpreter_args_quoted", "")
file_extension: str = args.get("file_extension", "")
node_filter: str = args.get("node_filter", "")
arg_string: str = args.get("arg_string", "")
result = client.adhoc_script_run_from_url(
project_name,
script_url,
node_thread_count,
node_keepgoing,
as_user,
node_filter,
script_interpreter,
interpreter_args_quoted,
file_extension,
arg_string,
)
if not isinstance(result, dict):
raise DemistoException(f"Got unexpected response: {result}")
filtered_results: dict = filter_results(result, ["href", "permalink"], ["-"]) # type: ignore
headers = [key.replace("_", " ") for key in [*filtered_results.keys()]]
readable_output = tableToMarkdown(
"Adhoc Run Script From Url:",
filtered_results,
headers=headers,
headerTransform=pascalToSpace,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Rundeck.ScriptExecutionFromUrl",
outputs=filtered_results,
outputs_key_field="id",
)
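# Sketch of how these command functions are typically dispatched from an
# integration's main() (the command names below are assumptions, not taken
# from this excerpt):
#
#     command = demisto.command()
#     if command == "rundeck-projects-list":
#         return_results(project_list_command(client))
#     elif command == "rundeck-jobs-list":
#         return_results(jobs_list_command(client, demisto.args()))
#     elif command == "rundeck-webhook-event-send":
#         return_results(webhook_event_send_command(client, demisto.args()))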
def webhook_event_send_command(client: Client, args: dict):
auth_token = args.get("auth_token", "")
options: str = args.get("options", "")
free_json: str = args.get("json", "")
options_as_dict: dict = attribute_pairs_to_dict(options)
try:
demisto.info('start convert "options" argument to str')
if options_as_dict:
options_as_str: str = json.dumps(options_as_dict)
else:
options_as_str = free_json
demisto.info('finish convert "options" argument to str')
except Exception as e:
raise DemistoException(
f'There was a problem converting "json" to json. The reason is: {e}'
)
result = | |
# coding=utf-8#
# Copyright (c) 2021, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import datetime
# import hashlib
# import json
import logging as std_logging
import os
import signal
# import urllib
# from eventlet import greenthread
from time import strftime
# from time import time
# from requests import HTTPError
from oslo_config import cfg
# from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import importutils
from f5_endpoint_agent.endpoint.drivers.bigip import exceptions as f5ex
from f5.bigip import ManagementRoot
from f5_endpoint_agent.endpoint.drivers.bigip import constants_v2 as f5const
from f5_endpoint_agent.endpoint.drivers.bigip import stat_helper
from f5_endpoint_agent.endpoint.drivers.bigip.utils import serialized
from f5_endpoint_agent.endpoint.drivers.bigip.endpoint_driver import \
EndpointBaseDriver
LOG = logging.getLogger(__name__)
__VERSION__ = '0.1.1'
OPTS = [
cfg.StrOpt(
'f5_ha_type', default='pair',
help='Are we standalone, pair(active/standby), or scalen'
),
cfg.StrOpt(
'icontrol_hostname',
default="10.190.5.7",
help='The hostname (name or IP address) to use for iControl access'
),
cfg.StrOpt(
'icontrol_username', default='admin',
help='The username to use for iControl access'
),
cfg.StrOpt(
'icontrol_password', default='<PASSWORD>', secret=True,
help='The password to use for iControl access'
),
cfg.IntOpt(
'icontrol_connection_timeout', default=30,
help='How many seconds to timeout a connection to BIG-IP'
),
cfg.IntOpt(
'icontrol_connection_retry_interval', default=10,
help='How many seconds to wait between retry connection attempts'
),
cfg.StrOpt(
'icontrol_config_mode', default='objects',
help='Whether to use iapp or objects for bigip configuration'
),
cfg.StrOpt(
'auth_version',
default=None,
help='Keystone authentication version (v2 or v3) for Barbican client.'
),
cfg.StrOpt(
'os_project_id',
default='service',
help='OpenStack project ID.'
),
cfg.StrOpt(
'os_auth_url',
default=None,
help='OpenStack authentication URL.'
),
cfg.StrOpt(
'os_username',
default=None,
help='OpenStack user name for Keystone authentication.'
),
cfg.StrOpt(
'os_user_domain_name',
default=None,
help='OpenStack user domain name for Keystone authentication.'
),
cfg.StrOpt(
'os_project_name',
default=None,
help='OpenStack project name for Keystone authentication.'
),
cfg.StrOpt(
'os_project_domain_name',
default=None,
help='OpenStack domain name for Keystone authentication.'
),
cfg.StrOpt(
'os_password',
default=None,
help='OpenStack user password for Keystone authentication.'
),
cfg.StrOpt(
'f5_parent_ssl_profile',
default='clientssl',
help='Parent profile used when creating client SSL profiles '
'for listeners with TERMINATED_HTTPS protocols.'
),
cfg.StrOpt(
'os_tenant_name',
default=None,
help='OpenStack tenant name for Keystone authentication (v2 only).'
)
]
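# The options above are registered with oslo.config (see register_opts in
# iControlDriver.__init__ below), so the agent configuration file would carry
# a stanza roughly like the following (hypothetical values):
#
#   [DEFAULT]
#   f5_ha_type = pair
#   icontrol_hostname = 192.0.2.10,192.0.2.11
#   icontrol_username = admin
#   icontrol_connection_timeout = 30
#
# A comma-separated icontrol_hostname is split into the per-device list in
# _init_bigip_hostnames().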
def is_operational(method):
# Decorator to check we are operational before provisioning.
def wrapper(*args, **kwargs):
instance = args[0]
if instance.operational:
try:
return method(*args, **kwargs)
except IOError as ioe:
LOG.error('IO Error detected: %s' % method.__name__)
LOG.error(str(ioe))
raise ioe
else:
LOG.error('Cannot execute %s. Not operational. Re-initializing.'
% method.__name__)
instance._init_bigips()
return wrapper
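# Usage sketch (hypothetical method name, not part of this excerpt): the
# decorator guards provisioning entry points on a driver instance.
#
#     class SomeDriver(object):
#         operational = True
#
#         def _init_bigips(self):
#             pass
#
#         @is_operational
#         def create_endpoint(self, service):
#             return "provisioned"
#
# When ``operational`` is False the wrapped call is skipped (returning None)
# and _init_bigips() is invoked to re-establish connectivity.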
class iControlDriver(EndpointBaseDriver):
"""Control service deployment."""
positive_plugin_const_state = \
tuple([f5const.F5_ACTIVE, f5const.F5_PENDING_CREATE,
f5const.F5_PENDING_UPDATE])
def __init__(self, conf, registerOpts=True):
# The registerOpts parameter allows a test to
# turn off config option handling so that it can
# set the options manually instead.
super(iControlDriver, self).__init__(conf)
self.conf = conf
if registerOpts:
self.conf.register_opts(OPTS)
self.initialized = False
self.hostnames = None
self.device_type = conf.f5_device_type
self.plugin_rpc = None # overrides base, same value
self.agent_report_state = None # overrides base, same value
self.operational = False # overrides base, same value
self.driver_name = 'f5-endpoint-icontrol'
#
# BIG-IP containers
#
# BIG-IPs which are currently active
self.__bigips = {}
self.__last_connect_attempt = None
# HA and traffic group validation
self.ha_validated = False
self.tg_initialized = False
# traffic groups discovered from BIG-IPs for service placement
self.__traffic_groups = []
# base configurations to report to Neutron agent state reports
self.agent_configurations = {} # overrides base, same value
self.agent_configurations['device_drivers'] = [self.driver_name]
self.agent_configurations['icontrol_endpoints'] = {}
# to store the verified esd names
self.esd_names = []
self.esd_processor = None
# service component managers
self.tenant_manager = None
self.cluster_manager = None
self.system_helper = None
self.lbaas_builder = None
self.service_adapter = None
self.vlan_binding = None
self.l3_binding = None
self.cert_manager = None # overrides register_OPTS
# server helpers
self.stat_helper = stat_helper.StatHelper()
# self.network_helper = network_helper.NetworkHelper()
if self.conf.password_cipher_mode:
self.conf.icontrol_password = \
<PASSWORD>(self.conf.icontrol_password)
if self.conf.os_password:
self.conf.os_password = <PASSWORD>(self.conf.os_password)
try:
# debug logging of service requests received by driver
if self.conf.trace_service_requests:
path = '/var/log/neutron/service/'
if not os.path.exists(path):
os.makedirs(path)
self.file_name = path + strftime("%H%M%S-%m%d%Y") + '.json'
with open(self.file_name, 'w') as fp:
fp.write('[{}] ')
# driver mode settings - GRM vs L2 adjacent
if self.conf.f5_global_routed_mode:
LOG.info('WARNING - f5_global_routed_mode enabled.'
' There will be no L2 or L3 orchestration'
' or tenant isolation provisioned. All vips'
' and pool members must be routable through'
' pre-provisioned SelfIPs.')
self.conf.use_namespaces = False
self.conf.f5_snat_mode = True
self.conf.f5_snat_addresses_per_subnet = 0
self.agent_configurations['tunnel_types'] = []
self.agent_configurations['bridge_mappings'] = {}
else:
self.agent_configurations['tunnel_types'] = \
self.conf.advertised_tunnel_types
for net_id in self.conf.common_network_ids:
LOG.debug('network %s will be mapped to /Common/%s'
% (net_id, self.conf.common_network_ids[net_id]))
self.agent_configurations['common_networks'] = \
self.conf.common_network_ids
LOG.debug('Setting static ARP population to %s'
% self.conf.f5_populate_static_arp)
self.agent_configurations['f5_common_external_networks'] = \
self.conf.f5_common_external_networks
f5const.FDB_POPULATE_STATIC_ARP = \
self.conf.f5_populate_static_arp
# parse the icontrol_hostname setting
self._init_bigip_hostnames()
# instantiate the managers
self._init_bigip_managers()
self.initialized = True
LOG.debug('iControlDriver loaded successfully')
except Exception as exc:
LOG.error("exception in intializing driver %s" % str(exc))
self._set_agent_status(False)
def connect(self):
# initialize communications with BIG-IP via iControl
try:
self._init_bigips()
except Exception as exc:
LOG.error("exception in intializing communications to BIG-IPs %s"
% str(exc))
self._set_agent_status(False)
def _init_bigip_managers(self):
if self.conf.vlan_binding_driver:
try:
self.vlan_binding = importutils.import_object(
self.conf.vlan_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import VLAN binding driver: %s'
% self.conf.vlan_binding_driver)
if self.conf.l3_binding_driver:
try:
self.l3_binding = importutils.import_object(
self.conf.l3_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import L3 binding driver: %s'
% self.conf.l3_binding_driver)
else:
LOG.debug('No L3 binding driver configured.'
' No L3 binding will be done.')
if self.conf.cert_manager:
try:
self.cert_manager = importutils.import_object(
self.conf.cert_manager, self.conf)
except ImportError as import_err:
LOG.error('Failed to import CertManager: %s.' %
import_err.message)
raise
except Exception as err:
LOG.error('Failed to initialize CertManager. %s' % err.message)
# re-raise as ImportError to cause agent exit
raise ImportError(err.message)
def _init_bigip_hostnames(self):
# Validate and parse bigip credentials
if not self.conf.icontrol_hostname:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_hostname',
opt_value='valid hostname or IP address'
)
if not self.conf.icontrol_username:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_username',
opt_value='valid username'
)
if not self.conf.icontrol_password:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_password',
opt_value='valid password'
)
self.hostnames = self.conf.icontrol_hostname.split(',')
self.hostnames = [item.strip() for item in self.hostnames]
self.hostnames = sorted(self.hostnames)
# initialize per host agent_configurations
for hostname in self.hostnames:
self.__bigips[hostname] = bigip = type('', (), {})()
bigip.hostname = hostname
bigip.status = 'creating'
bigip.status_message = 'creating BIG-IP from iControl hostnames'
bigip.device_interfaces = dict()
self.agent_configurations[
'icontrol_endpoints'][hostname] = {}
self.agent_configurations[
'icontrol_endpoints'][hostname]['failover_state'] = \
'undiscovered'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status'] = 'unknown'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status_message'] = ''
def _init_bigips(self):
# Connect to all BIG-IPs
LOG.debug('initializing communications to BIG-IPs')
try:
# setup logging options
if not self.conf.debug:
requests_log = std_logging.getLogger(
"requests.packages.urllib3")
requests_log.setLevel(std_logging.ERROR)
requests_log.propagate = False
else:
requests_log = std_logging.getLogger(
"requests.packages.urllib3")
requests_log.setLevel(std_logging.DEBUG)
requests_log.propagate = True
self.__last_connect_attempt = datetime.datetime.now()
for hostname in self.hostnames:
# connect to each BIG-IP and set it status
bigip = self._open_bigip(hostname)
if bigip.status == 'active':
continue
if bigip.status == 'connected':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
device_group_name = None
if not self.ha_validated:
device_group_name = self._validate_ha(bigip)
LOG.debug('HA validated from %s with DSG %s' %
(hostname, device_group_name))
self.ha_validated = True
if not self.tg_initialized:
self._init_traffic_groups(bigip)
LOG.debug('learned traffic groups from %s as %s' %
(hostname, self.__traffic_groups))
self.tg_initialized = True
LOG.debug('initializing bigip %s' % hostname)
self._init_bigip(bigip, hostname, device_group_name)
LOG.debug('initializing agent configurations %s'
% hostname)
self._init_agent_config(bigip)
# Assure basic BIG-IP HA is operational
LOG.debug('validating HA state for %s' % hostname)
bigip.status = 'validating_HA'
bigip.status_message = 'validating the current HA state'
if self._validate_ha_operational(bigip):
LOG.debug('setting status to active for %s' % hostname)
bigip.status = 'active'
bigip.status_message = 'BIG-IP ready for provisioning'
self._post_init()
else:
LOG.debug('setting status to error for %s' % hostname)
bigip.status = 'error'
bigip.status_message = 'BIG-IP is not operational'
self._set_agent_status(False)
else:
LOG.error('error opening BIG-IP %s - %s:%s'
% (hostname, bigip.status, bigip.status_message))
self._set_agent_status(False)
except Exception as exc:
LOG.error('Invalid agent configuration: %s' % exc.message)
raise
self._set_agent_status(force_resync=True)
def _init_errored_bigips(self):
try:
errored_bigips = self.get_errored_bigips_hostnames()
if errored_bigips:
LOG.debug('attempting to recover %s BIG-IPs' %
len(errored_bigips))
for hostname in errored_bigips:
# try to connect and set status
bigip = self._open_bigip(hostname)
if bigip.status == 'connected':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
LOG.debug('proceeding to initialize %s' | |
#!/usr/bin/env python
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
# Icebridge utility functions
import os, sys, datetime, time, subprocess, logging, re, hashlib, string
import psutil, errno, getpass, glob
# The path to the ASP python files
basepath = os.path.abspath(sys.path[0])
pythonpath = os.path.abspath(basepath + '/../IceBridge') # for dev ASP
pythonpath = os.path.abspath(basepath + '/../Python') # for dev ASP
libexecpath = os.path.abspath(basepath + '/../libexec') # for packaged ASP
sys.path.insert(0, basepath) # prepend to Python path
sys.path.insert(0, pythonpath)
sys.path.insert(0, libexecpath)
import asp_system_utils, asp_alg_utils, asp_geo_utils, asp_image_utils, asp_file_utils
asp_system_utils.verify_python_version_is_supported()
def switchWorkDir():
'''A work directory must be set before running a qsub job, and here
we switch to it.'''
workDir = ""
if 'OIB_WORK_DIR' in os.environ:
workDir = os.environ['OIB_WORK_DIR']
if os.path.isdir(workDir):
os.chdir(workDir)
else:
raise Exception("Work directory does not exist: " + workDir)
def getUser():
'''Return the current user name.'''
return getpass.getuser()
def fullPath(script):
'''The full path to a script on the icebridge folder.'''
basepath = os.path.dirname(os.path.realpath(__file__))
return os.path.join(basepath, script)
def outputFolder(site, yyyymmdd):
'''The output folder name.'''
return site + '_' + yyyymmdd
def makeSymLink(oldFile, newFile, verbose=True):
'''Safely create a symlink'''
oldPath = os.path.abspath(oldFile)
try:
asp_system_utils.mkdir_p(os.path.dirname(newFile))
if verbose:
print("ln -s " + oldPath + " " + newFile)
os.symlink(oldPath, newFile)
except OSError, e:
if e.errno == errno.EEXIST:
os.remove(newFile)
os.symlink(oldPath, newFile)
def getSmallestFrame():
'''Return the smallest possible frame number.'''
return 0
def getLargestFrame():
'''Return the largest possible frame number.'''
return 99999999 # 100 million should be enough
def fileExtension(filename):
'''Convenience function to get the file extension.'''
return os.path.splitext(filename)[1]
def hasImageExtension(filename):
'''Return true if the file is a recognized image extension.'''
extension = fileExtension(filename).lower()
validExtensions = ['.tif', '.jpg', '.jpeg', '.ntf']
if extension in validExtensions:
return True
return False
def getRunStatsFile():
return 'runStats.txt'
def getCameraFolder(outputFolder):
return os.path.join(outputFolder, 'camera')
def getImageFolder(outputFolder):
return os.path.join(outputFolder, 'image')
def getJpegFolder(outputFolder):
return os.path.join(outputFolder, 'jpeg')
def getOrthoFolder(outputFolder):
return os.path.join(outputFolder, 'ortho')
def getFireballFolder(outputFolder):
return os.path.join(outputFolder, 'fireball')
def getCorrFireballFolder(outputFolder):
return os.path.join(outputFolder, 'corr_fireball')
def getLidarFolder(outputFolder):
return os.path.join(outputFolder, 'lidar')
def getProcessedFolder(outputFolder):
return os.path.join(outputFolder, 'processed')
def getPairedLidarFolder(lidarFolder):
return os.path.join(lidarFolder, 'paired')
def getNavFolder(outputFolder):
return os.path.join(outputFolder, 'nav')
def getNavCameraFolder(outputFolder):
return os.path.join(outputFolder, 'nav_camera')
def getLabelFolder(outputFolder):
return os.path.join(outputFolder, 'labeled')
def getConvertedLidarIndexFile(lidarFolder):
return os.path.join(lidarFolder, 'converted_lidar_index.csv')
def getPairedIndexFile(pairedFolder):
return os.path.join(pairedFolder, 'paired_lidar_index.csv')
def folderToType(folder):
'''If input is myRun/ortho, return "ortho". Same for "fireball", "lidar", etc.'''
return os.path.basename(folder)
def htmlIndexFile(folder):
'''Return the html index file for this folder (if appropriate)'''
return os.path.join(folder, folderToType(folder) + "_index.html")
def csvIndexFile(folder):
'''Return the clean csv version of the html index file for this folder (if appropriate) '''
return htmlIndexFile(folder) + ".csv"
def getJpegDateTime(filepath):
'''Get the date and time from a raw jpeg file.'''
# TODO: For some files it is probably in the name.
# Use this tool to extract the metadata
cmd = [asp_system_utils.which('gdalinfo'), filepath]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, err = p.communicate()
lines = out.split('\n')
for line in lines:
if 'EXIF_DateTimeOriginal' not in line:
continue
parts = line.replace('=',' ').split()
dateString = parts[1].strip().replace(':','')
timeString = parts[2].strip().replace(':','')
return (dateString, timeString)
raise Exception('Failed to read date/time from file: ' + filepath)
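# Worked example of the parsing above (values hypothetical): gdalinfo prints a
# metadata line such as
#
#   EXIF_DateTimeOriginal=2012:11:08 00:30:39
#
# replace('=',' ').split() yields ['EXIF_DateTimeOriginal', '2012:11:08', '00:30:39'],
# so the function returns ('20121108', '003039').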
def jpegToImageFile(jpegFile, orthoFile):
'''Given AN_20121107/jpeg/2012_11_08_17415.JPG and DMS_1381721_17415_20121108_00303910.tif
create AN_20121107/image/DMS_20121108_003039_17415.tif.
This can throw an exception.'''
jpegFolder = os.path.dirname(jpegFile)
imageFolder = getImageFolder(os.path.dirname(jpegFolder))
if not os.path.exists(jpegFolder):
raise Exception("Missing " + jpegFolder)
if not os.path.exists(imageFolder):
raise Exception("Missing " + imageFolder)
if not os.path.exists(jpegFile):
raise Exception("Missing " + jpegFile)
frame = getFrameNumberFromFilename(jpegFile)
# This was the original implementation, but it can give wrong results
# when the jpeg has incorrect time.
#(dateString, timeString) = getJpegDateTime(jpegFile)
[dateString, timeString] = parseTimeStamps(orthoFile)
outputName = formFilePrefix(dateString, timeString, frame) + ".tif"
outputPath = os.path.join(imageFolder, outputName)
return outputPath
def projectionBoundsFile(folder):
return os.path.join(folder, 'projection_bounds.csv')
def readProjectionBounds(indexFile):
'''Read projection bunds for each ortho image.'''
bounds = {}
# Nothing to do
if not os.path.exists(indexFile):
return bounds
with open(indexFile, 'r') as f:
for line in f:
parts = line.strip().split(',')
for v in range(len(parts)):
parts[v] = parts[v].strip()
if parts[v] != "":
parts[v] = float(parts[v].strip())
if len(parts) != 6:
# Maybe when we wrote it last time we got interrupted.
# Note that the last value is just an empty space.
continue
frame = int(parts[0])
bounds[frame] = (parts[1], parts[2], parts[3], parts[4])
return bounds
def writeProjectionBounds(indexFile, bounds):
'''Write projection bounds for all images.'''
with open(indexFile, 'w') as f:
for frame in sorted(bounds.keys()):
a,b,c,d = bounds[frame]
vals = [frame, a, b, c, d]
for val in vals:
f.write(str(val) + ', ')
f.write('\n')
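# Example of the on-disk format written above and read back by
# readProjectionBounds(); each line holds the frame number followed by the four
# bound values, with a trailing separator so it splits into six parts
# (numbers are hypothetical):
#
#   350, -151000.0, -149000.0, -2350000.0, -2348000.0, 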
def readLinesInSet(fileName):
'''Read the lines from a file as elements in a set, while stripping all leading
and trailing spaces.'''
filesSet = set()
if not os.path.exists(fileName):
return filesSet
with open(fileName, 'r') as f:
for line in f:
line = line.strip()
filesSet.add(line)
return filesSet
def logFilePrefix():
return 'icebridge_batch_log'
def validFilesPrefix():
'''This one is used in multiple places.'''
return 'valid_files'
def manager_log_prefix():
return 'pleiades_manager_log'
def validFilesList(folder, startFrame, stopFrame):
'''File containing the list of fetched files that were validated.
for the given range. Need the range so that when we validate in
parallel, we do not overwrite the same file. Later these validation
files will be merged.'''
prefix = validFilesPrefix() + '_' + str(startFrame) + '_' + str(stopFrame) + '.csv'
return os.path.join(folder, prefix)
def updateValidFilesListFromDisk(filesList, filesSet):
'''Update the current set of valid files with any new info from disk.'''
# Nothing to do
if not os.path.exists(filesList):
return filesSet
print("Reading: " + filesList)
with open(filesList, 'r') as f:
for line in f:
line = line.strip()
filesSet.add(line)
return filesSet
def writeValidFilesList(filesList, filesSet):
'''Write the list of valid files to disk.'''
print("Writing: " + filesList)
with open(filesList, 'w') as f:
for filename in sorted(filesSet):
f.write(filename + '\n')
def readIndexFile(parsedIndexPath, prependFolder = False):
'''Read an index file having frame number, filename, and url it came from.'''
frameDict = {}
urlDict = {}
with open(parsedIndexPath, 'r') as f:
for line in f:
parts = line.strip().split(',')
if len(parts) < 3:
# Odd index file
raise Exception("Invalid index file: " + parsedIndexPath)
frameNumber = int(parts[0])
frameDict[frameNumber] = parts[1].strip()
if prependFolder:
frameDict[frameNumber] = os.path.join(os.path.dirname(parsedIndexPath),
frameDict[frameNumber])
urlDict[frameNumber] = parts[2].strip()
return (frameDict, urlDict)
def writeIndexFile(indexPath, frameDict, urlDict):
'''Write an index file, optionally with urls.'''
with open(indexPath, 'w') as f:
for frame in sorted(frameDict.keys()):
frameName = frameDict[frame]
urlName = ""
if frame in urlDict:
urlName = urlDict[frame]
f.write(str(frame) + ', ' + frameName + ', ' + urlName + '\n')
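# Example of one index file line written above and parsed by readIndexFile(),
# in the form "frame, filename, url" (filename and url are hypothetical):
#
#   17415, DMS_20121108_003039_17415.tif, https://example.org/ortho/DMS_1381721_17415_20121108_00303910.tif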
def isValidImage(filename):
'''Check that an image file is not corrupted in some way. This check alone is not sufficient.'''
if not os.path.exists(filename):
return False
# Must always wipe .aux.xml. Always. Otherwise, if this function is called a first time
# it may return False, but if called a second time it may return True.
auxFile = filename + '.aux.xml'
if os.path.exists(auxFile):
os.remove(auxFile)
gdalinfoPath = asp_system_utils.which("gdalinfo")
cmd = gdalinfoPath + ' -stats ' + filename
if os.path.exists(auxFile):
os.remove(auxFile)
p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
return False
if error is not None:
output += error
    m = re.match(r"^.*?(Block\s+failed|Premature\s+end)", output,
                 re.IGNORECASE | re.MULTILINE | re.DOTALL)
if m:
return False
return True
def isDEM(filename):
'''Return true if a file is a recognized DEM.'''
if 'crop' in filename or 'CMAP' in filename: return False # ignore some stray files
return (len(filename) >= 8 and filename[-8:] == '_DEM.tif')
def isLidar(filename):
'''Return true if the file is an input (not converted) lidar format'''
extension = fileExtension(filename)
return (extension == '.qi') or (extension == '.hdf5') or \
(extension == '.h5') or (extension == '.TXT')
def isValidLidarCSV(filename):
    '''Check that a lidar CSV file is valid. It must have at least three entries on one line.'''
if not os.path.exists(filename):
return False
with open(filename, "r") as ins:
array = []
for line in ins:
            # This will help
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.test.testsuite Performing a suite of SKIRT test cases
#
# An instance of the SkirtTestSuite class in this module represents a suite of SKIRT test cases, stored as
# a nested structure of files and directories according to a specific layout, and provides facilities to
# perform the tests, verify the results, and prepare a summary test report.
# -----------------------------------------------------------------
# Import standard modules
import filecmp
import sys
import os
import os.path
import re
import time
import datetime
import numpy as np
import multiprocessing
import pyfits
# Import the relevant PTS classes and modules
from ..simulation.execute import SkirtExec
from ..tools.logging import log
# -----------------------------------------------------------------
# SkirtTestSuite class
# -----------------------------------------------------------------
## An instance of the SkirtTestSuite class represents a suite of <tt>SKIRT</tt> test cases, stored as
# a nested structure of files and directories according to a specific layout, and provides facilities to
# perform the tests, verify the results, and prepare a summary test report.
#
# A test suite consists of a set of independent test cases (i.e. test cases can be executed in arbitrary order)
#
# Each test case in a test suite is defined by a collection of files and directories as follows:
# - a directory with arbitrary name containing all test case files and directories, called the "case directory"
# - immediately inside the case directory there is:
# - exactly one \em ski file with an arbitrary name (with the \c .ski filename extension) specifying the simulation
# to be performed for the test case
# - a directory named \c in containing the input files for the simulation, if any
# - a directory named \c ref containing the reference files for the test, i.e. a copy of the output files
# generated by a correct simulation run
# - a directory named \c out to receive the actual output files when the test is performed; this directory
# and its contents are automatically removed and created when running the test case
# - everything else is ignored, as long as there are no additional files with a \c .ski filename extension
#
# A test suite is defined by a collection of files and directories as follows:
# - a directory directly or indirectly containing all test cases, called the "suite directory";
# a test suite is named after this directory
# - each ski file directly or indirectly contained in the suite directory defines a test case that
# must adhere to the description above (no other ski files in the same directory, special directories
# next to the \em ski file, etc.)
#
# For example, a test suite may be structured with nested sub-suites as follows (where each \c CaseN directory
# contains a ski file plus \c ref, \c in, and \c out directories):
# \verbatim
# SKIRT Tests
# SPH simulations
# Case1
# Case2
# Geometries
# Radial
# Case1
# Case2
# Cilindrical
# Case1
# Case2
# Case3
# Full 3D
# Case1
# Case2
# Instruments
# \endverbatim
#
# It is also allowed to nest test cases inside another test case, but this is not recommended.
#
class SkirtTestSuite(object):
    ## The constructor accepts four arguments:
#
# - suitepath: the path of the directory containing the complete test suite
# - subsuite: the name of a certain subsuite (the name of one particular test case or an overarching name)
# - parallel: this flag indicates whether the test suite is to be executed in parallel mode or not
# - skirtpath: this optional argument specifies the path to the skirt executable. If none is given,
# the skirt version used will be the one found in the standard system path.
#
# Paths may be absolute, relative to a user's home folder, or relative to the
# current working directory.
#
def __init__(self, suitepath, subsuite=None, parallel=False, skirtpath=None):
# Set the basic characteristics of this test run
self._suitepath = os.path.realpath(os.path.expanduser(suitepath))
self._suitename = os.path.basename(self._suitepath)
self._subsuitepath = findsubdirectory(self._suitepath, subsuite)
self._parallel = parallel
# Create a SKIRT execution context
self._skirt = SkirtExec(skirtpath)
# Initialize some data structures for the test run
self._statistics = dict()
self._simulations = []
self._modes = []
self._modenames = []
self._skipatterns = []
# Find out the number of CPU cores on this system and determine the number of threads in singleprocessing mode,
# the number of processes and the number of threads per process in multiprocessing mode (based on whether
# parallel mode is enabled).
cores = multiprocessing.cpu_count()
if self._parallel:
threads = cores
processes = 2
            threadspp = cores // processes
else:
threads = 1
            #processes = cores  # If we chose to build the 'reference test case output' separately on each computer
            processes = 4       # Fixed so that the 'reference test case output' stays portable between computers
threadspp = 1
# Check in which modes the test suite should be executed (singleprocessing and/or multiprocessing mode)
# and create the appropriate ski file pattern(s).
if self._subsuitepath == self._suitepath:
# Add configurations for both singleprocessing and multiprocessing modes
self._modes = [(cores, threads, 1), (1, threadspp, processes)]
self._modenames = ["in singleprocessing mode ", "in multiprocessing mode "]
self._skipatterns = [os.path.join(self._suitepath, "Singleprocessing", "*.ski"), os.path.join(self._suitepath, "Multiprocessing", "*.ski")]
elif "Singleprocessing" in self._subsuitepath:
            # Add the proper configuration as a (parallel simulations, threads, processes) tuple:
            # all simulations in parallel, each running in a single process
self._modes = [(cores, threads, 1)]
self._modenames = [""]
self._skipatterns = [os.path.join(self._subsuitepath, "*.ski")]
elif "Multiprocessing" in self._subsuitepath:
            # Add the proper configuration as a (parallel simulations, threads, processes) tuple:
            # one simulation at a time, each using multiple processes
self._modes = [(1, threadspp, processes)]
self._modenames = [""]
self._skipatterns = [os.path.join(self._subsuitepath, "*.ski")]
## This function performs all tests in the test suite, verifies the results, and prepares a summary test report.
# As an argument, it can take the time in seconds to sleep before checking for simulation completion again.
# The default value is 60 seconds.
#
def perform(self, sleepsecs="60"):
# Define a name identifying this test run
timestamp = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
self._testname = os.path.basename(self._subsuitepath) + "_" + timestamp
# Inform the user of the fact that the test suite has been initiated
log.info("Starting report for test suite " + self._subsuitepath)
log.info("Using " + self._skirt.version() + " in " + self._skirt.root_directory)
# Create a report file to contain a detailed report of the test run
self._createreportfile()
# Clean the "out" directories
self._clean()
# Set the number of finished simulations to zero
self._finished = 0
# Perform singleprocessing and multiprocessing mode sequentially
self._numsimulations = 0
for mode, modename, skipattern in zip(self._modes, self._modenames, self._skipatterns):
# Start performing the simulations
simulations = self._skirt.execute(skipattern, recursive=True, inpath="in", outpath="out", skirel=True, parallel=mode[0], threads=mode[1], processes=mode[2], wait=False)
numsimulations = len(simulations)
# Inform the user on the number of test cases (in this mode)
log.info("Number of test cases " + modename + ": " + str(numsimulations))
self._report.write("Number of test cases " + modename + ": " + str(numsimulations) + "<br>\n")
# Add the new simulations to the list
self._simulations += simulations
self._numsimulations += numsimulations
# Verify the results for each test case
self._verify(sleepsecs)
# Wait for the skirt execution context to finish
self._skirt.wait()
# Write statistics about the number of successful test cases
self._writestatistics()
# Close the report file
self._report.close()
## This function creates a HTML file that contains a detailed report of the test run
def _createreportfile(self):
csscommands = """<style type="text/css">
[id^="togList"], /* HIDE CHECKBOX */
[id^="togList"] ~ .list, /* HIDE LIST */
[id^="togList"] + label span + span, /* HIDE "Collapse" */
[id^="togList"]:checked + label span{ /* HIDE "Expand" (IF CHECKED) */
display:none;
}
[id^="togList"]:checked + label span + span{
display:inline-block; /* SHOW "Collapse" (IF CHECKED) */
}
[id^="togList"]:checked ~ .list{
display:block; /* SHOW LIST (IF CHECKED) */
}
</style>"""
# Open the report file
filepath = os.path.join(self._suitepath, "report_" + self._testname + ".html")
self._report = open(filepath, 'w')
# Write some general info to the report file
self._report.write("<html>\n<head>\n</head>\n<body>\n")
self._report.write(csscommands + "\n")
self._report.write("Report file | |
if form.is_valid():
form.save()
transaction.updated_by = request.user
transaction.save()
success_message(request, 'admin_edit_transaction', {})
admin_mail(request, 'transaction_updated',
{'transaction': transaction})
log_action(request, model_object=transaction,
action_flag=9, change_message='edited Transaction')
return render_view(request, 'admin/edit_transaction.html',
{'transaction': transaction, 'form': form})
def stuff_transaction_list(user, status=1):
'''
status
(1)-successful,
(2)-pending,
(3)-Failed,
(4)-All,
(6)-successful bills
(7)-All bills
(8)-All non bill transactions
(9)-All pending bills
(10)-All failed bills
(11)-All cancelled bills
'''
transaction_list = False
if status == 1:
transaction_list = Transaction.objects.filter(
visa_success=True, is_processed=True, amount_sent__isnull=False, utility=False)
elif status == 2:
transaction_list = Transaction.objects.filter(
visa_success=True, is_processed=False, amount_sent__isnull=False, utility=False)
elif status == 3:
transaction_list = Transaction.objects.filter(
visa_success=False, utility=False)
elif status == 4:
#transaction_list = Transaction.objects.all()
transaction_list = Transaction.objects.filter(utility=False)
elif status == 5:
transaction_list = Transaction.objects.filter(
is_canceled=True, visa_success=True, is_processed=True, amount_sent__isnull=False, utility=False
)
elif status == 6:
transaction_list = Transaction.objects.filter(
visa_success=True, is_processed=True, amount_sent__isnull=False, utility=True
)
elif status == 7:
transaction_list = Transaction.objects.filter(
utility=True
)
elif status == 8:
transaction_list = Transaction.objects.filter(
utility=False
)
elif status == 9:
transaction_list = Transaction.objects.filter(
visa_success=True, is_processed=False, amount_sent__isnull=False, utility=True)
elif status == 10:
#
transaction_list = Transaction.objects.filter(
visa_success=False, utility=True)
elif status == 11:
transaction_list = Transaction.objects.filter(
is_canceled=True, visa_success=True, is_processed=True, amount_sent__isnull=False, utility=True
)
# else:
# if len(transaction_list) > 0:
# transaction_list = transaction_list.filter(utility=False)
'''get the transaction list our stuff users are allowed access to'''
if transaction_list and not user.is_superuser:
country_filter = network_filter = Q()
for value, keyword in get_country_access(user):
country_filter |= Q(to_country__code=value)
for value, keyword in get_network_access(user):
network_filter |= Q(mobile_network_code=value)
#transaction_list = Transaction.objects.filter(country_filter & network_filter)
transaction_list = transaction_list.filter(
country_filter & network_filter)
# if successful:
# transaction_list = transaction_list.filter(
# visa_success=True, is_processed=True, amount_sent__isnull=False)
return transaction_list
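# Illustrative sketch (not part of the original module): how the status codes of
# stuff_transaction_list are typically consumed. The user object is hypothetical.
#   >>> pending_mobile = stuff_transaction_list(request.user, status=2)   # pending, non-bill
#   >>> failed_bills = stuff_transaction_list(request.user, status=10)    # failed bill payments
# Non-superusers only see transactions matching their country/network access,
# because of the Q-object filters applied at the end of the function.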
@permission_required('view_transaction')
def transactions(request, name=False, user_id=False):
'''
Transactions
'''
pretitle = 'Pending Transactions'
page_title = 'Pending Transactions'
#debug(get_country_access(request.user), 'country')
transaction_list = False
status = 4
if not name and request.user.is_superuser:
page_title = pretitle = 'Transactions'
elif name == 'pending':
status = 2
# transaction_list = transaction_list.filter(
# visa_success=True, is_processed=False, amount_sent__isnull=False)
elif name == 'successful':
status = 1
page_title = pretitle = 'Successful Transactions'
# transaction_list = transaction_list.filter(
# visa_success=True, is_processed=True, amount_sent__isnull=False)
elif name == 'failed':
status = 3
page_title = pretitle = 'Failed Transactions'
elif name == 'canceled':
status = 5
page_title = pretitle = 'Canceled Transactions'
#transaction_list = transaction_list.filter(visa_success=False)
elif name == 'search':
page_title = pretitle = 'Search Transactions'
elif name == 'billpayments':
status = 6
page_title = pretitle = 'Search Billpayments'
else:
return HttpResponseRedirect(reverse('admin:admin_dashboard'))
# search query
if 'q' in request.GET:
try:
id = int(request.GET['q']) ^ 0xABCDEFAB
transaction_list = transaction_list.filter(id=id)
except Exception, e:
messages.error(request, "The Transaction was not found")
if not transaction_list:
try:
num = str(request.GET['q'])
ctry_code = num[:3]
debug(ctry_code)
phone_num = num[3:]
debug(phone_num)
transaction_list.filter(receiver_number=phone_num)
except Exception, e:
debug(e)
# if request.user.is_superuser:
# transaction_list = Transaction.objects.all()
transaction_list = stuff_transaction_list(request.user, status)
# we are dealing with a specific user
if user_id and transaction_list:
user_id = int(user_id) ^ 0xABCDEFAB
profile = get_object_or_404(Profile.objects.filter(id=user_id))
transaction_list = transaction_list.filter(user=profile.user)
if transaction_list:
transaction_list = transaction_list.order_by('-id')
paginator = Paginator(transaction_list, settings.PAGNATION_LIMIT)
page = request.GET.get('page')
try:
transactions = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
transactions = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
transactions = paginator.page(paginator.num_pages)
log_action(request, model_object=transaction_list,
action_flag=6, change_message='view Transaction')
return render_view(request, 'admin/transactions.html', {'transactions': transactions, 'pretitle': pretitle, 'page_title': page_title, 'type': name})
def tradelance(request):
"""work with tradelance."""
pretitle = 'Pending Transactions'
page_title = 'Pending Transactions'
response_data = {}
return render_view(request,'admin/tradelance.html',
{'result':response_data
})
def tradelance_response(request):
"""Tradelance response."""
phone = None
amount = None
tlance_method = None
response_data = {}
pesapot = PesaPot()
if request.POST:
data = request.POST.copy()
amount = data.get('tlance_amount','')
number = data.get('tlance_number','')
tlance_id = data.get('tlance_status','')
tlance_method = data.get('selected_tmethod','')
if tlance_method == 'tlance_deposit':
response_data = pesapot.TradelanceDeposit(number,amount)
elif tlance_method == 'tlance_request':
response_data = pesapot.TradelanceRequest(number,amount)
elif tlance_method == 'tlance_balance':
response_data = pesapot.TradelanceBalance()
elif tlance_method == 'tlance_status':
response_data = pesapot.TradelanceStatus(tlance_id)
return render_view(request,'admin/tradelance_response.html',
{'result':response_data})
@permission_required('view_transaction')
def bill_transactions(request, name=False, user_id=False):
'''
Transactions
'''
pretitle = 'Pending Transactions'
page_title = 'Pending Transactions'
#debug(get_country_access(request.user), 'country')
transaction_list = False
status = 7
if not name and request.user.is_superuser:
page_title = pretitle = 'Bill Transactions'
elif name == 'pending':
status = 9
# transaction_list = transaction_list.filter(
# visa_success=True, is_processed=False, amount_sent__isnull=False)
elif name == 'successful':
status = 6
page_title = pretitle = 'Successful Bill Transactions'
# transaction_list = transaction_list.filter(
# visa_success=True, is_processed=True, amount_sent__isnull=False)
elif name == 'failed':
status = 10
page_title = pretitle = 'Failed Bill Transactions'
elif name == 'canceled':
status = 11
page_title = pretitle = 'Canceled Bill Transactions'
#transaction_list = transaction_list.filter(visa_success=False)
elif name == 'search':
page_title = pretitle = 'Search Transactions'
else:
return HttpResponseRedirect(reverse('admin:admin_dashboard'))
# search query
if 'q' in request.GET:
try:
id = int(request.GET['q']) ^ 0xABCDEFAB
transaction_list = transaction_list.filter(id=id)
except Exception, e:
messages.error(request, "The Transaction was not found")
if not transaction_list:
try:
num = str(request.GET['q'])
ctry_code = num[:3]
debug(ctry_code)
phone_num = num[3:]
debug(phone_num)
transaction_list.filter(receiver_number=phone_num)
except Exception, e:
debug(e)
# if request.user.is_superuser:
# transaction_list = Transaction.objects.all()
transaction_list = stuff_transaction_list(request.user, status)
# we are dealing with a specific user
if user_id and transaction_list:
user_id = int(user_id) ^ 0xABCDEFAB
profile = get_object_or_404(Profile.objects.filter(id=user_id))
transaction_list = transaction_list.filter(user=profile.user)
if transaction_list:
transaction_list = transaction_list.order_by('-id')
paginator = Paginator(transaction_list, settings.PAGNATION_LIMIT)
page = request.GET.get('page')
try:
transactions = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
transactions = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
transactions = paginator.page(paginator.num_pages)
log_action(request, model_object=transaction_list,
action_flag=6, change_message='view Transaction')
return render_view(request, 'admin/bill_transactions.html', {'transactions': transactions, 'pretitle': pretitle, 'page_title': page_title, 'type': name})
@permission_required('edit_transaction')
def resend_transaction(request):
'''
Resend the user transaction
'''
if request.POST:
if not 'resend_transaction' in request.POST:
return HttpResponseRedirect(reverse('admin:admin_dashboard'))
else:
name = int(request.POST['resend_transaction'])
id = name ^ 0xABCDEFAB
transaction = get_object_or_404(Transaction.objects.filter(
id=id, visa_success=True, is_processed=False, amount_sent__isnull=False), id=id, visa_success=True, is_processed=False, amount_sent__isnull=False)
response = {}
response = payments.process_mobilemoney(
transaction, response, request, processed_by=request.user)
#debug(response, 'Resend Response')
# if not response['error'] and 'delivered_to_mobile' in response :
# reget the transaction
transaction = get_object_or_404(Transaction.objects.filter(id=id))
if transaction.is_processed:
success_message(request, 'admin_resend_transaction', {
'response': response})
# else:
# error_message(request, 'admin_resend_transaction', {'response': response})
else:
error_message(request, 'admin_process_transaction', {
'response': response})
else:
return HttpResponseRedirect(reverse('custom_404'))
return HttpResponseRedirect(reverse('admin:admin_transaction', args=(name,)))
@permission_required('edit_transaction')
def process_transaction(request):
'''
Mark as processed with resending
'''
if request.POST:
cancel_transaction = request.POST.get('cancel_transaction', None)
process_transaction = request.POST.get('process_transaction', None)
if cancel_transaction:
name = cancel_transaction
id = int(name) ^ 0xABCDEFAB
transaction = get_object_or_404(Transaction.objects.filter(
id=id, visa_success=True, is_processed=False, amount_sent__isnull=False), id=id, visa_success=True, is_processed=False, amount_sent__isnull=False)
elif process_transaction:
name = process_transaction
id = int(name) ^ 0xABCDEFAB
transaction = get_object_or_404(Transaction.objects.filter(
id=id, visa_success=True, is_processed=False, amount_sent__isnull=False), id=id, visa_success=True, is_processed=False, amount_sent__isnull=False)
else:
return HttpResponseRedirect(reverse('custom_404'))
if process_transaction:
response = {'status_code': payments.RESPONSE_CODES['SUCCESS']}
payments.process_mobilemoney(
transaction, response, request, processed_by=request.user, mark_as_processed=True)
_process_error = response.get('error', None)
if not _process_error:
delivered_to_mobile = False
if 'delivered_to_mobile' in response:
delivered_to_mobile = response['delivered_to_mobile']
success_message(request, 'admin_process_transaction', {
'status_code': response['status_code'], 'delivered_to_mobile': delivered_to_mobile})
return HttpResponseRedirect(reverse('admin:admin_transaction', args=(name,)))
else:
error_message(request, 'admin_process_transaction', {
'status_code': response['status_code']})
if cancel_transaction:
transaction.is_processed = True
transaction.is_canceled = True
transaction.canceled_by = request.user
transaction.cancled_on = datetime.now()
transaction.save()
return HttpResponseRedirect(reverse('admin:admin_transactions', args=('canceled',)))
# return HttpResponseRedirect(settings.BASE_URL +
# 'admin/transactions/successful/')
return HttpResponseRedirect(reverse('admin:admin_transactions', args=('pending',)))
@admin_required
def users(request, name):
'''
@request request object
'''
# user_list = Profile.objects.filter(account_verified=True,user__isnull=False)
# print name
pretitle = 'verified users'
page_title = 'verified users'
if name == 'verified':
user_list = admin_utils.verified_users()
elif name == 'unverified':
user_list = Profile.objects.filter(
Q(id_pic=''),
account_verified=False,
user__isnull=False, account_blocked=False)
pretitle = 'Unverified Users'
page_title = 'verified users'
elif name == 'pending_verification':
pretitle = 'Users waiting to be verified'
page_title = 'users pending verification'
user_list = admin_utils.users_pending_verification()
elif name == 'blocked':
pretitle = 'Blocked Users'
page_title = 'Blocked Users'
user_list = admin_utils.blocked_users()
elif name == 'top':
        pretitle = 'Top Users'
        page_title = 'Top Users'
user_list = Profile.objects.filter(account_blocked=False)
elif name == 'search':
pretitle = 'User Search'
page_title = 'User Search'
user_list = Profile.objects.filter(user__isnull=False)
else:
return HttpResponseRedirect(reverse('custom_404'))
user_list = user_list.filter().order_by('-id')
# search query
if 'q' in request.GET:
pretitle += ' | %s' % request.GET['q']
page_title += ' | %s' % request.GET['q']
user_list = user_list.filter(
Q(firstname__icontains='' + request.GET['q'] + '') | Q(lastname__icontains='' + request.GET['q'] + ''))
paginator = Paginator(user_list, settings.PAGNATION_LIMIT)
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
users = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
        users = paginator.page(paginator.num_pages)
E501
List the complete time series of a person property. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_person_property_time_series_with_http_info(id_type_scope, id_type_code, code, property_key, async_req=True)
>>> result = thread.get()
:param id_type_scope: Scope of the person identifier type. (required)
:type id_type_scope: str
:param id_type_code: Code of the person identifier type. (required)
:type id_type_code: str
:param code: Code of the person under specified identifier type's scope and code. This together with stated identifier type uniquely identifies the person. (required)
:type code: str
:param property_key: The property key of the property that will have its history shown. These must be in the format {domain}/{scope}/{code} e.g. \"Person/CompanyDetails/Role\". Each property must be from the \"Person\" domain. (required)
:type property_key: str
:param as_at: The asAt datetime at which to list the person's property history. Defaults to return the current datetime if not supplied.
:type as_at: datetime
:param filter: Expression to filter the result set. Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
:type filter: str
:param page: The pagination token to use to continue listing properties from a previous call to get property time series. This value is returned from the previous call. If a pagination token is provided the filter and asAt fields must not have changed since the original request.
:type page: str
:param limit: When paginating, limit the number of returned results to this many.
:type limit: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ResourceListOfPropertyInterval, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'id_type_scope',
'id_type_code',
'code',
'property_key',
'as_at',
'filter',
'page',
'limit'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_person_property_time_series" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'property_key' is set
if self.api_client.client_side_validation and ('property_key' not in local_var_params or # noqa: E501
local_var_params['property_key'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `property_key` when calling `get_person_property_time_series`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_person_property_time_series`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_person_property_time_series`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_person_property_time_series`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 5000: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `get_person_property_time_series`, must be a value less than or equal to `5000`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `get_person_property_time_series`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id_type_scope' in local_var_params:
path_params['idTypeScope'] = local_var_params['id_type_scope'] # noqa: E501
if 'id_type_code' in local_var_params:
path_params['idTypeCode'] = local_var_params['id_type_code'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
if 'property_key' in local_var_params and local_var_params['property_key'] is not None: # noqa: E501
query_params.append(('propertyKey', local_var_params['property_key'])) # noqa: E501
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfPropertyInterval",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/persons/{idTypeScope}/{idTypeCode}/{code}/properties/time-series', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_person_relations(self, id_type_scope, id_type_code, code, **kwargs): # noqa: E501
"""[EXPERIMENTAL] GetPersonRelations: Get Relations for Person # noqa: E501
Get relations for the specified person. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_person_relations(id_type_scope, id_type_code, code, async_req=True)
>>> result = thread.get()
:param id_type_scope: Scope of the person identifier type. (required)
:type id_type_scope: str
:param id_type_code: Code of the person identifier type. (required)
:type id_type_code: str
:param code: Code of the person under specified identifier type's scope and code. This together with stated identifier type uniquely identifies the person. (required)
:type code: str
:param effective_at: The effective datetime or cut label at which to get relations. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to retrieve the person's relations. Defaults to return the latest LUSID AsAt time if not specified.
:type as_at: datetime
:param filter: Expression to filter the relations. Users should provide null or empty string for this field until further notice.
:type filter: str
:param identifier_types: Identifiers types (as property keys) used for referencing Persons or Legal Entities. These take the format {domain}/{scope}/{code} e.g. \"Person/CompanyDetails/Role\". They must be from the \"Person\" or \"LegalEntity\" domain. Only identifier types stated will be used to look up relevant entities in relations. If not applicable, provide an empty array.
:type identifier_types: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ResourceListOfRelation
"""
kwargs['_return_http_data_only'] = True
return self.get_person_relations_with_http_info(id_type_scope, id_type_code, code, **kwargs) # noqa: E501
def get_person_relations_with_http_info(self, id_type_scope, id_type_code, code, **kwargs): # noqa: E501
"""[EXPERIMENTAL] GetPersonRelations: Get Relations for Person # noqa: E501
Get relations for the specified person. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_person_relations_with_http_info(id_type_scope, id_type_code, code, async_req=True)
>>> result = thread.get()
:param id_type_scope: Scope of the person identifier type. (required)
:type id_type_scope: str
:param id_type_code: Code of the person identifier type. (required)
:type id_type_code: str
:param code: Code of the person under specified identifier type's scope and code. This together with stated identifier type uniquely identifies the person. (required)
:type code: str
:param effective_at: The effective datetime or cut label at which to get relations. Defaults to the current LUSID system datetime if not specified.
        :type effective_at: str
# -*- coding: utf-8 -*-
import os
import numpy as np
def _import_networkx():
try:
import networkx as nx
except Exception as e:
raise ImportError('Cannot import networkx. Use graph-tool or try to '
'install it with pip (or conda) install networkx. '
'Original exception: {}'.format(e))
return nx
def _import_graphtool():
try:
import graph_tool as gt
except Exception as e:
raise ImportError('Cannot import graph-tool. Use networkx or try to '
'install it. Original exception: {}'.format(e))
return gt
class IOMixIn(object):
def _break_signals(self):
r"""Break N-dimensional signals into N 1D signals."""
for name in list(self.signals.keys()):
if self.signals[name].ndim == 2:
for i, signal_1d in enumerate(self.signals[name].T):
self.signals[name + '_' + str(i)] = signal_1d
del self.signals[name]
def _join_signals(self):
r"""Join N 1D signals into one N-dimensional signal."""
joined = dict()
for name in self.signals:
name_base = name.rsplit('_', 1)[0]
names = joined.get(name_base, list())
names.append(name)
joined[name_base] = names
for name_base, names in joined.items():
if len(names) > 1:
names = sorted(names) # ensure dim ordering (_0, _1, etc.)
signal_nd = np.stack([self.signals[n] for n in names], axis=1)
self.signals[name_base] = signal_nd
for name in names:
del self.signals[name]
def to_networkx(self):
r"""Export the graph to NetworkX.
Edge weights are stored as an edge attribute,
under the name "weight".
Signals are stored as node attributes,
under their name in the :attr:`signals` dictionary.
`N`-dimensional signals are broken into `N` 1-dimensional signals.
They will eventually be joined back together on import.
Returns
-------
graph : :class:`networkx.Graph`
A NetworkX graph object.
See Also
--------
to_graphtool : export to graph-tool
save : save to a file
Examples
--------
>>> import networkx as nx
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Path(4, directed=True)
>>> graph.set_signal(np.full(4, 2.3), 'signal')
>>> graph = graph.to_networkx()
>>> print(nx.info(graph))
DiGraph named 'Path' with 4 nodes and 3 edges
>>> nx.is_directed(graph)
True
>>> graph.nodes()
NodeView((0, 1, 2, 3))
>>> graph.edges()
OutEdgeView([(0, 1), (1, 2), (2, 3)])
>>> graph.nodes()[2]
{'signal': 2.3}
>>> graph.edges()[(0, 1)]
{'weight': 1.0}
>>> # nx.draw(graph, with_labels=True)
Another common goal is to use NetworkX to compute some properties to be
be imported back in the PyGSP as signals.
>>> import networkx as nx
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Sensor(100, seed=42)
>>> graph.set_signal(graph.coords, 'coords')
>>> graph = graph.to_networkx()
>>> betweenness = nx.betweenness_centrality(graph, weight='weight')
>>> nx.set_node_attributes(graph, betweenness, 'betweenness')
>>> graph = graphs.Graph.from_networkx(graph)
>>> graph.compute_fourier_basis()
>>> graph.set_coordinates(graph.signals['coords'])
>>> fig, axes = plt.subplots(1, 2)
>>> _ = graph.plot(graph.signals['betweenness'], ax=axes[0])
>>> _ = axes[1].plot(graph.e, graph.gft(graph.signals['betweenness']))
"""
nx = _import_networkx()
def convert(number):
# NetworkX accepts arbitrary python objects as attributes, but:
# * the GEXF writer does not accept any NumPy types (on signals),
# * the GraphML writer does not accept NumPy ints.
if issubclass(number.dtype.type, (np.integer, np.bool_)):
return int(number)
else:
return float(number)
def edges():
for source, target, weight in zip(*self.get_edge_list()):
yield int(source), int(target), {'weight': convert(weight)}
def nodes():
for vertex in range(self.n_vertices):
signals = {name: convert(signal[vertex])
for name, signal in self.signals.items()}
yield vertex, signals
self._break_signals()
graph = nx.DiGraph() if self.is_directed() else nx.Graph()
graph.add_nodes_from(nodes())
graph.add_edges_from(edges())
graph.name = self.__class__.__name__
return graph
def to_graphtool(self):
r"""Export the graph to graph-tool.
Edge weights are stored as an edge property map,
under the name "weight".
Signals are stored as vertex property maps,
under their name in the :attr:`signals` dictionary.
`N`-dimensional signals are broken into `N` 1-dimensional signals.
They will eventually be joined back together on import.
Returns
-------
graph : :class:`graph_tool.Graph`
A graph-tool graph object.
See Also
--------
to_networkx : export to NetworkX
save : save to a file
Examples
--------
>>> import graph_tool as gt
>>> import graph_tool.draw
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Path(4, directed=True)
>>> graph.set_signal(np.full(4, 2.3), 'signal')
>>> graph = graph.to_graphtool()
>>> graph.is_directed()
True
>>> graph.vertex_properties['signal'][2]
2.3
>>> graph.edge_properties['weight'][graph.edge(0, 1)]
1.0
>>> # gt.draw.graph_draw(graph, vertex_text=graph.vertex_index)
Another common goal is to use graph-tool to compute some properties to
be imported back in the PyGSP as signals.
>>> import graph_tool as gt
>>> import graph_tool.centrality
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Sensor(100, seed=42)
>>> graph.set_signal(graph.coords, 'coords')
>>> graph = graph.to_graphtool()
>>> vprop, eprop = gt.centrality.betweenness(
... graph, weight=graph.edge_properties['weight'])
>>> graph.vertex_properties['betweenness'] = vprop
>>> graph = graphs.Graph.from_graphtool(graph)
>>> graph.compute_fourier_basis()
>>> graph.set_coordinates(graph.signals['coords'])
>>> fig, axes = plt.subplots(1, 2)
>>> _ = graph.plot(graph.signals['betweenness'], ax=axes[0])
>>> _ = axes[1].plot(graph.e, graph.gft(graph.signals['betweenness']))
"""
gt = _import_graphtool()
graph = gt.Graph(directed=self.is_directed())
sources, targets, weights = self.get_edge_list()
graph.add_edge_list(zip(sources, targets))
prop = graph.new_edge_property(gt._gt_type(weights.dtype))
prop.get_array()[:] = weights
graph.edge_properties['weight'] = prop
self._break_signals()
for name, signal in self.signals.items():
prop = graph.new_vertex_property(gt._gt_type(signal.dtype))
prop.get_array()[:] = signal
graph.vertex_properties[name] = prop
return graph
@classmethod
def from_networkx(cls, graph, weight='weight'):
r"""Import a graph from NetworkX.
Edge weights are retrieved as an edge attribute,
under the name specified by the ``weight`` parameter.
Signals are retrieved from node attributes,
and stored in the :attr:`signals` dictionary under the attribute name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
graph : :class:`networkx.Graph`
A NetworkX graph object.
weight : string or None, optional
The edge attribute that holds the numerical values used as the edge
weights. All edge weights are set to 1 if None, or not found.
Returns
-------
graph : :class:`~pygsp.graphs.Graph`
A PyGSP graph object.
Notes
-----
The nodes are ordered according to :meth:`networkx.Graph.nodes`.
In NetworkX, node attributes need not be set for every node.
If a node attribute is not set for a node, a NaN is assigned to the
corresponding signal for that node.
If the graph is a :class:`networkx.MultiGraph`, multiedges are
aggregated by summation.
See Also
--------
from_graphtool : import from graph-tool
load : load from a file
Examples
--------
>>> import networkx as nx
>>> graph = nx.Graph()
>>> graph.add_edge(1, 2, weight=0.2)
>>> graph.add_edge(2, 3, weight=0.9)
>>> graph.add_node(4, sig=3.1416)
>>> graph.nodes()
NodeView((1, 2, 3, 4))
>>> graph = graphs.Graph.from_networkx(graph)
>>> graph.W.toarray()
array([[0. , 0.2, 0. , 0. ],
[0.2, 0. , 0.9, 0. ],
[0. , 0.9, 0. , 0. ],
[0. , 0. , 0. , 0. ]])
>>> graph.signals
{'sig': array([ nan, nan, nan, 3.1416])}
"""
nx = _import_networkx()
from .graph import Graph
adjacency = nx.to_scipy_sparse_matrix(graph, weight=weight)
graph_pg = Graph(adjacency)
for i, node in enumerate(graph.nodes()):
for name in graph.nodes[node].keys():
try:
signal = graph_pg.signals[name]
except KeyError:
signal = np.full(graph_pg.n_vertices, np.nan)
graph_pg.set_signal(signal, name)
try:
signal[i] = graph.nodes[node][name]
except KeyError:
pass # attribute not set for node
graph_pg._join_signals()
return graph_pg
@classmethod
def from_graphtool(cls, graph, weight='weight'):
r"""Import a graph from graph-tool.
Edge weights are retrieved as an edge property,
under the name specified by the ``weight`` parameter.
Signals are retrieved from node properties,
and stored in the :attr:`signals` dictionary under the property name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
graph : :class:`graph_tool.Graph`
A graph-tool graph object.
weight : string
The edge property that holds the numerical values used as the edge
weights. All edge weights are set to 1 if None, or not found.
Returns
-------
graph : :class:`~pygsp.graphs.Graph`
A PyGSP graph object.
Notes
-----
        If the graph has multiple edges connecting the same two nodes, a sum
over the edges is taken to merge them.
See Also
--------
from_networkx : import from NetworkX
load : load from a file
Examples
--------
>>> import graph_tool as gt
>>> graph = gt.Graph(directed=False)
>>> e1 = graph.add_edge(0, 1)
>>> e2 = graph.add_edge(1, 2)
>>> v = graph.add_vertex()
>>> eprop = graph.new_edge_property("double")
>>> eprop[e1] = 0.2
>>> eprop[graph.edge(1, 2)] = 0.9
>>> graph.edge_properties["weight"] = eprop
>>> vprop = graph.new_vertex_property("double", val=np.nan)
>>> vprop[3] = 3.1416
>>> graph.vertex_properties["sig"] = vprop
>>> graph = graphs.Graph.from_graphtool(graph)
>>> graph.W.toarray()
array([[0. , 0.2, 0. , 0. ],
[0.2, 0. , 0.9, 0. ],
[0. , 0.9, 0. , 0. ],
[0. , 0. , 0. , 0. ]])
>>> graph.signals
{'sig': PropertyArray([ nan, nan, nan, 3.1416])}
"""
gt = _import_graphtool()
import graph_tool.spectral
from .graph import Graph
weight = graph.edge_properties.get(weight, None)
adjacency = gt.spectral.adjacency(graph, weight=weight)
graph_pg = Graph(adjacency.T)
for name, signal in graph.vertex_properties.items():
graph_pg.set_signal(signal.get_array(), name)
graph_pg._join_signals()
return graph_pg
@classmethod
def load(cls, path, fmt=None, backend=None):
r"""Load a graph from a | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
import os
import random
from string import ascii_letters, digits
from itertools import chain
import smtplib
from string import Template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from gophish import Gophish
from SETA_surveys.models import Survey, Question, Answer, TrainingCampaign, TrainingItem
def get_random_string(length):
''' Create random string
    Get a random slug used as the URI for each training survey
    Input: length (int)
    Output: string '''
punctuation_symbols = '-_'
string_characters = ascii_letters + digits + punctuation_symbols
url_string = ''.join(random.choice(string_characters) for i in range(length))
return(url_string)
def initialize_connector():
''' Gophish API connector
    Read connection parameters from the SETAsurvey.cfg file. Check the file
    in /config for details and examples. '''
global api
config = configparser.ConfigParser()
thisfolder = os.path.dirname(os.path.abspath(__file__))
cfgfile = os.path.join(thisfolder, 'config', 'SETAsurvey.cfg')
try:
config.read(cfgfile)
except Exception as e:
return('ERROR: Config file not found')
api_key = config.get('MAIN','api_key')
host_url = config.get('MAIN','host_url')
verify = config.get('MAIN','verify')
api = Gophish(api_key, host_url, verify=eval(verify))
return
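# Illustrative sketch (not part of the original module): the expected layout of
# config/SETAsurvey.cfg as read by initialize_connector(). The values shown are
# placeholders, not real credentials or hosts.
#   [MAIN]
#   api_key = 0123456789abcdef
#   host_url = https://localhost:3333
#   verify = False
# Note that `verify` is eval()'d into a boolean before being passed to Gophish().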
def list_groups_():
''' List Gophish user groups
Get target groups and high level details
Input: none
Return: list of Groups if applies or None,
and message indicating OK or specific error (string). '''
msg = initialize_connector()
if msg is not None:
return(None, msg)
try:
groups = api.groups.get()
return(groups, 'OK')
except:
return(None, 'ERROR: failed to retrieve target groups')
def list_campaigns_():
''' Gophish list phishing campaigns
List existing phishing campaigns
Input: None
Return: list of phishing campaign objects or None,
and message indicating OK or specific error (string). '''
msg = initialize_connector()
if msg is not None:
return(None, msg)
try:
campaigns = api.campaigns.summary()
return(campaigns, 'OK')
except:
return(None, 'ERROR: failed to retrieve phishing campaigns\n')
def get_detail_group_(id):
''' Get Gophish user group detail
Retrieve group details
Input: GroupId in Gophish database
Return: object reference if applies or None,
and message indicating OK or specific error (string). '''
msg = initialize_connector()
if msg is not None:
return(None, msg)
try:
group = api.groups.get(group_id=id)
return(group, 'OK')
except:
return(None, 'Error: failed to retrieve target group {}. Generic error or non existent'.format(id))
def get_campaign_details_(id):
''' Get Gophish phishing campaign details
Retrieve phishing campaign properties
Input: Campaign Id in Gophish database
Return: object reference if applies (summary and details) or None, None,
and message indicating OK or specific error (string). '''
msg = initialize_connector()
if msg is not None:
return(None, None, msg)
try:
summary = api.campaigns.summary(campaign_id=id)
details = api.campaigns.get(campaign_id=id)
return(summary, details, 'OK')
except:
return(None, None, 'ERROR: failed to retrieve phishing campaign details or ID non-existent\n')
def create_keys(scoring_list):
''' Create keys
Create the coded answers key and scoring for the template survey
Answers_key: 1 for true, 0 for false, separated by - within a question and by _ between questions
Score_key: score for true, 0 for false, separated by - within a question and by _ between questions
'''
answer_key = ''
score_key = ''
print(scoring_list)
for item in scoring_list:
for score in item:
if score != 0:
answer_key=answer_key+'1'+'-'
else:
answer_key=answer_key+'0'+'-'
score_key=score_key+str(score)+'-'
answer_key = answer_key[:-1]
score_key = score_key[:-1]
answer_key = answer_key + '_'
score_key = score_key + '_'
answer_key = answer_key[:-1]
score_key = score_key[:-1]
return(answer_key, score_key)
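# Illustrative sketch (not part of the original module): how create_keys encodes a
# scoring list. For a two-question survey scored as [[50.0, 0.0], [25.0, 25.0, 0.0]]:
#   >>> create_keys([[50.0, 0.0], [25.0, 25.0, 0.0]])
#   ('1-0_1-1-0', '50.0-0.0_25.0-25.0-0.0')
# i.e. answers are joined with '-' inside a question and questions are joined with '_'.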
def create_survey_scoring(answers):
''' Create survey scoring
Create a scoring list of lists for the already validated template survey
'''
scoring_list = []
number_of_questions = len(answers)
points_per_question = 100 / number_of_questions
for answers_group in answers:
trues_in_question = 0
for answer in answers_group:
if 'T' in answer[0]:
trues_in_question += 1
scoring_question_list = []
for answer in answers_group:
if 'T' in answer[0]:
scoring_question_list.append(round(points_per_question / trues_in_question,2))
else:
scoring_question_list.append(0.0)
scoring_list.append(scoring_question_list)
return scoring_list
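# Illustrative sketch (not part of the original module): create_survey_scoring splits
# 100 points evenly across questions, and each question's share evenly across its
# True answers. For one question with answers [T, F] and one with [T, T, F]:
#   >>> answers = [[['T', 'Yes'], ['F', 'No']],
#   ...            [['T', 'A'], ['T', 'B'], ['F', 'C']]]
#   >>> create_survey_scoring(answers)
#   [[50.0, 0.0], [25.0, 25.0, 0.0]]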
def validate_survey(questions, answers):
''' Validate survey template
Create a scoring list of lists for the already validated survey
#checks:
#C1: same number of questions than answers´ lists
#C2: at least two answers items per questions
#C3: at least one True answer per question
'''
valid = True
msg = 'OK'
#C1
if len(questions) != len(answers):
msg = ('ERROR: Number of questions {} and set of answers {} does not match for the survey'\
.format(len(questions),len(answers)))
valid = False
#C2
for answers_group in answers:
if len(answers_group) < 2:
msg = ('ERROR: Number of answers lower than two for a given survey question')
valid = False
break
#C3
for answers_group in answers:
one_correct = False
for answer in answers_group:
if answer[0] == 'T':
one_correct = True
break
if one_correct == False:
msg = ('ERROR: There is at least one question with no True answers in the survey')
valid = False
return(valid, msg)
def create_survey_(args):
''' Create survey template
Parse CSV training awareness survey file
Input: survey name, file (CSV)
    Returns questions, answers, answer_key, score_key and a status message
Format CSV comma separated: Two fields per line: item and content.
#Item: Q if question, T if answer true, F if answer false.
#Content: Question if item Q, possible answer if item was T or F.
'''
name, filename = args
questions = []
answers = []
try:
i = -1
ans_item = []
valid_format = True
# with open(filename) as file:
file = filename.readlines()
for line in file:
array = (line.strip().split(','))
if len(array) != 2:
valid_format = False
msg = 'ERROR: Exiting due to missformated file (len)'
return(None, None, None, None, msg)
# break
else:
item, content = array
if item == 'Q':
i += 1
questions.insert(i,content)
if len(ans_item) > 0:
answers.insert(i-1, ans_item)
ans_item = []
elif item in ['T','F']:
ans_item.append([item,content])
else:
valid_format = False
msg = ('ERROR: Exiting due to missformated file (item)')
return(None, None, None, None, msg)
# break
if valid_format and len(ans_item) > 0:
answers.insert(i, ans_item)
valid, msg = validate_survey(questions, answers)
if valid:
scoring = create_survey_scoring(answers)
else:
return(None, None, None, None, msg)
answer_key, score_key = create_keys(scoring)
return(questions, answers, answer_key, score_key, 'OK')
except:
return(None, None, None, None, 'Unexpected error while processing survey')
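# Illustrative sketch (not part of the original module): a minimal CSV accepted by
# create_survey_ (two fields per line, comma separated; Q starts a question, T/F
# mark answers). The questions and answers below are made up for the example.
#   Q,Is phishing a threat?
#   T,Yes
#   F,No
#   Q,Which are warning signs?
#   T,Unexpected attachments
#   T,Urgent requests for credentials
#   F,A correctly spelled sender address
# Note that create_survey_ expects `filename` to be an already opened file-like
# object (it calls filename.readlines()), despite its name.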
def list_surveys_():
''' List Survey templates
Get survey templates and high level details
Input: none
Return: list of survey templates if applies or None,
and message indicating OK or specific error (string). '''
try:
all_surveys = Survey.objects.all()
except:
msg = 'ERROR: Exiting while retrieving surveys from database or non existent'
return(None, msg)
if len(all_surveys) > 0:
msg = 'OK'
else:
msg = 'ERROR: There are no survey in database!'
return(all_surveys, msg)
def get_detail_survey_(survey_id):
''' Get Survey templates
Get specific template, questions and answers
Input: Survey ID
Return: survey object, queryset of questions and queryset of answers
(or None for the three), and message indicating OK or specific error (string). '''
msg = 'OK'
try:
survey = Survey.objects.get(id=survey_id)
questions = Question.objects.filter(survey=survey_id)
answers = Answer.objects.none()
for question in questions:
partial_answers = Answer.objects.filter(question=question.id)
answers = list(chain(answers, partial_answers))
except:
msg = 'ERROR: Exiting while retrieving survey from database or non existent'
return(None, None, None, msg)
if len(answers)==0:
msg = 'ERROR: Exiting while retrieving survey from database or non existent'
return(None, None, None, msg)
return(survey, questions, answers, msg)
def remove_survey_(survey_id):
''' Remove survey templates
Remove specific template, survey, questions and answers
Input: survey ID
Return: survey ID, and message indicating OK or specific error (string). '''
msg = 'OK'
try:
survey = Survey.objects.get(id=survey_id)
survey.delete()
except:
msg = 'ERROR: Exiting while retrieving survey from database or non existent'
return(survey_id, msg)
def read_template(filename):
''' Read email template
Read the email template for sending the URL link for the survey
Input: filename within /modules in the Survey app
Return: Template object. '''
with open(filename, 'r', encoding='utf-8') as template_file:
template_file_content = template_file.read()
return Template(template_file_content)
def send_training_emails(email_host, email_template, email_origin, email_subject, email_password, message_template, names, emails, slugs):
    ''' Send training emails
#!/usr/bin/env python3
from abc import abstractmethod
from typing import List, Dict, Set, Optional
import io
import yaml
import csv
import re
import os
import logging
from BeanPorter.bpcml.ASTContext import ASTContext
from BeanPorter.bpcml.Tokenizer import Tokenizer
from BeanPorter.bpcml.Decls import RuleDecl
from beancount.ingest import cache
_DEFAULT_IMPORTER_COUNTER = 0
REQUIRED_TRANSACTION_PROPERTY_KEYS: Set[str] = frozenset([
'transaction_name',
'timestamp',
'complete',
])
OPTIONAL_TRANSACTION_PROPERTY_KEYS: Set[str] = frozenset([
'payee',
'time',
'debit_account',
'debit_amount',
'debit_currency',
'debit_cost',
'debit_price',
'credit_account',
'credit_amount',
'credit_currency',
'credit_cost',
'credit_price',
'tag',
'links',
])
TRANSACTION_PROPERTY_KEYS: Set[str] = frozenset().union(*[REQUIRED_TRANSACTION_PROPERTY_KEYS, OPTIONAL_TRANSACTION_PROPERTY_KEYS])
def make_default_impoter_name() -> str:
global _DEFAULT_IMPORTER_COUNTER
if _DEFAULT_IMPORTER_COUNTER == 0:
name = 'Default'
else:
name = 'Default-{c}'.format(c=(_DEFAULT_IMPORTER_COUNTER))
_DEFAULT_IMPORTER_COUNTER += 1
return name
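# Illustrative sketch (not part of the original module): the default importer names
# produced by successive calls, driven by the module-level counter.
#   >>> make_default_impoter_name()
#   'Default'
#   >>> make_default_impoter_name()
#   'Default-1'
#   >>> make_default_impoter_name()
#   'Default-2'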
def strip_blank(contents):
"""
strip the redundant blank in file contents.
"""
with io.StringIO(contents) as csvfile:
csvreader = csv.reader(csvfile, delimiter=",", quotechar='"')
rows = []
for row in csvreader:
rows.append(",".join(['"{}"'.format(x.strip()) for x in row]))
return "\n".join(rows)
class Developer:
def __init__(self, is_debug_enabled: bool = False):
self.is_debug_enabled = is_debug_enabled
@staticmethod
def make_with_serialization(serialization) -> Optional['Developer']:
if serialization is None:
return None
        if serialization.get('debug') is not None:
is_debug_enabled = serialization['debug'] == True
else:
is_debug_enabled = False
return Developer(is_debug_enabled=is_debug_enabled)
class FileNameProbe:
@staticmethod
def make_with_serialization(serialization) -> Optional['FileNameProbe']:
if serialization is None:
return None
probe: Optional['FileNameProbe'] = None
pattern = serialization.get('pattern')
if pattern is not None:
re_pattern = re.compile(re.escape(pattern))
probe = _FileNamePatternProbe(re_pattern)
prefix = serialization.get('prefix')
if prefix is not None:
if probe is not None:
raise AssertionError("A probe may have only one kind of configuration for file_name! There have been {}".format(probe.kind()))
assert(isinstance(prefix, str))
probe = _FileNamePrefixProbe(prefix)
suffix = serialization.get('suffix')
if suffix is not None:
            if probe is not None:
                raise AssertionError("A probe may have only one kind of configuration for file_name! There have been {}".format(probe.kind()))
assert(isinstance(suffix, str))
probe = _FileNameSuffixProbe(suffix)
return probe
@abstractmethod
def kind(self) -> str:
pass
class _FileNamePatternProbe(FileNameProbe):
def __init__(self, re_file_name_pattern: re.Pattern):
assert(isinstance(re_file_name_pattern, re.Pattern))
self.pattern = re_file_name_pattern
def kind(self) -> str:
return 'pattern'
def test(self, file_name: str) -> bool:
return self.pattern.match(file_name) is not None
class _FileNamePrefixProbe(FileNameProbe):
def __init__(self, prefix: str):
assert(isinstance(prefix, str))
self.prefix = prefix
def kind(self) -> str:
return 'prefix'
def test(self, file_name: str) -> bool:
last_component = os.path.basename(os.path.normpath(file_name))
return last_component.startswith(self.prefix)
class _FileNameSuffixProbe(FileNameProbe):
def __init__(self, suffix: str):
assert(isinstance(suffix, str))
self.suffix = suffix
    def kind(self) -> str:
return 'suffix'
def test(self, file_name: str) -> bool:
return file_name.endswith(self.suffix)
class Probe:
@staticmethod
def make_with_serialization(serialization) -> 'Probe':
if serialization is None:
return _NoProbe()
return _HasProbe(
FileNameProbe.make_with_serialization(serialization.get('file_name'))
)
@abstractmethod
def test(self, file: cache._FileMemo) -> bool:
pass
class _NoProbe(Probe):
def test(self, file: cache._FileMemo) -> bool:
return True
class _HasProbe(Probe):
def __init__(self, file_name_probe: Optional[FileNameProbe]):
self.file_name_probe = file_name_probe
def test(self, file: cache._FileMemo) -> bool:
is_file_name_matched = True
if self.file_name_probe is not None:
is_file_name_matched = self.file_name_probe.test(file.name)
return is_file_name_matched
class TableHeader:
@staticmethod
def make_with_serialization(serialization) -> 'TableHeader':
if serialization is None:
return _NoTableHeader()
return _LineSpecifiedTableHeader.make_with_serialization(serialization)
class _NoTableHeader(TableHeader):
pass
class _LineSpecifiedTableHeader(TableHeader):
@staticmethod
def make_with_serialization(serialization) -> '_LineSpecifiedTableHeader':
return _LineSpecifiedTableHeader(serialization['line'])
def __init__(self, line):
assert(isinstance(line, int))
self.line = line
class Stripper:
@staticmethod
def make_strippers_with_serialization(serialization) -> List['Stripper']:
if serialization is None:
return list()
strippers = list()
for action_name in serialization:
action_value = serialization[action_name]
strippers.append(Stripper.make_with_action(action_name, action_value))
return strippers
@staticmethod
def make_with_action(action_name, action_value) -> 'Stripper':
assert(isinstance(action_name, str))
if action_name == 'remove_first':
assert(isinstance(action_value, int))
return _RemoveFirstKStripper(action_value)
if action_name == 'remove_last':
assert(isinstance(action_value, int))
return _RemoveLastKStripper(action_value)
if action_name == 'remove_before':
assert(isinstance(action_value, str))
return _RemoveBeforePatternStripper(action_value, False)
if action_name == 'remove_after':
assert(isinstance(action_value, str))
return _RemoveAfterPatternStripper(action_value, False)
if action_name == 'remove_before_and_include':
assert(isinstance(action_value, str))
return _RemoveBeforePatternStripper(action_value, True)
if action_name == 'remove_after_and_include':
assert(isinstance(action_value, str))
return _RemoveAfterPatternStripper(action_value, True)
raise AssertionError('Unrecognized stripper action name: {}'.format(action_name))
@abstractmethod
def evaluate_in_file_contents(self, file_contents) -> Optional[int]:
pass
@abstractmethod
def is_leading_stripper(self) -> bool:
pass
@abstractmethod
def is_trailing_stripper(self) -> bool:
pass
class _RemoveFirstKStripper(Stripper):
def __init__(self, k: int):
self.k = k
def evaluate_in_file_contents(self, file_contents) -> Optional[int]:
reader = csv.reader(io.StringIO(strip_blank(file_contents)))
lines_count = sum(1 for _ in reader)
if lines_count < self.k:
return lines_count
return self.k
def is_leading_stripper(self) -> bool:
return True
    def is_trailing_stripper(self) -> bool:
        return False
class _RemoveLastKStripper(Stripper):
def __init__(self, k: int):
self.k = k
def evaluate_in_file_contents(self, file_contents) -> Optional[int]:
reader = csv.reader(io.StringIO(strip_blank(file_contents)))
        lines_count = sum(1 for _ in reader)
        if lines_count < self.k:
            return -1
        return lines_count - self.k - 1
def is_leading_stripper(self) -> bool:
return False
    def is_trailing_stripper(self) -> bool:
        return True
class _RemoveBeforePatternStripper(Stripper):
""" Currently not implemented!
"""
def __init__(self, pattern: str, includes: bool):
self.pattern = pattern
self.includes = includes
def is_leading_stripper(self) -> bool:
return True
    def is_trailing_stripper(self) -> bool:
        return False
class _RemoveAfterPatternStripper(Stripper):
""" Currently not implemented!
"""
def __init__(self, pattern: str, includes: bool):
self.pattern = pattern
self.includes = includes
def is_leading_stripper(self) -> bool:
return False
    def is_trailing_stripper(self) -> bool:
        return True
class Transformer(object):
def __init__(self, name: str, patterns: Optional[Dict[str, str]], rules: Optional[Dict[str, RuleDecl]]):
Transformer.validate_rules(rules)
assert(isinstance(name, str))
assert(patterns is None or isinstance(patterns, dict))
assert(rules is None or isinstance(rules, dict))
self.name = name
self.patterns = patterns
self.rules = rules
def __str__(self) -> str:
patterns_desc: str = '\n'.join([' {k} : {v}'.format(k=k, v=self.patterns[k]) for k in self.patterns])
rules_desc: str = '\n'.join([' {k} : {v}'.format(k=k, v=self.rules[k]) for k in self.rules])
return """\
-
patterns:
{patterns}
rules:
{rules}
""".format(patterns=patterns_desc, rules=rules_desc)
@staticmethod
def make_transformers_with_serialization(serialization, tokenizer: Tokenizer) -> List['Transformer']:
if not isinstance(serialization, list):
return list()
return [Transformer.make_with_serialization(c, tokenizer) for c in serialization]
@staticmethod
def make_with_serialization(serialization, tokenizer: Tokenizer) -> 'Transformer':
if serialization is None:
return Transformer('Unnamed Transformer', None, None)
name = serialization.get('name', 'Unnamed Transformer')
        assert isinstance(name, str), 'name is {} in {}'.format(name, serialization)
patterns = serialization.get('patterns', dict())
if patterns is None:
patterns = dict()
        assert isinstance(patterns, dict), 'patterns is {} in {}'.format(patterns, serialization)
raw_rule = serialization.get('rules', dict())
        assert isinstance(raw_rule, dict), 'rules is {} in {}'.format(raw_rule, serialization)
rules: Dict[str, RuleDecl] = dict()
for each_key in raw_rule:
ast_context = ASTContext(tokenizer, raw_rule[each_key])
rules[each_key] = ast_context.make_syntax()
return Transformer(name, patterns, rules)
@staticmethod
def validate_rules(rules: Optional[Dict[str, str]]):
if rules is None:
return
for each_rule_name in rules:
if each_rule_name not in TRANSACTION_PROPERTY_KEYS:
raise Exception('Unexpected rule name: {r}'.format(r=each_rule_name))
def map_value(self, name: str, variables: Dict[str, str]) -> Optional[str]:
assert(isinstance(name, str))
assert(isinstance(variables, dict))
syntax = self.get_rule_syntax(name)
if syntax is None:
return None
return syntax.evaluate(variables)
def get_rule_syntax(self, name: str) -> Optional[RuleDecl]:
assert(isinstance(name, str))
return self.rules.get(name)
class Importer:
@staticmethod
def make_importers(config) -> List['Importer']:
if not isinstance(config, list):
return list()
return [Importer.make_importer(x) for x in config]
@staticmethod
def make_importer(config) -> 'Importer':
assert(config is not None)
name = config.get('name', None)
encoding = config.get('encoding', None)
probe = Probe.make_with_serialization(config.get('probe'))
table_header = TableHeader.make_with_serialization(config.get('table_header'))
strippers = Stripper.make_strippers_with_serialization(config.get('strippers'))
variable_map = config.get('variables', dict())
transformers = Transformer.make_transformers_with_serialization(config.get('transformers'), Tokenizer())
return Importer(
name,
encoding,
probe,
table_header,
strippers,
variable_map,
transformers
)
def __init__(
self,
name: Optional[str],
encoding: Optional[str],
probe: Probe,
table_header: TableHeader,
strippers: List[Stripper],
variable_map: Dict[str, str],
transformers: List[Transformer]
):
self.name = name if name is not None else make_default_impoter_name()
self.probe = probe
self.encoding = encoding
self.table_header = table_header
self.strippers = strippers
self.variable_map = variable_map
self.transformers = transformers
def extend_with_extension(self, extension: 'ImporterExtension'):
assert(isinstance(extension, ImporterExtension))
assert(self.name == extension.name)
if extension.probe is not None:
print('Importer extension may not have probe.')
print('Probe: {}'.format(extension.probe))
if extension.table_header is not None:
print('Importer extension may not have table header.')
print('Table header: {}'.format(extension.table_header))
self.strippers.extend(extension.strippers)
self.variable_map.update(extension.variable_map)
self.transformers.extend(extension.transformers)
def normalize(self, file_contents) -> List[List[str]]:
reader = csv.reader(io.StringIO(strip_blank(file_contents)))
normalized_contents = list()
stripped_leading_lines = self._get_stripped_leading_lines(file_contents)
stripped_trailing_lines = self._get_stripped_trailing_lines(file_contents)
line = next(reader, None)
line_number = 0
while line is not None:
previous_line = line
previous_line_number = line_number
line = next(reader, None)
line_number += 1
if stripped_leading_lines is not None:
if previous_line_number < stripped_leading_lines:
continue
if stripped_trailing_lines is not None:
if previous_line_number > stripped_trailing_lines:
continue
normalized_contents.append(previous_line)
return normalized_contents
def _get_stripped_leading_lines(self, file_contents) -> Optional[int]:
line_to_strip = None
for each_stripper in self.strippers:
if each_stripper.is_leading_stripper():
line = each_stripper.evaluate_in_file_contents(file_contents)
if line_to_strip is not None:
line_to_strip = max(line, line_to_strip)
else:
line_to_strip = line
return line_to_strip
def _get_stripped_trailing_lines(self, file_contents) -> Optional[int]:
line_to_strip = None
for each_stripper in self.strippers:
if each_stripper.is_trailing_stripper():
line = each_stripper.evaluate_in_file_contents(file_contents)
if line_to_strip is not None:
line_to_strip = min(line, line_to_strip)
else:
line_to_strip = line
return line_to_strip
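def _example_importer_from_config():
    """Minimal sketch of building an Importer from a bpcml-style config dict.

    The keys mirror exactly what Importer.make_importer() reads; the concrete
    values (file prefix, header line, stripper counts, variables) are made-up
    placeholders, not taken from a real bpcml file. 'transformers' is omitted
    because its rules would need the bpcml expression syntax parsed by
    Tokenizer/ASTContext.
    """
    config = {
        'name': 'ExampleBank',
        'encoding': 'utf-8',
        'probe': {'file_name': {'prefix': 'examplebank_'}},
        'table_header': {'line': 0},
        'strippers': {'remove_first': 1, 'remove_last': 2},
        'variables': {'currency': 'CNY'},
    }
    return Importer.make_importer(config)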
class ImporterExtension(Importer):
@staticmethod
def make_importers_with_serialization(serialization) -> List['ImporterExtension']:
        assert isinstance(serialization, list), "{}".format(serialization)
return [ImporterExtension.make_with_serialization(c) for c in serialization]
@staticmethod
def make_with_serialization(serialization, extracted_name: Optional[str] = None) -> 'ImporterExtension':
        assert(serialization is not
# Plotting specific imports:
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
## Plotting ##
def plot(x_name,y_names, x_array, y_arrays,y_axis, title='Figure',
legendlocation="best", y_colors=None,
y_format =None, x_format =None,figsize=(6,4),grid=True,formatter=False):
'''
Makes lineplots of the arrays using matplotlib
Args:
x_name(string) : Name of x-axis
y_names (list) : Containing strings with the names of the lineplots
x_array(array) : Data for x-variable
y_arrays(list) : Containing arrays with data for the y-variable of all plots
y_axis(string) : Name of y_axis
title(string) : Figure title
legendlocation(string): location of legend
        formatter (list)      : List of tick locator and formatting options for both axes
figsize (tuple) : Figure size
Returns:
fig, ax : matplotlib figure objects
'''
fig, ax = plt.subplots(figsize=figsize)
if not y_colors:
y_colors = [None for i in range(len(y_names))]
for y_array,y_name,y_color in zip(y_arrays,y_names,y_colors):
ax.plot(x_array,y_array,label=y_name,color=y_color)
ax.legend(loc=legendlocation)
#settings:
ax.set_title(title)
ax.grid(grid)
ax.set_xlabel(x_name)
ax.set_ylabel(y_axis)
if formatter:
ax.xaxis.set_major_locator(formatter[0])
ax.xaxis.set_major_formatter(formatter[1])
ax.yaxis.set_major_locator(formatter[2])
ax.yaxis.set_major_formatter(formatter[3])
return fig, ax
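def _example_plot():
    '''
    Minimal usage sketch for plot(): two curves on one axis. The data is
    generated here purely for illustration.
    '''
    x = np.linspace(0, 2 * np.pi, 100)
    return plot(x_name='x', y_names=['sin(x)', 'cos(x)'],
                x_array=x, y_arrays=[np.sin(x), np.cos(x)],
                y_axis='value', title='Example')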
def plot_hist(arrays,names=['first'],
x_label='x',y_label='Density',title='Figure',
alpha=0.5,legendlocation='best',figsize=(8,6),bins=50,
colors=None,grid=True,density=True):
'''
Makes histograms of the arrays using matplotlib
Args:
arrays (list) : list of arrays to be plotted
names (list) : Containing strings for labels
x_label (string): X-axis-label name
y_label (string): Y-axis-label name (default is density)
title (string) : Figure title
        alpha (float)   : transparency of the histogram plot
legendlocation(string): location of legend
figsize (tuple) : Figure size
bins (int) : Number of bins plotted
        colors (list)   : list of colors for the plots (if None, default colors are used)
grid (bool) : Whether to plot with grid
density (bool) : Whether to plot density or frequency
Returns:
fig, ax : matplotlib figure objects
'''
fig, ax = plt.subplots(figsize=figsize)
if not colors:
colors = [None for i in range(len(names))]
for array,name,color in zip(arrays,names,colors):
ax.hist(array,label=name,color=color,bins=bins,alpha=alpha,density=density)
ax.legend(loc=legendlocation)
#settings:
ax.set_title(title)
ax.grid(grid)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
return fig, ax
def plot_3d(x_grid,y_grid,z_grid,xlabel=r'$p_1$',ylabel=r'$p_2$',zlabel='Excess demand',
cmap=mpl.cm.jet,figsize=(10,6),title='figure',fig=None, ax=None,alpha=0.9,color=None):
'''
Make surface 3d plots
    Args:
        x_grid (array)  : Grid of x-values (e.g. from np.meshgrid)
        y_grid (array)  : Grid of y-values
        z_grid (array)  : Grid of z-values plotted as a surface
        xlabel (string) : X-axis label
        ylabel (string) : Y-axis label
        zlabel (string) : Z-axis label
        cmap            : Matplotlib colormap for the surface
        figsize (tuple) : Figure size (only used when fig is None)
        title (string)  : Figure title
        alpha (float)   : Transparency of the surface
        color           : Single surface color (passed alongside cmap)
        fig, ax         : Matplotlib figure objects. If None, the function creates its own
                          and also draws a reference surface at z = 0
Returns:
fig, ax : matplotlib figure objects
'''
zero_surface = False
if fig is None:
fig = plt.figure(figsize = figsize)
ax = fig.add_subplot(1,1,1,projection='3d')
zero_surface = True
# plot
ax.plot_surface(x_grid,y_grid,z_grid,cmap=cmap,color=color,alpha=alpha)
if zero_surface:
# Plot a surface around zero
zeroes_surface = np.zeros(x_grid.shape)
ax.plot_surface(x_grid,y_grid,zeroes_surface,color='black',alpha=0.5)
# Title options
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel)
#ax.invert_xaxis()
fig.tight_layout()
return fig, ax
## 2. AS-AD model ##
# We redefine our lambdified function to accept the par-dictionary, and check that they give the same result:
def redefine_fun(lambda_fun):
'''
    Redefines a lambdified sympy function so it can be called with the
    par-dictionary instead of the individual parameters.
    Args:
        lambda_fun (fun) : Lambda function to be redefined
    Returns:
        fun (fun) : Redefined function
        This function has arguments:
            y_t1 (float)  : The output gap in the previous period
            pi_t1 (float) : The inflation gap in the previous period
            v_t (float)   : The demand shock in the current period
            s_t (float)   : The supply shock in the current period
            s_t1 (float)  : The supply shock in the previous period
            par (dict)    : Dictionary containing values of parameters
'''
fun = lambda y_t1,pi_t1,v_t,s_t,s_t1, par : lambda_fun(y_t1,pi_t1,v_t,s_t,s_t1,par['alpha'],par['gamma'],par['h'],par['b'],par['phi'])
return fun
def ad(y_t,v_t,par):
'''
Aggregate demand
Args:
        y_t (float) : Output gap in current period
        v_t (float) : Demand shock in current period
        par (dict)  : Dictionary containing values of parameters
Returns
ad (float) : Aggregate demand
'''
h = par['h']
alpha =par['alpha']
b = par['b']
ad = 1/(h*alpha)*(v_t-(1+b*alpha)*y_t)
return ad
def sras(y_t, y_t1,pi_t1,s_t,s_t1,par):
'''
Short run aggregate supply
Args:
        y_t (float)   : Output gap in current period
        y_t1 (float)  : The output gap in the previous period
        pi_t1 (float) : The inflation gap in the previous period
        s_t (float)   : Supply shock in current period
        s_t1 (float)  : Supply shock in previous period
        par (dict)    : Dictionary containing values of parameters
Returns
sras (float) : Short run aggregate supply
'''
phi = par['phi']
gamma = par['gamma']
sras = pi_t1+gamma*y_t-phi*gamma*y_t1+s_t-phi*s_t1
return sras
def d_pers(v_t1,x_t,par):
'''
Args:
        v_t1 (float) : Demand shock in previous period
        x_t (float)  : Added demand shock in current period
    Returns:
        v_t (float)  : Demand shock in current period
'''
v_t = par['delta']*v_t1+x_t
return v_t
def s_pers(s_t1,c_t,par):
'''
Args:
        s_t1 (float) : Supply shock in previous period
        c_t (float)  : Added supply shock in current period
    Returns:
        s_t (float)  : Supply shock in current period
'''
s_t = par['omega']*s_t1+c_t
return s_t
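def _example_asad_curves():
    '''
    Sketch: evaluate the AD and SRAS curves above on a grid of output gaps for
    a single period and plot them with plot(). The parameter values in par are
    illustrative guesses, not the calibration used elsewhere in the project.
    '''
    par = {'alpha': 5.76, 'h': 0.5, 'b': 0.5, 'phi': 0.0, 'gamma': 0.075}
    y_grid = np.linspace(-2, 2, 100)
    pi_ad = ad(y_grid, v_t=0.1, par=par)                # inflation gap implied by AD
    pi_sras = sras(y_grid, y_t1=0.0, pi_t1=0.0,
                   s_t=0.0, s_t1=0.0, par=par)          # inflation gap implied by SRAS
    return plot('output gap', ['AD', 'SRAS'], y_grid, [pi_ad, pi_sras],
                y_axis='inflation gap', title='AS-AD (illustrative)')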
## 3. Exchange economy ##
def utility(x1,x2,x3,beta1,beta2,beta3, gamma):
utility = (x1**beta1*x2**beta2*x3**beta3)**gamma
return utility
def utility_distribution(x1s,x2s,x3s,x1s_equal,x2s_equal,x3s_equal,betas,gamma,plot_range=[0,4]):
'''
    Calculates the distribution of utility across all consumers, for a given gamma and for two consumption allocations:
    one derived from randomly distributed endowments and one from equally distributed endowments.
    Calculates the mean and variance, and makes two figures containing everything.
    Args:
        x1s (array)       : Consumption of good 1 for each consumer
        x2s (array)       : Consumption of good 2 for each consumer
        x3s (array)       : Consumption of good 3 for each consumer
        x1s_equal (array) : Consumption of good 1 for each consumer (equal distribution of endowments)
        x2s_equal (array) : Consumption of good 2 for each consumer (equal distribution of endowments)
        x3s_equal (array) : Consumption of good 3 for each consumer (equal distribution of endowments)
        betas (array)     : Containing beta for all consumers for all goods
        gamma (float)     : Parameter
        plot_range (list) : Containing min and max of the plotted x-axis range
    Returns:
        plot1 (bokeh.plotting.figure.Figure) : The figure for random endowments, which has to be passed
                                               to the bokeh.plotting command show() to be viewed
        plot2 (bokeh.plotting.figure.Figure) : The figure for equal endowments, which has to be passed
                                               to the bokeh.plotting command show() to be viewed
'''
    # NOTE: the plot_hist used below follows a bokeh-style signature (hist/edges,
    # plot_range, width, height) rather than the matplotlib plot_hist defined
    # above, and the Label annotations come from bokeh; the import below is an
    # assumption, since the original import is outside this excerpt.
    from bokeh.models import Label
    # Random endowments
utilitys = []
for i in range(len(x1s)):
utilitys.append(utility(x1s[i],x2s[i],x3s[i],betas[i,0],betas[i,1],betas[i,2], gamma))
hist, edges = np.histogram(utilitys, bins=150)
plot1 = plot_hist([hist], [edges],names= [''],plot_range=plot_range,
y_label='Observations',x_label='Utility',
title=f'Randomly distributed endowments, gamma = {gamma:.2f}',
width=500,height=350)
mean = np.mean(utilitys)
variance = np.var(utilitys)
meantext = Label(x=250, y=215, text=f'Mean = {mean:.4f}',
text_font_size='10pt',x_units='screen', y_units='screen')
vartext = Label(x=250, y=200, text=f'Variance = {variance:.4f}',
text_font_size='10pt',x_units='screen', y_units='screen')
plot1.add_layout(meantext)
plot1.add_layout(vartext)
# Equal endowments
utilitys_equal = []
for i in range(len(x1s_equal)):
utilitys_equal.append(utility(x1s_equal[i],x2s_equal[i],x3s_equal[i],betas[i,0],betas[i,1],betas[i,2], gamma))
hist, edges = np.histogram(utilitys_equal, bins=150)
plot2 = plot_hist([hist], [edges],names= [''],plot_range=plot_range,
y_label='Observations',x_label='Utility',
title=f'Equally distributed endowments, gamma = {gamma:.2f}',
width=500,height=350)
mean = np.mean(utilitys_equal)
variance = np.var(utilitys_equal)
meantext = Label(x=250, y=215, text=f'Mean = {mean:.4f}',
text_font_size='10pt',x_units='screen', y_units='screen')
vartext = Label(x=250, y=200, text=f'Variance = {variance:.4f}',
text_font_size='10pt',x_units='screen', y_units='screen')
plot2.add_layout(meantext)
plot2.add_layout(vartext)
return plot1, plot2
## Optimized functions
from numba import njit
@njit
def I(p,es):
return p@es
@njit
def demand(p,es,betas):
return (betas.T*I(p,es)).T/p
@njit
def excess_demand(p,es,betas):
total_demand = np.sum(demand(p,es,betas),axis=0)
supply = np.sum(es,axis=1)
return total_demand-supply
@njit
def fill_excesss_demand(es,betas,p1s,p2s,p3,excess_demands,precision):
# Calculate excess demand in all instances
for i in range(precision):
for j in range(precision):
p = np.array([p1s[i,j],p2s[i,j],p3])
excess_demands_i = excess_demand(p,es,betas)
for k in range(3):
excess_demands[k][i,j] = excess_demands_i[k]
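def _example_excess_demand():
    '''
    Sketch: evaluate excess demand for a small random economy using the njit
    functions above. Shapes follow those functions: es is (goods x agents),
    betas is (agents x goods) with rows summing to one. All numbers are random
    and purely illustrative.
    '''
    rng = np.random.default_rng(0)
    n_agents, n_goods = 50, 3
    es = rng.exponential(size=(n_goods, n_agents))           # endowments
    betas = rng.dirichlet(np.ones(n_goods), size=n_agents)   # budget shares
    p = np.array([1.5, 0.8, 1.0])                            # candidate prices
    return excess_demand(p, es, betas)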
## This function can't be optimized with numba because np.meshgrid is not supported
def prep_excess_d_plot(p1_bounds,p2_bounds,es,betas, precision=50):
'''
Prepares surface plot of excess demand
Args:
p1_bounds (tuple) : upper and lower bound of p1 range
p2_bounds (tuple) : upper and lower bound of p2 range
        es (array) : Endowments for agents in the model
betas (array) : Budget shares spend on each good for agents in the model
precision (int) : Precision for the plotted grid
Returns
p1s (array) : Price of good 1 grid
p2s (array) : Price of good 2 grid
excess_demands (array) : grid for calculated excess demand
'''
    # Normalize prices by setting p3 as the numeraire
p3 = 1
{7,D}
5 Ct u0 {1,S}
6 Cdd u0 {3,D}
7 Cd u0 {4,D}
8 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-Cd)(Cds-Cds)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 CS u0 {1,S} {8,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Ct u0 {1,S}
7 Cd u0 {4,D}
8 S2d u0 {3,D}
9 C u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-S2d)(Cds-Cds)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 CS u0 {1,S} {8,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Ct u0 {1,S}
7 Cd u0 {4,D}
8 S2d u0 {3,D}
9 S2d u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=SCtCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 CS u0 {1,S} {7,D}
4 Ct u0 {1,S}
5 Cs u0 {1,S}
6 S2d u0 {2,D}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=SCbCb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 CS u0 {1,S} {7,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 S2d u0 {2,D}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cd)CsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 C u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cds)CsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 Cd u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd)CsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 Cdd u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-Cd)CsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 CS u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Cs u0 {1,S}
6 Cs u0 {1,S}
7 S2d u0 {3,D}
8 C u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-S2d)CsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 CS u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Cs u0 {1,S}
6 Cs u0 {1,S}
7 S2d u0 {3,D}
8 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=SCbCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 CS u0 {1,S} {7,D}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
6 S2d u0 {2,D}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cd)CbCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
6 C u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cds)CbCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
6 Cd u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd)CbCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
6 Cdd u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-S2d)CbCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 CS u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Cb u0 {1,S}
6 Ct u0 {1,S}
7 S2d u0 {3,D}
8 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-Cd)CbCt",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 CS u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Cb u0 {1,S}
6 Ct u0 {1,S}
7 S2d u0 {3,D}
8 C u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=SCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 CS u0 {1,S} {7,D}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 S2d u0 {2,D}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cd)CbCb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 C u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cds)CbCb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 Cd u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd)CbCb",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 Cdd u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-S2d)CbCb",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 CS u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Cb u0 {1,S}
6 Cb u0 {1,S}
7 S2d u0 {3,D}
8 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-Cd)CbCb",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 CS u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Cb u0 {1,S}
6 Cb u0 {1,S}
7 S2d u0 {3,D}
8 C u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 CS u0 {1,S} {8,D}
4 Cd u0 {1,S} {6,D}
5 Ct u0 {1,S}
6 C u0 {4,D}
7 S2d u0 {2,D}
8 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cds)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 CS u0 {1,S} {8,D}
4 Cd u0 {1,S} {6,D}
5 Ct u0 {1,S}
6 Cd u0 {4,D}
7 S2d u0 {2,D}
8 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cdd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 CS u0 {1,S} {8,D}
4 Cd u0 {1,S} {6,D}
5 Ct u0 {1,S}
6 Cdd u0 {4,D}
7 S2d u0 {2,D}
8 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cdd-Cd)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 CS u0 {1,S} {7,D}
4 CS u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Ct u0 {1,S}
7 S2d u0 {3,D}
8 S2d u0 {4,D}
9 C u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cdd-S2d)Ct",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 CS u0 {1,S} {7,D}
4 CS u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Ct u0 {1,S}
7 S2d u0 {3,D}
8 S2d u0 {4,D}
9 S2d u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cd)Cs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3
#!/usr/bin/env python3
#----------------------------------------------------------------------------------------------------------------------#
# #
# Tuplex: Blazing Fast Python Data Science #
# #
# #
# (c) 2017 - 2021, Tuplex team #
# Created by <NAME> first on 1/1/2021 #
# License: Apache 2.0 #
#----------------------------------------------------------------------------------------------------------------------#
import cloudpickle
import sys
from .libexec.tuplex import _Context, _DataSet
from tuplex.utils.reflection import get_source as get_udf_source
from tuplex.utils.reflection import get_globals
from tuplex.utils.source_vault import SourceVault
from .exceptions import classToExceptionCode
# signed 64bit limit
max_rows = 9223372036854775807
class DataSet:
def __init__(self):
self._dataSet = None
def unique(self):
""" removes duplicates from Dataset (out-of-order). Equivalent to a DISTINCT clause in a SQL-statement.
Returns:
tuplex.dataset.Dataset: A Tuplex Dataset object that allows further ETL operations.
"""
assert self._dataSet is not None, 'internal API error, datasets must be created via context object'
ds = DataSet()
ds._dataSet = self._dataSet.unique()
return ds
def map(self, ftor):
""" performs a map operation using the provided udf function over the dataset and
returns a dataset for further processing.
Args:
ftor (lambda) or (function): a lambda function, e.g. ``lambda x: x`` or an identifier to a function. \
Currently there are two supported syntactical options for functions. A function may either take a \
single parameter which is then interpreted as tuple of the underlying data or a list of parameters, \
            e.g. ``lambda a, b: a + b`` would sum the two columns. If there is no match, Tuplex will \
            point out the mismatch whenever an action is called.
Returns:
tuplex.dataset.DataSet: A Tuplex Dataset object that allows further ETL operations
"""
assert self._dataSet is not None, 'internal API error, datasets must be created via context object'
assert ftor is not None, 'need to provide valid functor'
code = ''
# try to get code from vault (only lambdas supported yet!)
try:
# convert code object to str representation
code = get_udf_source(ftor)
except Exception as e:
raise Exception('Could not extract code for {}. Details:\n{}'.format(ftor, e)) from None
g = get_globals(ftor)
ds = DataSet()
ds._dataSet = self._dataSet.map(code, cloudpickle.dumps(ftor), g)
return ds
def filter(self, ftor):
""" performs a map operation using the provided udf function over the dataset and
returns a dataset for further processing.
Args:
ftor (lambda) or (function): a lambda function, e.g. ``lambda x: x`` or an identifier to a function. \
that returns a boolean. Tuples for which the functor returns ``True`` will be kept, the others discarded.
Returns:
tuplex.dataset.DataSet: A Tuplex Dataset object that allows further ETL operations
"""
assert self._dataSet is not None, 'internal API error, datasets must be created via context object'
assert ftor is not None, 'need to provide valid functor'
code = ''
# try to get code from vault (only lambdas supported yet!)
try:
# convert code object to str representation
code = get_udf_source(ftor)
except Exception as e:
            raise Exception('Could not extract code for {}. Details:\n{}'.format(ftor, e))
g = get_globals(ftor)
ds = DataSet()
ds._dataSet = self._dataSet.filter(code, cloudpickle.dumps(ftor), g)
return ds
def collect(self):
""" action that generates a physical plan, processes data and collects result then as list of tuples.
Returns:
(list): A list of tuples, or values if the dataset has only one column.
"""
assert self._dataSet is not None, 'internal API error, datasets must be created via context objects'
return self._dataSet.collect()
def take(self, nrows=5):
""" action that generates a physical plan, processes data and collects the top results then as list of tuples.
Args:
nrows (int): number of rows to collect. Per default ``5``.
Returns:
(list): A list of tuples
"""
assert isinstance(nrows, int), 'num rows must be an integer'
assert nrows > 0, 'please specify a number greater than zero'
assert self._dataSet is not None, 'internal API error, datasets must be created via context objects'
return self._dataSet.take(nrows)
def show(self, nrows=None):
""" action that generates a physical plan, processes data and prints results as nicely formatted
ASCII table to stdout.
Args:
nrows (int): number of rows to collect. If ``None`` all rows will be collected
"""
assert self._dataSet is not None, 'internal API error, datasets must be created via context objects'
# if optional value is None or below zero, simply return all rows. Else only up to nrows!
if nrows is None or nrows < 0:
nrows = -1
self._dataSet.show(nrows)
def resolve(self, eclass, ftor):
""" Adds a resolver operator to the pipeline. The signature of ftor needs to be identical to the one of the preceding operator.
Args:
eclass: Which exception to apply resolution for, e.g. ZeroDivisionError
ftor: A function used to resolve this exception. May also produce exceptions.
Returns:
tuplex.dataset.DataSet: A Tuplex Dataset object that allows further ETL operations
"""
# check that predicate is a class for an exception class
assert issubclass(eclass, Exception), 'predicate must be a subclass of Exception'
# translate to C++ exception code enum
ec = classToExceptionCode(eclass)
assert self._dataSet is not None, 'internal API error, datasets must be created via context objects'
assert ftor is not None, 'need to provide valid functor'
code = ''
# try to get code from vault (only lambdas supported yet!)
try:
# convert code object to str representation
code = get_udf_source(ftor)
except Exception as e:
            raise Exception('Could not extract code for {}. Details:\n{}'.format(ftor, e))
g = get_globals(ftor)
ds = DataSet()
ds._dataSet = self._dataSet.resolve(ec, code, cloudpickle.dumps(ftor), g)
return ds
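    # Example (sketch): a typical pipeline over these operators. Obtaining the
    # initial DataSet requires a tuplex Context (defined elsewhere in the
    # package), so the parallelize() call below is an assumption about the API.
    #
    #   from tuplex import Context
    #   ds = Context().parallelize([(1, 2), (3, 0), (5, 6)])
    #   out = ds.map(lambda a, b: a / b) \
    #           .resolve(ZeroDivisionError, lambda a, b: 0.0) \
    #           .collect()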
def withColumn(self, column, ftor):
""" appends a new column to the dataset by calling ftor over existing tuples
Args:
column: name for the new column/variable. If column exists, its values will be replaced
ftor: function to call
Returns:
tuplex.dataset.DataSet: A Tuplex Dataset object that allows further ETL operations
"""
assert self._dataSet is not None, 'internal API error, datasets must be created via context object'
assert ftor is not None, 'need to provide valid functor'
assert isinstance(column, str), 'column needs to be a string'
code = ''
# try to get code from vault (only lambdas supported yet!)
try:
# convert code object to str representation
code = get_udf_source(ftor)
except Exception as e:
            raise Exception('Could not extract code for {}. Details:\n{}'.format(ftor, e))
g = get_globals(ftor)
ds = DataSet()
ds._dataSet = self._dataSet.withColumn(column, code, cloudpickle.dumps(ftor), g)
return ds
def mapColumn(self, column, ftor):
""" maps directly one column. UDF takes as argument directly the value of the specified column and will overwrite
that column with the result. If you need access to multiple columns, use withColumn instead.
If the column name already exists, it will be overwritten.
Args:
column (str): name for the column to map
ftor: function to call
Returns:
tuplex.dataset.DataSet: A Tuplex Dataset object that allows further ETL operations
"""
assert self._dataSet is not None, 'internal API error, datasets must be created via context object'
assert ftor is not None, 'need to provide valid functor'
assert isinstance(column, str), 'column needs to be a string'
code = ''
# try to get code from vault (only lambdas supported yet!)
try:
# convert code object to str representation
code = get_udf_source(ftor)
except Exception as e:
            raise Exception('Could not extract code for {}. Details:\n{}'.format(ftor, e)) from None
g = get_globals(ftor)
ds = DataSet()
ds._dataSet = self._dataSet.mapColumn(column, code, cloudpickle.dumps(ftor), g)
return ds
def selectColumns(self, columns):
""" selects a subset of columns as defined through columns which is a list or a single column
Args:
columns: list of strings or integers. A string should reference a column name, whereas as an integer refers to an index. Indices may be negative according to python rules. Order in list determines output order
Returns:
tuplex.dataset.DataSet: A Tuplex Dataset object that allows further ETL operations
"""
assert self._dataSet is not None, 'internal API error, datasets must be created via context object'
# syntatic sugar, allow single column, list, tuple, ...
if isinstance(columns, (str, int)):
columns = [columns]
if isinstance(columns, tuple):
columns = list(columns)
assert(isinstance(columns, list))
for el in columns:
assert isinstance(el, (str, int)), 'element {} must be a string or int'.format(el)
ds = DataSet()
ds._dataSet = self._dataSet.selectColumns(columns)
return ds
def renameColumn(self, oldColumnName, newColumnName):
""" rename a column in dataset
Args:
oldColumnName: str, old column name. Must exist.
            newColumnName: str, new
"""Module providing graphs available from KGOBO.
References
----------
Please cite:
```bib
@misc{kgobo,
title = "KG-OBO",
year = "2021",
author = "{<NAME> and <NAME>}",
howpublished = {\\url{https://github.com/Knowledge-Graph-Hub/kg-obo}},
note = {Online; accessed 14 September 2021}
}
```
"""
from ensmallen import Graph # pylint: disable=import-error
from .automatic_graph_retrieval import AutomaticallyRetrievedGraph
def MOD(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "10-03-2021-14-36", **kwargs
) -> Graph:
"""Return MOD graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "10-03-2021-14-36"
Version to retrieve
The available versions are:
- 1.031.4
- 10-03-2021-14-36
"""
return AutomaticallyRetrievedGraph(
"MOD", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def FBBT(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-24", **kwargs
) -> Graph:
"""Return FBBT graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2022-02-24"
Version to retrieve
The available versions are:
- 2022-04-13
- 2021-09-01
- 2021-10-14
- 2021-12-09
- 2022-01-27
- 2022-02-24
"""
return AutomaticallyRetrievedGraph(
"FBBT", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
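# Example (sketch): retrieving one of these graphs. The loader downloads or
# reads a cached copy, so this needs network access or a populated cache; the
# version string must be one of those listed in the function's docstring.
#   g = FBBT(version="2022-02-24", directed=False)
#   print(g)  # ensmallen Graph report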
def BTO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-04-27", **kwargs
) -> Graph:
"""Return BTO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2021-04-27"
Version to retrieve
The available versions are:
- 2021-10-26
- 2021-04-27
"""
return AutomaticallyRetrievedGraph(
"BTO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def CHMO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-02-17", **kwargs
) -> Graph:
"""Return CHMO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2022-02-17"
Version to retrieve
The available versions are:
- 2022-04-19
- no_version
- 2022-02-17
"""
return AutomaticallyRetrievedGraph(
"CHMO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def OBA(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-01-19", **kwargs
) -> Graph:
"""Return OBA graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2022-01-19"
Version to retrieve
The available versions are:
- 2022-05-11
- 13-11-2015-10-21
- 2021-12-03
- 2022-01-19
"""
return AutomaticallyRetrievedGraph(
"OBA", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def PSO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-05-19", **kwargs
) -> Graph:
"""Return PSO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2020-05-19"
Version to retrieve
The available versions are:
- 2020-05-19
"""
return AutomaticallyRetrievedGraph(
"PSO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def OGSF(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "11-22-2014", **kwargs
) -> Graph:
"""Return OGSF graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "11-22-2014"
Version to retrieve
The available versions are:
- 11-22-2014
"""
return AutomaticallyRetrievedGraph(
"OGSF", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def MCO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2019-05-15", **kwargs
) -> Graph:
"""Return MCO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2019-05-15"
Version to retrieve
The available versions are:
- 2019-05-15
"""
return AutomaticallyRetrievedGraph(
"MCO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def OPMI(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "Vision-Release--1.0.130", **kwargs
) -> Graph:
"""Return OPMI graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "Vision-Release--1.0.130"
Version to retrieve
The available versions are:
- Vision-Release--1.0.130
"""
return AutomaticallyRetrievedGraph(
"OPMI", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def FBDV(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", | |
# Source: kianheus/uwb-simulator
"""The main window of the UWBSimulator GUI
This file contains the code for the main window of the graphical user
interface (GUI) of the UWBsim. In the main window, simulation parameters
can be changed and the simulation can be started.
Classes:
MainWindow: The main window of the simulator GUI
"""
import os
import csv
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from PyQt5 import QtGui
import pyqtgraph as pg
import yaml
import math
import numpy as np
import UWBsim
from UWBsim.interface.simulation_window import SimulationWindow
from UWBsim.interface.anchor_position_window import AnchorPositionWindow
from UWBsim.simulation import SimulationParams
import UWBsim.utils.uwb_ranging as uwb
class MainWindow(QtWidgets.QWidget):
"""Main window for the UWBsim GUI
The main window of the UWBsim GUI can be split in two parts. On the
left hand side, the data for the simulation can be chosen and
adjusted. This includes selection of a logfile with input data,
selection of what ranging data and additional measurements to use
and the possibility to enable and position up to eight UWB anchors.
In the case of generated UWB measurements, the noise profile can be
tuned as well. On the right side, the estimators for comparison can
be selected and tuned individually.
"""
def __init__(self, *args, **kwargs):
"""Initializes and draws the main window. """
super(MainWindow, self).__init__(*args, **kwargs)
self.threadpool = QtCore.QThreadPool()
self.setWindowTitle("UWB Simulator")
# Load previous settings if it exists, use defaults otherwise
params_reference = SimulationParams()
if os.path.isfile(UWBsim.PREFERENCE_FILE):
with open(UWBsim.PREFERENCE_FILE) as f:
params = yaml.load(f, Loader=yaml.FullLoader)
else:
params = params_reference
# Initialize sub-windows
self.anchor_position_dialog = AnchorPositionWindow(
params.ranging.anchor_positions)
self.sim_window = SimulationWindow()
self.sim_window.get_sim_params = self.get_sim_params
###############################################
# Create and place all elements of the window #
###############################################
outerLayout = QtWidgets.QVBoxLayout()
self.setLayout(outerLayout)
settingsWidget = QtWidgets.QWidget()
settingsLayout = QtWidgets.QHBoxLayout()
settingsWidget.setLayout(settingsLayout)
outerLayout.addWidget(settingsWidget)
## DATA
self.dataBox = QtWidgets.QGroupBox('Data')
dataLayout = QtWidgets.QGridLayout()
self.dataBox.setLayout(dataLayout)
settingsLayout.addWidget(self.dataBox)
dataFileBox = QtWidgets.QGroupBox('Log File')
dataFileLayout = QtWidgets.QHBoxLayout()
dataFileBox.setLayout(dataFileLayout)
dataLayout.addWidget(dataFileBox, 0, 0, 1, 2)
self.file_select_lineEdit = QtWidgets.QLineEdit(params.drone.logfile)
file_select_button = QtWidgets.QPushButton('Browse')
file_select_button.clicked.connect(self._file_select_button_clicked)
dataFileLayout.addWidget(self.file_select_lineEdit)
dataFileLayout.addWidget(file_select_button)
## RANGING
rangingBox = QtWidgets.QGroupBox('UWB Ranging')
rangingLayout = QtWidgets.QVBoxLayout()
rangingBox.setLayout(rangingLayout)
dataLayout.addWidget(rangingBox, 1,0)
self.uwb_log_radio = QtWidgets.QRadioButton('Use log data')
self.uwb_log_radio.setObjectName('OriginalRanging')
self.uwb_generate_twr_radio = QtWidgets.QRadioButton('Generate TWR')
self.uwb_generate_tdoa_radio = QtWidgets.QRadioButton('Generate TdoA')
if params.ranging.source == uwb.RangingSource.LOG:
self.uwb_log_radio.setChecked(True)
elif params.ranging.rtype == uwb.RangingType.TWR:
self.uwb_generate_twr_radio.setChecked(True)
elif params.ranging.rtype == uwb.RangingType.TDOA:
self.uwb_generate_tdoa_radio.setChecked(True)
else:
self.uwb_log_radio.setChecked(True)
self.uwb_log_radio.toggled.connect(self._ranging_toggled)
self.uwb_generate_twr_radio.toggled.connect(self._ranging_toggled)
self.uwb_generate_tdoa_radio.toggled.connect(self._ranging_toggled)
self.ranging_interval_lb = QtWidgets.QLabel('Ranging Interval (s):')
self.ranging_interval_lb.setDisabled(True)
self.ranging_interval=QtWidgets.QLineEdit(str(params.ranging.interval))
self.ranging_interval.setValidator(QtGui.QDoubleValidator())
self.ranging_interval.setDisabled(self.uwb_log_radio.isChecked())
rangingLayout.addWidget(self.uwb_log_radio)
rangingLayout.addWidget(self.uwb_generate_twr_radio)
rangingLayout.addWidget(self.uwb_generate_tdoa_radio)
rangingLayout.addWidget(self.ranging_interval_lb)
rangingLayout.addWidget(self.ranging_interval)
## NOISE
self.noiseBox = QtWidgets.QGroupBox('Noise')
self.noiseBox.setDisabled(self.uwb_log_radio.isChecked())
noiseLayout = QtWidgets.QHBoxLayout()
self.noiseBox.setLayout(noiseLayout)
dataLayout.addWidget(self.noiseBox,1,1,2,1)
noiseSettingsBox = QtWidgets.QWidget()
noiseSettingsLayout = QtWidgets.QFormLayout()
noiseSettingsBox.setLayout(noiseSettingsLayout)
noiseLayout.addWidget(noiseSettingsBox)
# Gaussian
self.noise_gaussian_radio = QtWidgets.QRadioButton()
self.noise_gaussian_radio.setChecked(
params.ranging.source==uwb.RangingSource.GENERATE_GAUSS)
self.noise_gaussian_radio.toggled.connect(self._noise_ht_toggled)
self.noise_sigma = QtWidgets.QLineEdit(str(params.ranging.gauss_sigma))
self.noise_sigma.setValidator(QtGui.QDoubleValidator())
self.noise_sigma.textChanged.connect(self._noise_update)
# Heavy-Tailed Cauchy
self.noise_htCauchy_radio = QtWidgets.QRadioButton()
self.noise_htCauchy_radio.setChecked(
params.ranging.source==uwb.RangingSource.GENERATE_HT_CAUCHY)
self.noise_htCauchy_radio.toggled.connect(self._noise_ht_toggled)
self.noise_cauchy_ratio = QtWidgets.QLineEdit(str(params.ranging.htc_ratio))
self.noise_cauchy_ratio.setDisabled(not self.noise_htCauchy_radio.isChecked())
self.noise_cauchy_ratio.setValidator(QtGui.QDoubleValidator(0.0, 1.0, 1))
self.noise_cauchy_ratio.textChanged.connect(self._noise_update)
self.noise_gamma = QtWidgets.QLineEdit(str(params.ranging.htc_gamma))
self.noise_gamma.setDisabled(not self.noise_htCauchy_radio.isChecked())
self.noise_gamma.setValidator(QtGui.QDoubleValidator())
self.noise_gamma.textChanged.connect(self._noise_update)
# Heavy-Tailed Gamma
self.noise_htGamma_radio = QtWidgets.QRadioButton()
self.noise_htGamma_radio.setChecked(
params.ranging.source==uwb.RangingSource.GENERATE_HT_GAMMA)
self.noise_htGamma_radio.toggled.connect(self._noise_ht_toggled)
self.noise_mu = QtWidgets.QLineEdit(str(params.ranging.htg_mu))
self.noise_mu.setDisabled(not self.noise_htGamma_radio.isChecked())
self.noise_mu.setValidator(QtGui.QDoubleValidator())
self.noise_mu.textChanged.connect(self._noise_update)
self.noise_lambda = QtWidgets.QLineEdit(str(params.ranging.htg_lambda))
self.noise_lambda.setDisabled(not self.noise_htGamma_radio.isChecked())
self.noise_lambda.setValidator(QtGui.QDoubleValidator())
self.noise_lambda.textChanged.connect(self._noise_update)
self.noise_k = QtWidgets.QLineEdit(str(params.ranging.htg_k))
self.noise_k.setDisabled(not self.noise_htGamma_radio.isChecked())
self.noise_k.setValidator(QtGui.QIntValidator())
self.noise_k.textChanged.connect(self._noise_update)
self.noise_scale = QtWidgets.QLineEdit(str(params.ranging.htg_scale))
self.noise_scale.setDisabled(not self.noise_htGamma_radio.isChecked())
self.noise_scale.setValidator(QtGui.QDoubleValidator())
self.noise_scale.textChanged.connect(self._noise_update)
# Add parameters to layout
#noiseSettingsLayout.addRow('Outliers (%):', self.noise_outlier_chance)
noiseSettingsLayout.addRow('Pure Gaussian', self.noise_gaussian_radio)
noiseSettingsLayout.addRow('Sigma:', self.noise_sigma)
noiseSettingsLayout.addRow('HT Cauchy:', self.noise_htCauchy_radio)
noiseSettingsLayout.addRow('Ratio (TdoA):', self.noise_cauchy_ratio)
noiseSettingsLayout.addRow('Gamma:', self.noise_gamma)
noiseSettingsLayout.addRow('HT Gamma:', self.noise_htGamma_radio)
noiseSettingsLayout.addRow('mu:', self.noise_mu)
noiseSettingsLayout.addRow('lambda:', self.noise_lambda)
noiseSettingsLayout.addRow('k:', self.noise_k)
noiseSettingsLayout.addRow('scale:', self.noise_scale)
# Plot area for noise shape
noise_plot_wg = pg.GraphicsLayoutWidget()
noise_plot_wg.setBackground('#FAFAFA')
noiseLayout.addWidget(noise_plot_wg)
self.noise_plot = noise_plot_wg.addPlot()
self.noise_plot.setTitle('Noise PDF')
self.noise_plot.setLabels(left='f(x)', bottom='x')
self.noise_plot.showGrid(x=True, y=True)
self.noise_plot.addItem(pg.InfiniteLine(pos=[0,0], pen='#AAAAAA'),
ignoreBounds=True)
self.noise_plot_line = self.noise_plot.plot([], [], pen='#1F77B4')
self._noise_update()
## MEASUREMENTS
measurementBox = QtWidgets.QGroupBox('Measurements')
measurementLayout = QtWidgets.QGridLayout()
measurementBox.setLayout(measurementLayout)
dataLayout.addWidget(measurementBox, 2,0)
self.altitude_checkbox = QtWidgets.QCheckBox('Altitude')
self.altitude_checkbox.setChecked(params.drone.altitude_enable)
measurementLayout.addWidget(self.altitude_checkbox,1,0)
## ANCHORS
anchorBox = QtWidgets.QGroupBox('Anchors')
anchorLayout = QtWidgets.QGridLayout()
anchorBox.setLayout(anchorLayout)
anchor_enable = [False for _ in range(8)]
for i,en in enumerate(params.ranging.anchor_enable):
anchor_enable[i] = False or en
self.anchor_enable_checkbox = []
i = 0
for row in range(2):
for col in range(4):
checkbox = QtWidgets.QCheckBox(
'ID {}\n({:.2f},{:.2f},{:.2f})'.format(
i, *self.anchor_position_dialog.anchor_positions[i]))
checkbox.setChecked(anchor_enable[i])
checkbox.setStyleSheet("QCheckBox:unchecked {color: gray}")
self.anchor_enable_checkbox.append(checkbox)
anchorLayout.addWidget(checkbox, row, col)
i += 1
anchor_position_button = QtWidgets.QPushButton('Anchor Positions')
anchor_position_button.clicked.connect(self._anchor_pos_button_clicked)
anchorLayout.addWidget(anchor_position_button, 2,0,3,4)
dataLayout.addWidget(anchorBox, 3, 0, 4, 2)
# ESTIMATORS
estimatorBox = QtWidgets.QGroupBox('Estimators')
estimatorLayout = QtWidgets.QGridLayout()
estimatorBox.setLayout(estimatorLayout)
settingsLayout.addWidget(estimatorBox)
# MHE
self.mheBox = QtWidgets.QGroupBox('MHE')
self.mheBox.setCheckable(True)
self.mheBox.setChecked(params.estimators.mhe.enable)
mheLayout = QtWidgets.QFormLayout()
self.mheBox.setLayout(mheLayout)
estimatorLayout.addWidget(self.mheBox,0,0,1,1)
self.mhe_rate = QtWidgets.QLineEdit(
str(params.estimators.mhe.rate))
self.mhe_rate.setValidator(QtGui.QIntValidator())
self.mhe_Nmax = QtWidgets.QLineEdit(
str(params.estimators.mhe.N_max))
self.mhe_Nmax.setValidator(QtGui.QIntValidator())
self.mhe_MHEIter = QtWidgets.QLineEdit(
str(params.estimators.mhe.iterations))
self.mhe_MHEIter.setValidator(QtGui.QIntValidator())
self.mhe_mu = QtWidgets.QLineEdit(str(params.estimators.mhe.mu))
self.mhe_mu.setValidator(QtGui.QDoubleValidator())
self.mhe_alpha = QtWidgets.QLineEdit(
str(params.estimators.mhe.alpha))
self.mhe_alpha.setValidator(QtGui.QDoubleValidator())
self.mhe_RANSACIter = QtWidgets.QLineEdit(
str(params.estimators.mhe.ransac_iterations))
self.mhe_RANSACIter.setValidator(QtGui.QIntValidator())
self.mhe_RANSACFraction = QtWidgets.QLineEdit(
str(params.estimators.mhe.ransac_fraction))
self.mhe_RANSACFraction.setValidator(QtGui.QDoubleValidator())
self.mhe_RANSACthreshold = QtWidgets.QLineEdit(
str(params.estimators.mhe.ransac_threshold))
self.mhe_RANSACthreshold.setValidator(QtGui.QDoubleValidator())
mheLayout.addRow(QtWidgets.QLabel('Rate [Hz]:'), self.mhe_rate)
mheLayout.addRow(QtWidgets.QLabel('N_max:'), self.mhe_Nmax)
mheLayout.addRow(QtWidgets.QLabel('Iterations (MHE):'), self.mhe_MHEIter)
mheLayout.addRow(QtWidgets.QLabel('Mu:'), self.mhe_mu)
mheLayout.addRow(QtWidgets.QLabel('Alpha:'), self.mhe_alpha)
mheLayout.addRow(QtWidgets.QLabel('Iterations (RANSAC):'), self.mhe_RANSACIter)
mheLayout.addRow(QtWidgets.QLabel('RANSAC Fraction:'), self.mhe_RANSACFraction)
mheLayout.addRow(QtWidgets.QLabel('RANSAC Threshold:'), self.mhe_RANSACthreshold)
# EKF
self.ekfBox = QtWidgets.QGroupBox('EKF')
self.ekfBox.setCheckable(True)
self.ekfBox.setChecked(params.estimators.ekf.enable)
ekfLayout = QtWidgets.QFormLayout()
self.ekfBox.setLayout(ekfLayout)
estimatorLayout.addWidget(self.ekfBox,1,0,1,1)
self.ekf_rate = QtWidgets.QLineEdit(str(params.estimators.ekf.rate))
self.ekf_rate.setValidator(QtGui.QIntValidator())
ekfLayout.addRow(QtWidgets.QLabel('Rate [Hz]:'), self.ekf_rate)
self.ekf_outlierThreshold = QtWidgets.QLineEdit(
str(params.estimators.ekf.outlierThreshold))
self.ekf_outlierThreshold.setValidator(QtGui.QDoubleValidator())
ekfLayout.addRow(QtWidgets.QLabel('Outlier Threshold:'), self.ekf_outlierThreshold)
## RUN BUTTON
run_button = QtWidgets.QPushButton('Run')
run_button.clicked.connect(self._run_button_clicked)
estimatorLayout.addWidget(run_button, 2, 0)
def _file_select_button_clicked(self):
"""Opens the logfile selection dialog
Opens a dialog to browse the filesystem and select a logfile, then
updates the text field that contains the location of the logfile. The
dialog opens in the 'data' folder. If the interaction is cancelled,
the previous logfile is retained in the text field.
"""
old_logfile = self.file_select_lineEdit.text()
logfile, _ = QtWidgets.QFileDialog.getOpenFileName(self.dataBox,
'Open File','data')
if logfile == "":
self.file_select_lineEdit.setText(old_logfile)
else:
self.file_select_lineEdit.setText(logfile)
self.logfile = logfile
def _ranging_toggled(self):
"""Enables/Disables ranging options based on ranging mode
Disables noise tuning and ranging interval inputs if the ranging source
is set to log and enables them otherwise. Call after ranging source has
been changed.
"""
if self.uwb_log_radio.isChecked():
self.noiseBox.setDisabled(True)
self.ranging_interval.setDisabled(True)
self.ranging_interval_lb.setDisabled(True)
else:
self.noiseBox.setDisabled(False)
self.ranging_interval.setDisabled(False)
self.ranging_interval_lb.setDisabled(False)
self._noise_update()
def _noise_ht_toggled(self):
"""Enables/Disables noise options based on noise type
Enables the noise parameters needed for the chosen noise type and
disables all other noise parameters, then updates the noise plot.
Call after noise type has been changed.
"""
if self.noise_gaussian_radio.isChecked():
self.noise_cauchy_ratio.setDisabled(True)
self.noise_gamma.setDisabled(True)
self.noise_mu.setDisabled(True)
self.noise_lambda.setDisabled(True)
self.noise_k.setDisabled(True)
self.noise_scale.setDisabled(True)
elif self.noise_htCauchy_radio.isChecked():
self.noise_cauchy_ratio.setDisabled(False)
self.noise_gamma.setDisabled(False)
self.noise_mu.setDisabled(True)
self.noise_lambda.setDisabled(True)
self.noise_k.setDisabled(True)
self.noise_scale.setDisabled(True)
elif self.noise_htGamma_radio.isChecked():
self.noise_cauchy_ratio.setDisabled(True)
self.noise_gamma.setDisabled(True)
self.noise_mu.setDisabled(False)
self.noise_lambda.setDisabled(False)
self.noise_k.setDisabled(False)
self.noise_scale.setDisabled(False)
self._noise_update()
def _noise_update(self):
"""Updates plot of the noise PDF
Updates the probability density function (PDF) of the noise displayed
in the noise plot based on the current noise parameters. Call after
noise parameters have been changed.
"""
sigma = float(self.noise_sigma.text())
if sigma==0:
return
fx = []
x = []
if self.noise_gaussian_radio.isChecked():
# pure gaussian
sigma2 = sigma*sigma
gauss_pref = 1/(sigma*np.sqrt(2*np.pi))
for i in np.linspace(-6*sigma, 6*sigma, 100):
f = gauss_pref * np.exp(-i*i/(2*sigma2))
x.append(i)
fx.append(f)
elif self.noise_htCauchy_radio.isChecked():
# Heavy tailed with Cauchy
sigma = float(self.noise_sigma.text())
gamma = float(self.noise_gamma.text())
sigma2 = sigma*sigma
gamma2 = gamma*gamma
if sigma==0 or gamma==0:
return
if self.uwb_generate_twr_radio.isChecked():
alpha = (2*math.pi*gamma) / (math.sqrt(2*math.pi*sigma2) + math.pi*gamma)
gauss_pref = (2-alpha)/(sigma*np.sqrt(2*np.pi))
cauchy_pref = alpha/(math.pi*gamma)
for i in np.linspace(-6*sigma, 0, 50):
f = gauss_pref * np.exp(-i*i/(2*sigma2))
x.append(i)
fx.append(f)
gamma2 = gamma*gamma
for i in np.linspace(0,12*sigma, 100):
f = cauchy_pref / (1+(i*i/gamma2))
x.append(i)
fx.append(f)
elif self.uwb_generate_tdoa_radio.isChecked():
ratio = float(self.noise_cauchy_ratio.text())
gauss_pref = 1/(sigma*np.sqrt(2*np.pi))
cauchy_pref = 1/(math.pi*gamma)
for i in np.linspace(-6*sigma, 6*sigma, 200):
gauss = gauss_pref * np.exp(-(i)*(i)/(2*sigma2))
cauchy = cauchy_pref / (1+((i*i)/gamma2))
f = ratio*cauchy + (1-ratio)*gauss
x.append(i)
fx.append(f)
elif self.noise_htGamma_radio.isChecked():
# Heavy tailed with Gamma
mu = float(self.noise_mu.text())
sigma = float(self.noise_sigma.text())
lmbd = float(self.noise_lambda.text())
k = int(self.noise_k.text())
alpha = float(self.noise_scale.text())
sig2 = sigma*sigma
gauss_pref = 1/(sigma*np.sqrt(2*np.pi))
G = math.gamma(k)
for i in np.linspace(-6*sigma, 12*sigma, 150):
gauss = gauss_pref * np.exp(-(i-mu)*(i-mu)/(2*sig2))
if i>0:
gamma = np.power(lmbd,k) * np.power(i,(k-1)) * np.exp(-lmbd*i) / G
else:
gamma = 0
x.append(i)
fx.append(gauss/(1+alpha) + alpha*gamma/(1+alpha))
self.noise_plot_line.setData(x, fx)
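# For reference, the branches above plot (this is a summary of the code, not new
# behavior): TDoA -> f(x) = ratio*Cauchy(x; gamma) + (1-ratio)*N(x; 0, sigma^2);
# TWR -> a Gaussian left half glued to a Cauchy right tail with mixing weight
# alpha = 2*pi*gamma / (sqrt(2*pi*sigma^2) + pi*gamma); HT-Gamma ->
# (N(x; mu, sigma^2) + alpha*Gamma(x; k, lambda)) / (1 + alpha).
# A hedged, commented-out sketch of the TDoA mixture using scipy (scipy is an
# assumption here and is not imported by this module):
#
#   from scipy import stats
#   xs = np.linspace(-6*sigma, 6*sigma, 200)
#   pdf = ratio * stats.cauchy.pdf(xs, scale=gamma) \
#         + (1.0 - ratio) * stats.norm.pdf(xs, scale=sigma)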
def _anchor_pos_button_clicked(self):
"""Opens dialog to edit, save and load anchor positions """
self.anchor_position_dialog.exec_()
for i, anchor in enumerate(self.anchor_enable_checkbox):
anchor.setText('ID {}\n({:.2f},{:.2f},{:.2f})'.format(i,
*self.anchor_position_dialog.anchor_positions[i]))
def _run_button_clicked(self):
"""Starts estimator simulation in separate window """
self.sim_window.run_button_clicked()
# Show/Bring window to front
if self.sim_window.isVisible():
self.sim_window.activateWindow()
else:
self.sim_window.show()
def get_sim_params(self):
"""Collects and returns simulation parameters from the main window
Creates a SimulationParams parameter structure with all the parameters
that were entered into the main window.
Returns:
Instance of SimulationParams containing all the parameter choices
made.
"""
params = SimulationParams()
# logfile and name
params.drone.logfile = self.file_select_lineEdit.text()
(_,params.name) = os.path.split(params.drone.logfile)
# ESTIMATORS
# MHE
params.estimators.mhe.enable = self.mheBox.isChecked()
params.estimators.mhe.rate = int(self.mhe_rate.text())
params.estimators.mhe.N_max = int(self.mhe_Nmax.text())
params.estimators.mhe.iterations = int(self.mhe_MHEIter.text())
params.estimators.mhe.mu = float(self.mhe_mu.text())
params.estimators.mhe.alpha = | |
[]
for iwell in range(self.nwell):
well = self.wells[iwell]
ga_this = ga[self.posl[iwell]:self.posl[iwell]+self.nsizl[iwell]]
popl.append(ga_this.sum())
for ich in range(well.nchan):
if self.channels[iwell][ich] is not None: continue
k = (well.kchl[ich] * ga_this).sum()
kl.append(k)
if self.channels[iwell][ich] is None: # dissoc
kdis += k
return kdis, kl, ga, popl
def solve(self, T, p, gguess=None, solver="", bandpcrit=1e-9, neig=1,
reactant=None, chemact_well_ch=None,
verbose=False, nthreads=None, maxmemGB=None):
""" solve ME by calling solve1d or solve2d function of the library
T: temperature in K
p: pressure in bar
gguess: initial guess for iterative solver
solver: see me2d.show_solvers()
bandpcrit: truncation threshold for banded matrix (None to use dense matrix)
neig: number of eigenpairs to be computed
reactant: name of the reactant well (only for InvIter solver for steady-state decomposition)
chemact_well_ch: recombination (well-name, channel) (for chemical activation with solver=LinEq; gguess has to be None)
verbose: verbose flag (True/False or integer)
nthreads: number of threads to be used in the computation
maxmemGB: max memory size used by the solver in GB
"""
logfp = sys.stdout
if bandpcrit is None: bandpcrit = -1.
if reactant is None: reactant = -1
elif reactant in self.names: reactant = self.names.index(reactant)
if chemact_well_ch is not None:
chemact_well = chemact_well_ch[0]
chemact_ch = chemact_well_ch[1]
if chemact_well in self.names: chemact_well = self.names.index(chemact_well)
if nthreads is not None:
max_threads_orig = get_num_threads()
set_num_threads(nthreads)
if maxmemGB is not None: self.lib.set_me_maxmem_GB(maxmemGB)
if verbose:
logfp.write("%s ver.%s: %s.solve started at %s\n"
% (__package__, __version__, self.__class__.__name__,
time.strftime("%a, %d %b %Y %H:%M:%S")))
logfp.write("Library: %s\n" % (self.libfn))
max_threads = get_num_threads()
if len(max_threads) > 0:
for x in max_threads: logfp.write("%s max threads = %s\n" % (x[0], x[1]))
if maxmemGB is not None:
logfp.write("Max memory size = %s GB\n" % (self.lib.get_me_maxmem_GB()))
solverstr = solver
if bandpcrit >= 0: solverstr += " (banded, pcrit=%.1e)" % (bandpcrit)
if reactant >= 0: solverstr += " (reactant = %s)" % (self.names[reactant])
if chemact_well_ch is not None:
solverstr += " (chemact: well = %s, ch = %s)" % (self.names[chemact_well], chemact_ch)
logfp.write("T=%.0f, p=%.2e, solver=%s\n" % (T, p, solverstr))
logfp.flush()
nsiz = np.array(self.nsizl, dtype=np.int64)
bandpcrit = np.full(self.nsiz, bandpcrit)
# p is given in bar
dens = p * 0.1 / constants.kb / T # molecule/cm3
ZM = np.array([well.Z * dens for well in self.wells]) # s^-1
kbT = T / constants.cm2k # cm^-1
if verbose is True: verbose = 1
elif verbose is False: verbose = 0
if gguess is not None: vec = np.array(gguess)
elif chemact_well_ch is not None:
# chemical activation flux
if self.channels[chemact_well][chemact_ch-1] is not None:
logfp.write("WARNING: THIS IS AN ISOMERIZATION CHANNEL!\n")
logfp.flush()
ga = self.rhoa * np.exp(- self.Ea * constants.cm2k / T)
ga_sel = ga[self.posl[chemact_well]:self.posl[chemact_well]+self.nsizl[chemact_well]]
flx = ga_sel * self.wells[chemact_well].kchl[chemact_ch-1]
vec = np.zeros(len(ga))
vec[self.posl[chemact_well]:self.posl[chemact_well]+self.nsizl[chemact_well]] = flx
else: vec = self.rhoa * np.exp(- self.Ea * constants.cm2k / T) # thermal distrib
vals = np.zeros(neig)
if len(vec) < neig*self.nsiz: vec = np.append(vec, np.zeros(neig*self.nsiz - len(vec)))
if self.is1d:
y_e = np.array([well.y_e for well in self.wells])
ainv_ea = np.concatenate([well.ainv_ea for well in self.wells])
ptype = np.full(self.nsiz, -1, dtype=np.int64) # downward prob. given
res = self.lib.solve1d_mw(self.nwell, nsiz, neig, vals, vec,
self.Ea, self.rhoa, self.ka,
y_e, ainv_ea, ptype,
len(self.kisom_sym), self.kisom_sym,
self.kisom_i, self.kisom_j,
bandpcrit, ZM, kbT, solver.encode(), reactant, verbose)
else:
y_e = np.array([well.y_e for well in self.wells])
y_J = np.array([well.y_J for well in self.wells])
ainv_ea = np.concatenate([well.ainv_ea for well in self.wells])
ainv_Ja = np.concatenate([well.ainv_Ja for well in self.wells])
ptype = np.full(self.nsiz, 0, dtype=np.int64) # symmetrized prob. given
res = self.lib.solve2d_mw(self.nwell, nsiz, neig, vals, vec,
self.Ea, self.ea, self.Ja, self.rhoa, self.ka,
y_e, y_J, ainv_ea, ainv_Ja, ptype,
len(self.kisom_sym), self.kisom_sym,
self.kisom_i, self.kisom_j,
bandpcrit, ZM, kbT, solver.encode(), reactant, verbose)
if nthreads is not None:
restore_num_threads(max_threads_orig)
if res < 0: raise ValueError("ERROR in solver: res = %g" % res)
ksol = -vals[0]
ga = vec[:self.nsiz]
kdis = 0.
kl = []
popl = []
for iwell in range(self.nwell):
well = self.wells[iwell]
ga_this = ga[self.posl[iwell]:self.posl[iwell]+self.nsizl[iwell]]
popl.append(ga_this.sum())
for ich in range(well.nchan):
if self.channels[iwell][ich] is not None: continue
k = (well.kchl[ich] * ga_this).sum()
kl.append(k)
if self.channels[iwell][ich] is None: # dissoc
kdis += k
kdiff = abs((kdis - ksol) / kdis)
if (kdiff > 0.01) and (not solver.startswith("LinEq")):
logfp.write("WARNING: |kdis-ksol|/|kdis| = %.2e (kdis=%.6e, ksol=%.6e)\n"
% (kdiff, kdis, ksol))
logfp.flush()
return kdis, kl, ga, popl, vals, vec
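# A minimal, hedged usage sketch for solve() (well names, filenames, channel
# indices and conditions below are illustrative placeholders, not taken from
# this module):
#
#   me = ME1DMW.read_from(
#       [("W1", "W1_rrkm.dat", 0.0), ("W2", "W2_rrkm.dat", -500.0)],
#       connections=[(("W1", 1), ("W2", 1))])
#   kdis, kl, ga, popl, vals, vec = me.solve(T=1000., p=1.0, solver="InvIter",
#                                            bandpcrit=1e-9, reactant="W1",
#                                            verbose=True, nthreads=4)
#   # kdis sums the dissociation (non-isomerization) channels, kl holds the
#   # per-channel rates, ga is the grain population vector and popl the
#   # per-well populations, as assembled at the end of solve() above.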
class ME1DMW(MEBaseMW):
""" multiple-well 1D master equation """
@classmethod
def read_from(cls, well_list, connections, maxE=None):
""" read well_list and return an instance of the class
arguments:
well_list: list of tuple (well_name, rrkm_file, relative_energy)
connections: list of isomerization channels ((well_name, ichan), (well_name, ichan))
(note: channel index starts from 1)
"""
names, wells = prepare_multi1d(well_list, maxE=maxE)
return cls(names, wells, connections)
def __init__(self, names, wells, connections):
"""
names: list of well names
wells: list of ME1D objects
connections: list of isomerization channels ((well_name, ichan), (well_name, ichan))
(note: channel index starts from 1)
"""
super().__init__()
self.is1d = True
self.nwell = len(names)
self.names = names
self.wells = wells
self.connections = connections
self.dE = None
self.topE = None
self.nsizl = []
self.posl = []
for iwell in range(self.nwell):
dE_this = self.wells[iwell].dE
if self.dE is None: self.dE = dE_this
elif self.dE != dE_this:
raise ValueError("inconsistent dE: %s, %s" % (self.dE, dE_this))
topE_this = self.wells[iwell].Ea[-1]
if self.topE is None: self.topE = topE_this
elif self.topE != topE_this:
raise ValueError("inconsistent topE: %s, %s" % (self.topE, topE_this))
self.nsizl.append(self.wells[iwell].nsiz)
if iwell == 0: self.posl.append(0)
else: self.posl.append(self.posl[iwell-1] + self.wells[iwell-1].nsiz)
self.nsiz = sum(self.nsizl)
self.Ea = np.concatenate([well.Ea for well in self.wells])
self.rhoa = np.concatenate([well.rhoa for well in self.wells])
self.ka = np.concatenate([well.ka for well in self.wells])
self.set_channels()
def set_kisom(self, iwell1, ich1, iwell2, ich2):
well1 = self.wells[iwell1]
well2 = self.wells[iwell2]
if (well1.offset0 > well2.offset0):
start1 = 0
start2 = well1.offset0 - well2.offset0
nk = well1.nsiz
else:
start1 = well2.offset0 - well1.offset0
start2 = 0
nk = well2.nsiz
rho1 = well1.rhoa[start1:]
rho2 = well2.rhoa[start2:]
k1 = well1.kchl[ich1][start1:]
k2 = well2.kchl[ich2][start2:]
nh1 = rho1 * k1
nh2 = rho2 * k2
nh = np.sqrt(nh1) * np.sqrt(nh2)
# check symmetry
rdiffmax = 0.
for ik in range(nk):
# skip check for small k
if (k1[ik] < 1e-6) and (k2[ik] < 1e-6): continue
# +/- 1 grain tolerance (for numerical discretization error)
diff = None
if ik == 0:
if nh1[ik] > nh2[ik+1]: diff = nh1[ik] - nh2[ik+1]
elif ik == nk-1:
if nh1[ik] < nh2[ik-1]: diff = nh2[ik-1] - nh1[ik]
else:
if nh1[ik] < nh2[ik-1]: diff = nh2[ik-1] - nh1[ik]
elif nh1[ik] > nh2[ik+1]: diff = nh1[ik] - nh2[ik+1]
if diff is not None:
rdiff = abs(diff) / nh[ik]
if rdiff > rdiffmax: rdiffmax = rdiff
if rdiffmax > 0.3:
raise ValueError("asymmetry detected: %s %% between %s and %s"
% (rdiffmax*100, self.names[iwell1], self.names[iwell2]))
ksym = nh / (np.sqrt(rho1) * np.sqrt(rho2)) # store symmetrized k (= k_i * sqrt(rho_i/rho_j))
self.kisom_sym = np.append(self.kisom_sym, ksym)
pos1 = self.posl[iwell1] + start1
pos2 = self.posl[iwell2] + start2
pos1a = pos1 + np.arange(nk, dtype=np.int64)
pos2a = pos2 + np.arange(nk, dtype=np.int64)
if pos1 < pos2:
self.kisom_i = np.append(self.kisom_i, pos1a) # array of index i
self.kisom_j = np.append(self.kisom_j, pos2a) # array of index j (j>i)
else:
self.kisom_i = np.append(self.kisom_i, pos2a) # array of index i
self.kisom_j = np.append(self.kisom_j, pos1a) # array of index j (j>i)
return
class ME2DMW(MEBaseMW):
""" multiple-well 2D master equation """
@classmethod
def read_from(cls, well_list, connections, dJ, maxE=None, maxJ=None):
""" read well_list and return an instance of the class
arguments:
well_list: list of tuple (well_name, rrkmEJ_file, relative_energy)
connections: list of isomerization channels ((well_name, ichan), (well_name, ichan))
(note: channel index starts from 1)
"""
names, wells = prepare_multi2d(well_list, dJ, maxE=maxE, maxJ=maxJ)
return cls(names, wells, connections)
def __init__(self, names, wells, connections):
"""
names: list of well names
wells: list of ME2D objects
connections: list of isomerization channels ((well_name, ichan), (well_name, ichan))
(note: channel index starts from 1)
"""
super().__init__()
self.is1d = False
self.nwell = len(names)
self.names = names
self.wells = wells
self.connections = connections
self.dE = None
self.topE | |
cnt_odd = 0
for outcome, cnt in drow.counts.items():
if outcome[0][i] == outcome[0][j]: cnt_even += cnt
else: cnt_odd += cnt
exptn = float(cnt_even - cnt_odd) / total
fp = 0.5 + 0.5 * float(cnt_even - cnt_odd + 1) / (total + 2)
else:
raise NotImplementedError("Expectation values of weight > 2 observables are not implemented!")
wt = _np.sqrt(total) / _np.sqrt(fp * (1.0 - fp))
f = 0.5 + 0.5 * exptn
err = 2 * _np.sqrt(f * (1.0 - f) / total) # factor of 2 b/c expectation is addition of 2 terms
return exptn, wt, err
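# Worked example for the quantities above (hedged, purely illustrative numbers):
# with total = 100 shots, cnt_even = 70 and cnt_odd = 30 for a weight-2 observable,
# exptn = (70-30)/100 = 0.4, f = 0.5 + 0.5*0.4 = 0.7,
# err = 2*sqrt(0.7*0.3/100) ~= 0.092, and fp = 0.5 + 0.5*41/102 ~= 0.701 gives
# wt = sqrt(100)/sqrt(0.701*0.299) ~= 21.8, so better-determined points get a
# larger weight in the polynomial fit performed below.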
#Get data to fit and weights to use in fitting
data_to_fit = []; wts = []; errbars = []
for L in max_lenghts:
opstr = prepFid + idle_string * L + measFid
exptn, wt, err = unsigned_exptn_and_weight(opstr, obs_indices)
data_to_fit.append(minus_sign * exptn)
wts.append(wt)
errbars.append(err)
#curvefit -> slope
coeffs = _np.polyfit(max_lenghts, data_to_fit, fit_order, w=wts) # when fit_order = 1 = line
if fit_order == 1:
slope = coeffs[0]
elif fit_order == 2:
#OLD: slope = coeffs[1] # c2*x2 + c1*x + c0 ->deriv@x=0-> c1
det = coeffs[1]**2 - 4 * coeffs[2] * coeffs[0]
slope = -_np.sign(coeffs[0]) * _np.sqrt(det) if det >= 0 else coeffs[1]
# c2*x2 + c1*x + c0 ->deriv@y=0-> 2*c2*x0 + c1;
# x0=[-c1 +/- sqrt(c1^2 - 4c2*c0)] / 2*c2; take smaller root
# but if determinant is < 0, fall back to x=0 slope
else: raise NotImplementedError("Only fit_order <= 2 are supported!")
return {'rate': slope,
'fit_order': fit_order,
'fitCoeffs': coeffs,
'data': data_to_fit,
'errbars': errbars,
'weights': wts}
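# A hedged numeric illustration of the fit above (numpy only; the data values
# are made up):
#
#   lengths = [0, 1, 2, 4, 16]
#   data = [0.99, 0.97, 0.95, 0.91, 0.70]
#   slope = _np.polyfit(lengths, data, 1)[0]   # ~ -0.018 per idle repetition
#
# np.polyfit returns coefficients highest power first, which is why fit_order == 1
# reads the slope from coeffs[0] and fit_order == 2 treats coeffs as (c2, c1, c0).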
def do_idle_tomography(nqubits, dataset, max_lenghts, pauli_basis_dicts, maxweight=2,
idle_string=((),), include_hamiltonian="auto",
include_stochastic="auto", include_affine="auto",
advanced_options=None, verbosity=0, comm=None):
"""
Analyze `dataset` using the idle tomography protocol to characterize
`idle_string`.
Parameters
----------
nqubits : int
The number of qubits.
dataset : DataSet
The set of data counts (observations) to use.
max_lenghts : list
A list of maximum germ-power lengths. Each specifies the number of times
to repeat the idle gate, and typically this is a list of the powers of
2 preceded by zero, e.g. `[0,1,2,4,16]`. The largest value in this
list should be chosen to be the maximum number of idle gates you want to
perform in a row (typically limited by performance or time constraints).
pauli_basis_dicts : tuple
A `(prepPauliBasisDict,measPauliBasisDict)` tuple of dictionaries
specifying the way to prepare and measure in Pauli bases. See
:function:`preferred_signs_from_paulidict` for details on each
dictionary's format.
maxweight : int, optional
The maximum weight of errors to consider.
idle_string : Circuit-like, optional
A Circuit or tuple of operation labels that represents the idle
gate being characterized by idle tomography.
include_hamiltonian, include_stochastic, include_affine : {True,False,"auto"}
Whether to extract Hamiltonian-, Stochastic-, and Affine-type
intrinsic errors. If "auto" is specified, then the corresponding
error-type is extracted only if there is enough data to reliably
infer them (i.e. enough data to construct "full rank" Jacobian
matrices).
advanced_options : dict, optional
A dictionary of optional advanced arguments which influence the
way idle tomography is performed. Allowed keys are:
- "jacobian mode": {"separate","together"} how to evaluate jacobians
- "preferred_prep_basis_signs" : 3-tuple of "+"/"-" or default="auto"
- "preferred_meas_basis_signs" : 3-tuple of "+"/"-" or default="auto"
- "pauli_fidpairs": alternate list of pauli fiducial pairs to use
- "fit order" : integer order for polynomial fits to data
- "ham_tmpl" : see :function:`make_idle_tomography_list`
verbosity : int, optional
How much detail to send to stdout.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
Returns
-------
IdleTomographyResults
"""
printer = _VerbosityPrinter.create_printer(verbosity, comm=comm)
if advanced_options is None:
advanced_options = {}
prepDict, measDict = pauli_basis_dicts
if nqubits == 1: # special case where line-labels may be ('*',)
if len(dataset) > 0:
first_circuit = list(dataset.keys())[0]
line_labels = first_circuit.line_labels
else:
line_labels = (0,)
GiStr = _Circuit(idle_string, line_labels=line_labels)
else:
GiStr = _Circuit(idle_string, num_lines=nqubits)
jacmode = advanced_options.get("jacobian mode", "separate")
sto_aff_jac = None; sto_aff_obs_err_rates = None
ham_aff_jac = None; ham_aff_obs_err_rates = None
rankStr = "" if (comm is None) else "Rank%d: " % comm.Get_rank()
preferred_prep_basis_signs = advanced_options.get('preferred_prep_basis_signs', 'auto')
preferred_meas_basis_signs = advanced_options.get('preferred_meas_basis_signs', 'auto')
if preferred_prep_basis_signs == "auto":
preferred_prep_basis_signs = preferred_signs_from_paulidict(prepDict)
if preferred_meas_basis_signs == "auto":
preferred_meas_basis_signs = preferred_signs_from_paulidict(measDict)
if 'pauli_fidpairs' in advanced_options:
same_basis_fidpairs = [] # *all* qubits prep/meas in same basis
diff_basis_fidpairs = [] # at least one doesn't
for pauli_fidpair in advanced_options['pauli_fidpairs']:
#pauli_fidpair is a (prep,meas) tuple of NQPauliState objects
if pauli_fidpair[0].rep == pauli_fidpair[1].rep: # don't care about sign
same_basis_fidpairs.append(pauli_fidpair)
else:
diff_basis_fidpairs.append(pauli_fidpair)
#print("DB: LENGTHS: same=",len(same_basis_fidpairs)," diff=",len(diff_basis_fidpairs))
else:
same_basis_fidpairs = None # just for
diff_basis_fidpairs = None # safety
errors = _idttools.allerrors(nqubits, maxweight)
fit_order = advanced_options.get('fit order', 1)
intrinsic_rates = {}
pauli_fidpair_dict = {}
observed_rate_infos = {}
if include_stochastic in (True, "auto"):
tStart = _time.time()
if 'pauli_fidpairs' in advanced_options:
pauli_fidpairs = same_basis_fidpairs
else:
pauli_fidpairs = idle_tomography_fidpairs(
nqubits, maxweight, False, include_stochastic, include_affine,
advanced_options.get('ham_tmpl', "auto"),
preferred_prep_basis_signs, preferred_meas_basis_signs)
#print("DB: %d same-basis pairs" % len(pauli_fidpairs))
#divide up strings among ranks
indxFidpairList = list(enumerate(pauli_fidpairs))
my_FidpairList, _, _ = _tools.mpitools.distribute_indices(indxFidpairList, comm, False)
my_J = []; my_obs_infos = []
for i, (ifp, pauli_fidpair) in enumerate(my_FidpairList):
#NOTE: pauli_fidpair is a 2-tuple of NQPauliState objects
all_outcomes = _idttools.alloutcomes(pauli_fidpair[0], pauli_fidpair[1], maxweight)
t0 = _time.time(); infos_for_this_fidpair = _collections.OrderedDict()
for j, out in enumerate(all_outcomes):
printer.log(" - outcome %d of %d" % (j, len(all_outcomes)), 2)
#form jacobian rows as we get extrinsic error rates
Jrow = [stochastic_jac_element(pauli_fidpair[0], err, pauli_fidpair[1], out)
for err in errors]
if include_affine:
Jrow.extend([affine_jac_element(pauli_fidpair[0], err, pauli_fidpair[1], out)
for err in errors])
my_J.append(Jrow)
info = compute_observed_samebasis_err_rate(dataset, pauli_fidpair, pauli_basis_dicts, GiStr,
out, max_lenghts, fit_order)
info['jacobian row'] = _np.array(Jrow)
infos_for_this_fidpair[out] = info
my_obs_infos.append(infos_for_this_fidpair)
printer.log("%sStochastic fidpair %d of %d: %d outcomes analyzed [%.1fs]" %
(rankStr, i, len(my_FidpairList), len(all_outcomes), _time.time() - t0), 1)
#Gather results
info_list = [my_obs_infos] if (comm is None) else comm.gather(my_obs_infos, root=0)
J_list = [my_J] if (comm is None) else comm.gather(my_J, root=0)
if comm is None or comm.Get_rank() == 0:
# pseudo-invert J to get "intrinsic" error rates (labeled by AllErrors(nqubits))
# J*intr = obs
J = _np.concatenate(J_list, axis=0)
infos_by_fidpair = list(_itertools.chain(*info_list)) # flatten ~ concatenate
obs_err_rates = _np.array([info['rate']
for fidpair_infos in infos_by_fidpair
for info in fidpair_infos.values()])
if jacmode == "separate":
rank = _np.linalg.matrix_rank(J)
if rank < J.shape[1]:
#Rank deficiency - if affine is "auto", try with just stochastic
if include_affine == "auto":
J_sto = J[:, 0:len(errors)]
rank_sto = _np.linalg.matrix_rank(J_sto)
if rank_sto < len(errors):
if include_stochastic == "auto":
include_stochastic = False # drop stochastic part
else:
_warnings.warn(("Idle tomography: stochastic-jacobian rank "
"(%d) < #intrinsic rates (%d)") % (rank_sto, J_sto.shape[1]))
else: # stochastic alone is OK - drop affine part
J = J_sto
include_affine = False # for below processing
else:
if include_affine and include_stochastic == "auto":
raise ValueError(("Cannot set `include_stochastic`"
" to 'auto' when `include_affine` is True"))
_warnings.warn(("Idle tomography: %s-jacobian rank "
"(%d) < #intrinsic rates (%d)") % ("samebasis", rank, J.shape[1]))
invJ = _np.linalg.pinv(J)
intrinsic_stochastic_rates = _np.dot(invJ, obs_err_rates)
if include_stochastic: # "auto" could change to False in jac processing above
if include_affine:
if jacmode == "separate":
Nrates = len(intrinsic_stochastic_rates)
intrinsic_rates['stochastic'] = intrinsic_stochastic_rates[0:Nrates // 2]
intrinsic_rates['affine'] = intrinsic_stochastic_rates[Nrates // 2:]
elif jacmode == "together":
sto_aff_jac = J
sto_aff_obs_err_rates = obs_err_rates
else: raise ValueError("Invalid `jacmode` == %s" % str(jacmode))
pauli_fidpair_dict['samebasis'] = pauli_fidpairs # "key" to observed rates
observed_rate_infos['samebasis'] = infos_by_fidpair
else:
if jacmode == "separate":
intrinsic_rates['stochastic'] = intrinsic_stochastic_rates
elif jacmode == "together":
sto_aff_jac = J
sto_aff_obs_err_rates = obs_err_rates
pauli_fidpair_dict['samebasis'] = pauli_fidpairs # "key" to observed rates
observed_rate_infos['samebasis'] = infos_by_fidpair
printer.log("Completed Stochastic/Affine in %.2fs" % (_time.time() - tStart), 1)
elif include_affine: # either True or "auto"
raise ValueError("Cannot extract affine error rates without also extracting stochastic ones!")
if include_hamiltonian in (True, "auto"):
tStart = _time.time()
if 'pauli_fidpairs' in advanced_options:
pauli_fidpairs = diff_basis_fidpairs
else:
pauli_fidpairs = idle_tomography_fidpairs(
nqubits, maxweight, include_hamiltonian, False, False,
advanced_options.get('ham_tmpl', "auto"),
preferred_prep_basis_signs, preferred_meas_basis_signs)
#print("DB: %d diff-basis pairs" % len(pauli_fidpairs))
#divide up fiducial pairs among ranks
indxFidpairList = list(enumerate(pauli_fidpairs))
my_FidpairList, _, _ = _tools.mpitools.distribute_indices(indxFidpairList, comm, False)
my_J = []; my_obs_infos = []; my_Jaff = []
for i, (ifp, pauli_fidpair) in enumerate(my_FidpairList):
all_observables = _idttools.allobservables(pauli_fidpair[1], maxweight)
t0 | |
type2="C224Y" type3="C235" type4="N239" k1="0.37656" k2="1.79912" k3="-1.21336" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224I" type3="C235" type4="N239" k1="0.18828" k2="0.89956" k3="-0.60668" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224" type3="C235" type4="N239" k1="0.18828" k2="0.89956" k3="-0.60668" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224I" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224A" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224S" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224K" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224Y" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C223" type3="C267" type4="O268" k1="11.00392" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224" type3="C267" type4="O268" k1="11.00392" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224I" type3="C267" type4="O268" k1="11.00392" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224A" type3="C267" type4="O268" k1="11.00392" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224S" type3="C267" type4="O268" k1="11.00392" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224K" type3="C267" type4="O268" k1="11.00392" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224Y" type3="C267" type4="O268" k1="11.00392" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C223" type3="C267" type4="O269" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224" type3="C267" type4="O269" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224I" type3="C267" type4="O269" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224A" type3="C267" type4="O269" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224S" type3="C267" type4="O269" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224K" type3="C267" type4="O269" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C224Y" type3="C267" type4="O269" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C235" type3="C246" type4="N239" k1="4.41412" k2="1.9874" k3="-4.93712" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C235" type3="C292" type4="N287" k1="3.78652" k2="4.50826" k3="-0.98324" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C235" type3="C293" type4="N287" k1="3.78652" k2="4.50826" k3="-0.98324" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C235" type3="C295" type4="N309" k1="-1.96648" k2="5.76346" k3="-5.58564" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C283" type3="C157" type4="O154" k1="13.091736" k2="-2.169404" k3="2.859764" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C283" type3="C158" type4="O154" k1="13.091736" k2="-2.169404" k3="2.859764" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C283" type3="C206" type4="S200" k1="4.29906" k2="1.106668" k3="1.138048" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C283" type3="C214" type4="S203" k1="4.29906" k2="1.106668" k3="1.138048" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C283" type3="C271" type4="O272" k1="0" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N238" type2="C284" type3="C271" type4="O272" k1="0" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N239" type2="C235" type3="C246" type4="N239" k1="4.41412" k2="1.9874" k3="-4.93712" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N239" type2="C235" type3="C293" type4="N287" k1="3.78652" k2="4.50826" k3="-0.98324" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N239" type2="C235" type3="C295" type4="N309" k1="-1.96648" k2="5.76346" k3="-5.58564" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N239" type2="C246" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N239" type2="C285" type3="C271" type4="O272" k1="0" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N287" type2="C292" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N287" type2="C293" type3="C157" type4="O154" k1="13.091736" k2="-2.169404" k3="2.859764" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N287" type2="C293" type3="C158" type4="O154" k1="13.091736" k2="-2.169404" k3="2.859764" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N287" type2="C293" type3="C206" type4="S200" k1="4.29906" k2="1.106668" k3="1.138048" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N287" type2="C293" type3="C214" type4="S203" k1="4.29906" k2="1.106668" k3="1.138048" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N287" type2="C293" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N309" type2="C295" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="N503" type2="C508" type3="C507" type4="N511" k1="0" k2="22.489" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C267" type2="C136" type3="C293" type4="N287" k1="-11.508092" k2="3.194484" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C235" type2="C293" type3="C136" type4="C267" k1="1.251016" k2="3.259336" k3="0.53346" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C267" type2="C136" type3="C283" type4="N238" k1="-11.508092" k2="3.194484" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C267" type2="C136" type3="C283" type4="C271" k1="1.251016" k2="3.259336" k3="0.53346" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C145" type3="C145" type4="" k1="0" k2="15.167" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C145" type3="C166" type4="" k1="0" k2="15.167" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C166" type3="C145" type4="" k1="0" k2="15.167" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C145" type3="C501" type4="" k1="0" k2="14.644" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C501" type3="C145" type4="" k1="0" k2="14.644" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C145" type3="C502" type4="" k1="0" k2="15.167" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C502" type3="C145" type4="" k1="0" k2="15.167" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="N300" type3="C302" type4="" k1="0" k2="8.1588" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C302" type3="N300" type4="" k1="0" k2="8.1588" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C500" type3="C501" type4="" k1="0" k2="7.0082" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C501" type3="C500" type4="" k1="0" k2="7.0082" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C500" type3="C514" type4="" k1="0" k2="27.3006" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C514" type3="C500" type4="" k1="0" k2="27.3006" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C501" type3="C502" type4="" k1="0" k2="12.552" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C502" type3="C501" type4="" k1="0" k2="12.552" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="N503" type3="C502" type4="" k1="0" k2="6.3806" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C502" type3="N503" type4="" k1="0" k2="6.3806" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C506" type3="N503" type4="" k1="0" k2="9.7278" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="N503" type3="C506" type4="" k1="0" k2="9.7278" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="N511" type3="C506" type4="" k1="0" k2="20.92" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C506" type3="N511" type4="" k1="0" k2="20.92" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C507" type3="C508" type4="" k1="0" k2="22.489" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C508" type3="C507" type4="" k1="0" k2="22.489" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="N511" type3="C507" type4="" k1="0" k2="10.0416" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="C507" type3="N511" type4="" k1="0" k2="10.0416" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="" type2="N512" type3="C509" type4="" k1="0" k2="9.7278" | |
# Copyright 2014 Nervana Systems Inc., 2016 <NAME> All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this is a cuda port of an opencl implementation of Lavin and Gray's winograd algorithms
# the idea is that since my gpu is nvidia, it will be easier to optimize using cuda first, and then
# possibly backport the result into opencl
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
import os
from os import path
if not path.isdir('/tmp/cudaptx'):
os.makedirs('/tmp/cudaptx')
def calcU():
print('calcU')
code = r"""
__global__ void calcU(
float* Out, const float* In,
int RSK, int SK, int SK2, int K, int C1152, int C, int GK)
{
int tid = threadIdx.x;
//if(tid != 0) {
// return;
//}
int blkK = gridDim.x - blockIdx.x - 1;
int c = gridDim.y - blockIdx.y - 1;
int k = (blkK<<5) + tid;
// output before:
// [Co//32][Ci][xi][nu][Co % 32]
// output in new order:
// [xi][nu][Co//32][Ci][Co % 32]
int out_offset = blkK*C1152 + c*1152 + tid;
bool valid_k = k < K;
int f_r0s0 = c*RSK + k;
int f_r0s1 = f_r0s0 + K;
int f_r0s2 = f_r0s1 + K;
int f_r1s0 = f_r0s0 + SK;
int f_r1s1 = f_r0s1 + SK;
int f_r1s2 = f_r0s2 + SK;
int f_r2s0 = f_r0s0 + SK2;
int f_r2s1 = f_r0s1 + SK2;
int f_r2s2 = f_r0s2 + SK2;
float I[3][3];
I[0][0] = valid_k ? (In[f_r0s0]) : 0.0f;
I[0][1] = valid_k ? (In[f_r0s1]) : 0.0f;
I[0][2] = valid_k ? (In[f_r0s2]) : 0.0f;
I[1][0] = valid_k ? (In[f_r1s0]) : 0.0f;
I[1][1] = valid_k ? (In[f_r1s1]) : 0.0f;
I[1][2] = valid_k ? (In[f_r1s2]) : 0.0f;
I[2][0] = valid_k ? (In[f_r2s0]) : 0.0f;
I[2][1] = valid_k ? (In[f_r2s1]) : 0.0f;
I[2][2] = valid_k ? (In[f_r2s2]) : 0.0f;
float rcp4 = 1.0f/4.0f;
float rcp6 = 1.0f/6.0f;
float rcp12 = 1.0f/12.0f;
float rcp24 = 1.0f/24.0f;
float T[6][3];
#pragma unroll
for (int i = 0; i < 3; i++)
{
float t0 = rcp6 * I[2][i];
float t1 = fma(I[0][i], -rcp6, -t0);
float t2 = fma(I[0][i], rcp24, t0);
T[0][i] = rcp4 * I[0][i];
T[1][i] = fma(I[1][i], -rcp6, t1);
T[2][i] = fma(I[1][i], rcp6, t1);
T[3][i] = fma(I[1][i], rcp12, t2);
T[4][i] = fma(I[1][i], -rcp12, t2);
T[5][i] = I[2][i];
}
// output in new order:
// [xi][nu][Co//32][Ci][Co % 32]
// we can probably make these __global__ parameters
int nu_stride = 32 * C * GK;
int xi_stride = nu_stride * 6;
//int nu_stride = 0;
//int xi_stride = 0;
out_offset = tid + // Co % 32
(c << 5) + // Ci
((blkK * C) << 5) // Co // 32
;
#pragma unroll
for (int i = 0; i < 6; i++)
{
float t0 = rcp6 * T[i][2];
float t1 = fma(T[i][0], -rcp6, -t0);
float t2 = fma(T[i][0], rcp24, t0);
// Out[out_offset + 32*(i*6 + 0)] = (rcp4 * T[i][0]);
// Out[out_offset + 32*(i*6 + 1)] = (fma(T[i][1], -rcp6, t1));
// Out[out_offset + 32*(i*6 + 2)] = (fma(T[i][1], rcp6, t1));
// Out[out_offset + 32*(i*6 + 3)] = (fma(T[i][1], rcp12, t2));
// Out[out_offset + 32*(i*6 + 4)] = (fma(T[i][1], -rcp12, t2));
// Out[out_offset + 32*(i*6 + 5)] = (T[i][2]);
// output in new order:
// [xi][nu][Co//32][Ci][Co % 32]
Out[out_offset + i * xi_stride + 0 * nu_stride] = (rcp4 * T[i][0]);
Out[out_offset + i * xi_stride + 1 * nu_stride] = (fma(T[i][1], -rcp6, t1));
Out[out_offset + i * xi_stride + 2 * nu_stride] = (fma(T[i][1], rcp6, t1));
Out[out_offset + i * xi_stride + 3 * nu_stride] = (fma(T[i][1], rcp12, t2));
Out[out_offset + i * xi_stride + 4 * nu_stride] = (fma(T[i][1], -rcp12, t2));
Out[out_offset + i * xi_stride + 5 * nu_stride] = (T[i][2]);
}
}
"""
with open('/tmp/out.cu', 'w') as f:
f.write(code)
module = SourceModule(code, keep=True, cache_dir='/tmp/cudaptx') # -cl-mad-enable -cl-fast-relaxed-math -cl-no-signed-zeros
return module.get_function('calcU')
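# A hedged launch sketch for the kernel above (the grid/block shape is inferred
# from the kernel indexing -- k = blkK*32 + tid, c taken from gridDim.y -- and is
# not part of this file; GK is assumed to be ceil(K/32), and U_gpu/W_gpu are
# device buffers allocated by the caller):
#
#   import numpy as np
#   calcU_fn = calcU()
#   calcU_fn(U_gpu, W_gpu,
#            np.int32(R*S*K), np.int32(S*K), np.int32(2*S*K), np.int32(K),
#            np.int32(C*1152), np.int32(C), np.int32(GK),
#            block=(32, 1, 1), grid=(GK, C))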
def calcV():
print('calcV')
code = r"""
static inline int div64(int value, int div_mul, int div_shift)
{
int result;
// if the divisor is a power of two the magic will be 1 and it's just a simple right shift
if (div_mul == 1)
result = value >> div_shift;
// Otherwise multiply by magic and right shift just the high bits
else
result = (value * div_mul) >> div_shift;
return result;
}
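// Worked example of the magic-number trick above (hedged, illustrative values):
// to divide by 6 over a small range, one valid pair is div_mul = 43, div_shift = 8,
// since 43/256 ~= 1/6; e.g. div64(35, 43, 8) = (35*43) >> 8 = 1505 >> 8 = 5 = 35/6,
// and this pair stays exact for values up to about 130. The host is expected to
// precompute such (magic, shift) pairs (e.g. magic_GXS/shift_GXS below) so the
// kernel avoids integer division.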
__global__ void calcV(
float* Out, const float* In,
int Y, int X, int N, int pad_y, int pad_x,
int GXS, int GYS2, int GXS2, int magic_GXS2, int shift_GXS2, int magic_GXS, int shift_GXS,
int shlY, int shlX, int maskY, int shrY, int maskX, int shrX, int shlN, int maskN,
int YXN, int XN, int GYS_GXS_C_1152, int GXS_C_1152, int C_1152,
int GX, int GY_GX, int GN, int C)
{
int tid = threadIdx.x;
int blkN = gridDim.x - blockIdx.x - 1;
int blkYX = gridDim.y - blockIdx.y - 1;
int c = gridDim.z - blockIdx.z - 1;
// unpack y,x from blockIdx.x
int gy2 = (blkYX * magic_GXS) >> shift_GXS;
int gx2 = blkYX - gy2*GXS;
// Implement a square wave block id remapping
// (for all but last row (if odd number of rows))
//int gy = gy2 << 1;
//int gx = gx2;
//if (gy2 != GYS2)
//{
// gy += (gx2 & 1) ^ ((gx2 & 2) >> 1);
// gx = gx2 >> 1;
//}
// Scan backwards on odd rows
//if (gy2 & 1)
// gx = GXS - gx - 1;
int gx = gx2;
int gy = gy2;
//int gygx = gy * tiles + gx;
// Super block YXN coordinates
int y0 = (gy << shlY) + (((tid & maskY) >> shrY) << 2) - pad_y;
int x0 = (gx << shlX) + (((tid & maskX) >> shrX) << 2) - pad_x;
int n = (blkN << shlN) + (tid & maskN);
bool valid = n < N;
bool xin[6], yin[6];
float I[6][6];
#pragma unroll
for (int i = 0; i < 6; i++)
{
xin[i] = x0 + i >= 0 && x0 + i < X && valid;
yin[i] = y0 + i >= 0 && y0 + i < Y;
}
int offset = c*YXN + y0*XN + x0*N + n;
#pragma unroll
for (int y = 0; y < 6; y++)
{
if (y) offset += XN;
#pragma unroll
for (int x = 0; x < 6; x++)
{
float val = 0;
if (yin[y] && xin[x])
val = *(In + offset + x*N);
I[y][x] = (val);
}
}
float T[6][6];
#pragma unroll
for (int i = 0; i < 6; i++)
{
float t0 = fma(I[2][i], -4.0f, I[4][i]);
float t1 = fma(I[1][i], -4.0f, I[3][i]);
float t2 = I[4][i] - I[2][i];
float t3 = I[3][i] - I[1][i];
float t4 = fma(I[2][i], -5.0f, I[4][i]);
float t5 = fma(I[3][i], -5.0f, I[5][i]);
T[0][i] = fma(I[0][i], 4.0f, t4);
T[1][i] = t0 + t1;
T[2][i] = t0 - t1;
T[3][i] = fma(t3, 2.0f, t2);
T[4][i] = fma(t3, -2.0f, t2);
T[5][i] = fma(I[1][i], 4.0f, t5);
}
// old layout:
// [tH, tW, N // 32, Ci, xi, nu, N % 32]
// new layout:
// [xi, nu, N // 32, tH, tW, Ci, N % 32]
// (note: since last dimension is 32, this is always going to be 128-byte aligned)
int out_offset = tid + // N % 32
(c << 5) + // ci
blkYX * (C << 5) + // th* tiles + tw (?)
// 0 *((2 - gy) * 3 + (2 - gx)) * (C << 5) + // th* tiles + tw (?)
| |
for id in self.agent_map.values():
removeObject(id)
for o in self.handles:
for a in range(len(self.handles[o])):
h = self.handles[o][a]
if h is not None:
removeObject(h)
self.handles = {}
self.agent_map = {}
def draw_q(self, o, Q):
aa = Q[o] # get the action values
min_a = min(aa) # minimum of the action values
aa = [a - min_a for a in aa] # shift to make all >= 0
sum_a = sum(aa) # sum of action values
if sum_a != 0: aa = [a/sum_a for a in aa] # normalize
if o not in self.handles: # create handles list
self.handles[o] = [None, None, None, None, None]
(x, y) = self.maze.rc2xy(o[0], o[1])
for a, (dr, dc) in enumerate(MAZE_MOVES):
p = Vector3f(x, y, 0)
value = aa[a] * 5
if dr == 0: dr = 0.1
else: p.x += dr*value
if dc == 0: dc = 0.1
else: p.y += dc*value
if value == 0 and self.handles[o][a] is not None:
# don't show 0 values
removeObject(self.handles[o][a])
self.handles[o][a] = None
elif self.handles[o][a] is None:
# create the cube to show the value
self.handles[o][a] = \
addObject("data/shapes/cube/BlueCube.xml", \
p, Vector3f(0, 0, 0), scale=Vector3f(0.5, 0.5, 0.5))
else:
# move the existing cube
getSimContext().setObjectPosition(self.handles[o][a], p)
center = len(MAZE_MOVES)
if self.handles[o][center] is None:
self.handles[o][center] = \
addObject("data/shapes/cube/YellowCube.xml", \
Vector3f(x, y, 0), \
scale=Vector3f(0.6,0.6,0.6))
class EgocentricMazeEnvironment(MazeEnvironment):
"""
The environment is a 2-D maze.
This is a slightly more continuous version
* Actions (1 discrete action)
* 0 - move forward by WALK_BY
* 1 - turn CW by TURN_BY
* 2 - turn CCW by TURN_BY
* 3 - move backward by WALK_BY
    * Observations (continuous observations)
        * o[0] - the current x position
        * o[1] - the current y position
        * o[2] - the distance to the target
        * o[3] - the angle to the target
* o[4] - o[7] - ray sensors cast around the agent (starting with straight ahead and going clockwise)
"""
def __init__(self, granularity = 1):
"""
Constructor
@param granularity - the number of steps it takes to cover the whole WALK_BY distance
"""
MazeEnvironment.__init__(self)
action_info = FeatureVectorInfo() # describes the actions
observation_info = FeatureVectorInfo() # describes the observations
reward_info = FeatureVectorInfo() # describes the rewards
action_info.add_discrete(0, CONT_MAZE_N_ACTIONS-1) # action
( (xmin, ymin), (xmax, ymax) ) = MazeEnvironment.maze.xy_limits()
print 'MAZE LIMITS', ( (xmin, ymin), (xmax, ymax) )
observation_info.add_continuous(xmin, xmax) # x-coord
observation_info.add_continuous(ymin, ymax) # y-coord
observation_info.add_continuous(0, CONT_MAZE_MAX_DISTANCE ) # distance to target
observation_info.add_continuous(-180, 180) # angle to target
for i in range(CONT_MAZE_N_RAYS):
observation_info.add_continuous(0,1) # ray sensor
reward_info.add_continuous(-100,100)
self.agent_info = AgentInitInfo(observation_info, action_info, reward_info)
self.granularity = granularity
self.max_steps = MAX_STEPS * 15 * self.granularity # allow 15 * g actions per cell
print 'Initialized EgocentricMazeEnvironment'
def reset(self, agent):
"""
reset the environment to its initial state
"""
(x,y) = MazeEnvironment.maze.rc2xy(0,0)
agent.state.position = Vector3f(x,y,0)
agent.state.rotation = Vector3f(0,0,0)
self.agents_at_goal.discard(agent)
print 'Episode %d complete' % agent.episode
return True
def step(self, agent, action):
"""
Continuous version
"""
if not self.agent_info.actions.validate(action):
if agent.step >= self.max_steps - 1:
return self.max_steps * self.rewards.last_reward(agent)
else:
return self.rewards.null_move(agent)
a = int(round(action[0]))
pos = agent.state.position # current position
rot = agent.state.rotation # current rotation
(x,y,heading) = (pos.x, pos.y, rot.z) # current pose
new_x, new_y, new_heading = x, y, heading # pose to be computed
dx, dy = 0, 0
if a == CONT_MAZE_ACTIONS['CW']: # clockwise
new_heading = wrap_degrees(heading, -CONT_MAZE_TURN_BY)
elif a == CONT_MAZE_ACTIONS['CCW']: # counter-clockwise
new_heading = wrap_degrees(heading, CONT_MAZE_TURN_BY)
elif a == CONT_MAZE_ACTIONS['FWD']: # forward
dx = CONT_MAZE_WALK_BY * cos(radians(new_heading)) / self.granularity
dy = CONT_MAZE_WALK_BY * sin(radians(new_heading)) / self.granularity
elif a == CONT_MAZE_ACTIONS['BCK']: # backward
dx = -CONT_MAZE_WALK_BY * cos(radians(new_heading)) / self.granularity
dy = -CONT_MAZE_WALK_BY * sin(radians(new_heading)) / self.granularity
if dx != 0 or dy != 0:
new_x, new_y = x + dx, y + dy # this is where we are moving to
print 'move test', x, y, dx, dy,
# leave a buffer of space to check in the right direction
if dx != 0: dx = dx * 1.1 # leave a buffer for testing
if dy != 0: dy = dy * 1.1 # leave a buffer for testing
test_x, test_y = x + dx, y + dy # this is to check if there are walls there
print dx, dy, test_x, test_y
if not MazeEnvironment.maze.xy_bounds(test_x, test_y):
print "could not move, out of bounds"
self.set_animation(agent, 'stand')
return self.rewards.out_of_bounds(agent)
elif not MazeEnvironment.maze.xy_valid(x,y,test_x,test_y):
print "could not move, hit a wall"
self.set_animation(agent, 'stand')
return self.rewards.hit_wall(agent)
if new_x != x or new_y != y:
self.set_animation(agent, 'run')
# move the agent
agent.state.rotation = Vector3f(0,0,new_heading)
pos0 = agent.state.position
pos0.x = new_x
pos0.y = new_y
agent.state.position = pos0
(new_r, new_c) = MazeEnvironment.maze.xy2rc(new_x, new_y)
if new_r == ROWS - 1 and new_c == COLS - 1:
self.agents_at_goal.add(agent)
return self.max_steps * self.rewards.goal_reached(agent)
elif agent.step >= self.max_steps - 1:
return self.max_steps * self.rewards.last_reward(agent)
return self.rewards.valid_move(agent)
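    # Worked example of the move geometry in step() above (hypothetical
    # values; CONT_MAZE_WALK_BY and granularity are assumed to be 2.5 and 1
    # here, the real constants are defined elsewhere in this module):
    #   heading = 0 degrees, action = FWD
    #   dx = 2.5 * cos(radians(0)) / 1 = 2.5
    #   dy = 2.5 * sin(radians(0)) / 1 = 0.0
    # The candidate position (x + dx, y + dy) is then checked for bounds and
    # walls using a 1.1x buffered test point before the agent is moved.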
def sense(self, agent, obs):
"""
Continuous version
"""
pos = agent.state.position # current position
rot = agent.state.rotation # current rotation
(x,y,heading) = (pos.x, pos.y, rot.z) # current pose
obs[0] = x # the agent can observe its position
obs[1] = y # the agent can observe its position
(tx, ty) = MazeEnvironment.maze.rc2xy(ROWS-1,COLS-1) # coordinates of target
tx, ty = tx - x, ty - y # line to target
obs[2] = hypot(tx, ty) # distance to target
angle_to_target = degrees(atan2(ty, tx)) # angle to target from +x, in degrees
angle_to_target = wrap_degrees(angle_to_target, -heading) # heading to target relative to us
obs[3] = angle_to_target
d_angle = 360.0 / CONT_MAZE_N_RAYS
p0 = agent.state.position
for i in range(CONT_MAZE_N_RAYS):
angle = radians(heading + i * d_angle)
direction = Vector3f(cos(angle), sin(angle), 0) # direction of ray
ray = (p0, p0 + direction * GRID_DX)
# we only look for objects of type 1, which means walls
result = getSimContext().findInRay(ray[0], ray[1], 1, False)
# we can now return a continuous sensor since FindInRay returns the hit point
if len(result) > 0:
(sim, hit) = result
len1 = (ray[1] - ray[0]).getLength() # max extent
len2 = (hit - ray[0]).getLength() # actual extent
if len1 != 0:
obs[4+i] = len2/len1
else:
obs[4+i] = 0
else:
obs[4+i] = 1
if not self.agent_info.sensors.validate(obs):
            print 'ERROR: incorrect observation!', obs
print ' should be:', self.agent_info.sensors
return obs
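    # Worked example of the ray-sensor normalization in sense() above
    # (hypothetical numbers; GRID_DX is assumed to be 20 here): a ray with
    # maximum extent len1 = 20 that hits a wall 5 units away gives
    # obs[4+i] = 5 / 20 = 0.25, while a ray that hits nothing reports 1.0,
    # so smaller values mean closer obstacles.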
class GranularMazeEnvironment(MazeEnvironment):
"""
The environment is a 2-D maze.
    In this granular version, the agent moves in sub-cell steps whose size is set by the granularity parameter.
* Actions (1 discrete action)
* 0 - move in the +r direction
* 1 - move in the -r direction
* 2 - move in the +c direction
* 3 - move in the -c direction
    * Observations (6 continuous observations)
* o[0] - the current x position
* o[1] - the current y position
* o[2] - obstacle in the +r direction?
* o[3] - obstacle in the -r direction?
* o[4] - obstacle in the +c direction?
* o[5] - obstacle in the -c direction?
"""
def __init__(self, granularity = 8):
"""
generate the maze
"""
MazeEnvironment.__init__(self)
action_info = FeatureVectorInfo() # describes the actions
observation_info = FeatureVectorInfo() # describes the observations
reward_info = FeatureVectorInfo() # describes the rewards
action_info.add_discrete(0, CONT_MAZE_N_ACTIONS-1) # action
( (xmin, ymin), (xmax, ymax) ) = MazeEnvironment.maze.xy_limits()
print 'MAZE LIMITS', ( (xmin, ymin), (xmax, ymax) )
observation_info.add_continuous(xmin, xmax) # x-coord
observation_info.add_continuous(ymin, ymax) # y-coord
for (dr, dc) in MAZE_MOVES:
observation_info.add_continuous(0,1) # movement sensor
reward_info.add_continuous(-100,100)
self.agent_info = AgentInitInfo(observation_info, action_info, reward_info)
self.max_steps = MAX_STEPS * (granularity * 2) # allow 2x granularity steps per cell
self.granularity = granularity
print 'Initialized GranularMazeEnvironment'
def reset(self, agent):
"""
reset the environment to its initial state
"""
(x,y) = MazeEnvironment.maze.rc2xy(0,0)
agent.state.position = Vector3f(x,y,0)
agent.state.rotation = Vector3f(0,0,0)
self.agents_at_goal.discard(agent)
print 'Episode %d complete' % agent.episode
return True
def set_position(self, agent, new_pose):
"""
set the next agent position to new_pose = (x,y,h)
"""
new_x, new_y, new_heading = new_pose
pos = agent.state.position
if pos.x == new_x and pos.y == new_y:
self.set_animation(agent, 'stand')
{self.root}.")
@property
def root(self) -> Parameter:
"""Root of the hierarchical search space tree, as identified during
``HierarchicalSearchSpace`` construction.
"""
return self._root
def flatten(self) -> SearchSpace:
"""Returns a flattened ``SearchSpace`` with all the parameters in the
given ``HierarchicalSearchSpace``; ignores their hierarchical structure.
"""
return SearchSpace(
parameters=list(self.parameters.values()),
parameter_constraints=self.parameter_constraints,
)
def cast_observation_features(
self, observation_features: core.observation.ObservationFeatures
) -> core.observation.ObservationFeatures:
"""Cast parameterization of given observation features to the hierarchical
structure of the given search space; return the newly cast observation features
with the full parameterization stored in ``metadata`` under
``Keys.FULL_PARAMETERIZATION``.
For each parameter in given parameterization, cast it to the proper type
specified in this search space and remove it from the parameterization if that
parameter should not be in the arm within the search space due to its
hierarchical structure.
"""
full_parameterization_md = {
Keys.FULL_PARAMETERIZATION: observation_features.parameters.copy()
}
obs_feats = observation_features.clone(
replace_parameters=self._cast_parameterization(
parameters=observation_features.parameters
)
)
if not obs_feats.metadata:
obs_feats.metadata = full_parameterization_md # pyre-ignore[8]
else:
obs_feats.metadata = {**obs_feats.metadata, **full_parameterization_md}
return obs_feats
def flatten_observation_features(
self, observation_features: core.observation.ObservationFeatures
) -> core.observation.ObservationFeatures:
"""Flatten observation features that were previously cast to the hierarchical
structure of the given search space; return the newly flattened observation
features. This method re-injects parameter values that were removed from
observation features during casting (as they are saved in observation features
metadata).
"""
obs_feats = observation_features
if (
not obs_feats.metadata
or Keys.FULL_PARAMETERIZATION not in obs_feats.metadata
):
warnings.warn(
f"Cannot flatten observation features {obs_feats} as full "
"parameterization is not recorded in metadata."
)
return obs_feats
# NOTE: Instead, could just use the full parameterization as stored;
# opting for a safer option of only injecting parameters that were
# removed, but not altering those that are present if they have different
# values in full parameterization as stored in metadata.
full_parameterization = not_none(obs_feats.metadata)[Keys.FULL_PARAMETERIZATION]
obs_feats.parameters = {**full_parameterization, **obs_feats.parameters}
return obs_feats
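    # Illustrative cast/flatten round trip (a sketch, not part of the original
    # module; the parameter names "model", "lr" and "num_trees" are
    # hypothetical):
    #   obs = core.observation.ObservationFeatures(
    #       parameters={"model": "XGBoost", "lr": 0.01, "num_trees": 100}
    #   )
    #   cast = hss.cast_observation_features(obs)
    #   # cast.parameters keeps only the parameters reachable for
    #   # model="XGBoost" (e.g. {"model": "XGBoost", "num_trees": 100}); the
    #   # full parameterization is stored in cast.metadata under
    #   # Keys.FULL_PARAMETERIZATION.
    #   flat = hss.flatten_observation_features(cast)
    #   # flat.parameters re-injects the removed values, restoring
    #   # {"model": "XGBoost", "lr": 0.01, "num_trees": 100}.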
def check_membership(
self,
parameterization: TParameterization,
raise_error: bool = False,
check_all_parameters_present: bool = True,
) -> bool:
"""Whether the given parameterization belongs in the search space.
Checks that the given parameter values have the same name/type as
search space parameters, are contained in the search space domain,
and satisfy the parameter constraints.
Args:
parameterization: Dict from parameter name to value to validate.
raise_error: If true parameterization does not belong, raises an error
with detailed explanation of why.
check_all_parameters_present: Ensure that parameterization specifies
values for all parameters as expected by the search space and its
hierarchical structure.
Returns:
Whether the parameterization is contained in the search space.
"""
super().check_membership(
parameterization=parameterization,
raise_error=raise_error,
check_all_parameters_present=False,
)
# Check that each arm "belongs" in the hierarchical
# search space; ensure that it only has the parameters that make sense
# with each other (and does not contain dependent parameters if the
# parameter they depend on does not have the correct value).
cast_to_hss_params = set(
self._cast_parameterization(
parameters=parameterization,
check_all_parameters_present=check_all_parameters_present,
).keys()
)
parameterization_params = set(parameterization.keys())
if cast_to_hss_params != parameterization_params:
if raise_error:
raise ValueError(
"Parameterization violates the hierarchical structure of the search"
f"space; cast version would have parameters: {cast_to_hss_params},"
f" but full version contains parameters: {parameterization_params}."
)
return False
return True
def hierarchical_structure_str(self, parameter_names_only: bool = False) -> str:
"""String representation of the hierarchical structure.
Args:
parameter_names_only: Whether parameter should show up just as names
(instead of full parameter strings), useful for a more concise
representation.
"""
def _hrepr(param: Optional[Parameter], value: Optional[str], level: int) -> str:
is_level_param = param and not value
if is_level_param:
param = not_none(param)
node_name = f"{param.name if parameter_names_only else param}"
ret = "\t" * level + node_name + "\n"
if param.is_hierarchical:
for val, deps in param.dependents.items():
ret += _hrepr(param=None, value=str(val), level=level + 1)
for param_name in deps:
ret += _hrepr(
param=self[param_name],
value=None,
level=level + 2,
)
else:
value = not_none(value)
node_name = f"({value})"
ret = "\t" * level + node_name + "\n"
return ret
return _hrepr(param=self.root, value=None, level=0)
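    # Example output of hierarchical_structure_str for a hypothetical space in
    # which a root parameter "model" selects between "Linear" (which depends
    # on "lr") and "XGBoost" (which depends on "num_trees"), rendered with
    # parameter_names_only=True (illustrative only):
    #
    #   model
    #       (Linear)
    #           lr
    #       (XGBoost)
    #           num_trees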
def _cast_arm(self, arm: Arm) -> Arm:
"""Cast parameterization of given arm to the types in this search space and to
its hierarchical structure; return the newly cast arm.
For each parameter in given arm, cast it to the proper type specified
in this search space and remove it from the arm if that parameter should not be
in the arm within the search space due to its hierarchical structure.
"""
# Validate parameter values in flat search space.
arm = super().cast_arm(arm=arm)
return Arm(
parameters=self._cast_parameterization(parameters=arm.parameters),
name=arm._name,
)
def _cast_parameterization(
self,
parameters: TParameterization,
check_all_parameters_present: bool = True,
) -> TParameterization:
"""Cast parameterization (of an arm, observation features, etc.) to the
hierarchical structure of this search space.
Args:
parameters: Parameterization to cast to hierarchical structure.
            check_all_parameters_present: Whether to raise an error if a parameter
that is expected to be present (according to values of other
parameters and the hierarchical structure of the search space)
is not specified.
"""
def _find_applicable_parameters(root: Parameter) -> Set[str]:
applicable = {root.name}
if check_all_parameters_present and root.name not in parameters:
raise RuntimeError(
f"Parameter '{root.name}' not in parameterization to cast."
)
if not root.is_hierarchical:
return applicable
for val, deps in root.dependents.items():
if parameters[root.name] == val:
for dep in deps:
applicable.update(_find_applicable_parameters(root=self[dep]))
return applicable
        applicable_parameters = _find_applicable_parameters(root=self.root)
        if not all(k in parameters for k in applicable_parameters):
            raise RuntimeError(
                f"Parameters {applicable_parameters - set(parameters.keys())} "
                "missing from the arm."
            )
        return {k: v for k, v in parameters.items() if k in applicable_parameters}
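    # Worked example of the casting logic above (hypothetical parameters):
    # with root "model" whose dependents are {"Linear": ["lr"],
    # "XGBoost": ["num_trees"]}, casting
    #   {"model": "Linear", "lr": 0.1, "num_trees": 50}
    # walks the tree from "model", collects {"model", "lr"} as applicable (the
    # "XGBoost" branch is skipped because model != "XGBoost") and returns
    #   {"model": "Linear", "lr": 0.1}.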
def _find_root(self) -> Parameter:
"""Find the root of hierarchical search space: a parameter that does not depend on
other parameters.
"""
dependent_parameter_names = set()
for parameter in self.parameters.values():
if parameter.is_hierarchical:
for deps in parameter.dependents.values():
dependent_parameter_names.update(param_name for param_name in deps)
root_parameters = self._all_parameter_names - dependent_parameter_names
if len(root_parameters) != 1:
num_parameters = len(self.parameters)
# TODO: In the future, do not need to fail here; can add a "unifying" root
# fixed parameter, on which all independent parameters in the HSS can
# depend.
raise NotImplementedError(
"Could not find the root parameter; found dependent parameters "
f"{dependent_parameter_names}, with {num_parameters} total parameters."
f" Root parameter candidates: {root_parameters}. Having multiple "
"independent parameters is not yet supported."
)
return self.parameters[root_parameters.pop()]
def _validate_hierarchical_structure(self) -> None:
"""Validate the structure of this hierarchical search space, ensuring that all
subtrees are independent (not sharing any parameters) and that all parameters
are reachable and part of the tree.
"""
def _check_subtree(root: Parameter) -> Set[str]:
logger.debug(f"Verifying subtree with root {root}...")
visited = {root.name}
# Base case: validate leaf node.
if not root.is_hierarchical:
return visited # TODO: Should there be other validation?
# Recursive case: validate each subtree.
visited_in_subtrees = ( # Generator of sets of visited parameter names.
_check_subtree(root=self[param_name])
for deps in root.dependents.values()
for param_name in deps
)
# Check that subtrees are disjoint and return names of visited params.
visited.update(
reduce(
lambda set1, set2: _disjoint_union(set1=set1, set2=set2),
visited_in_subtrees,
next(visited_in_subtrees),
)
)
logger.debug(f"Visited parameters {visited} in subtree.")
return visited
# Verify that all nodes have been reached.
visited = _check_subtree(root=self._root)
if len(self._all_parameter_names - visited) != 0:
raise UserInputError(
f"Parameters {self._all_parameter_names - visited} are not reachable "
"from the root. Please check that the hierachical search space provided"
" is represented as a valid tree with a single root."
)
logger.debug(f"Visited all parameters in the tree: {visited}.")
class RobustSearchSpace(SearchSpace):
"""Search space for robust optimization that supports environmental variables
and input noise.
In addition to the usual search space properties, this allows specifying
environmental variables (parameters) and input noise distributions.
"""
def __init__(
self,
parameters: List[Parameter],
parameter_distributions: List[ParameterDistribution],
environmental_variables: Optional[List[Parameter]] = None,
parameter_constraints: Optional[List[ParameterConstraint]] = None,
) -> None:
"""Initialize the robust search space.
Args:
parameters: List of parameter objects for the search space.
parameter_distributions: List of parameter distributions, each representing
the distribution of one or more parameters. These can be used to
specify the distribution of the environmental variables or the input
noise distribution on the parameters.
environmental_variables: List of parameter objects, each denoting an
environmental variable. These must have associated parameter
distributions.
parameter_constraints: List of parameter constraints.
"""
if len(parameter_distributions) == 0:
raise UserInputError(
"RobustSearchSpace requires at least one distributional parameter. "
| |
# Source file: Day 35/rain_alert/venv/lib/python3.9/site-packages/twilio/rest/messaging/v1/campaign.py
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class CampaignList(ListResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version):
"""
Initialize the CampaignList
:param Version version: Version that contains the resource
:returns: twilio.rest.messaging.v1.campaign.CampaignList
:rtype: twilio.rest.messaging.v1.campaign.CampaignList
"""
super(CampaignList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/a2p/Campaigns'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams CampaignInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.messaging.v1.campaign.CampaignInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists CampaignInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.messaging.v1.campaign.CampaignInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of CampaignInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CampaignInstance
:rtype: twilio.rest.messaging.v1.campaign.CampaignPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return CampaignPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of CampaignInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of CampaignInstance
:rtype: twilio.rest.messaging.v1.campaign.CampaignPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return CampaignPage(self._version, response, self._solution)
def create(self, brand_registration_sid, use_case, description, message_samples,
has_embedded_links, has_embedded_phone, messaging_service_sid):
"""
Create the CampaignInstance
:param unicode brand_registration_sid: A2P BrandRegistration Sid
:param unicode use_case: A2P Campaign UseCase.
:param unicode description: A short description of what this SMS campaign does
:param list[unicode] message_samples: Message samples
:param bool has_embedded_links: Indicate that this SMS campaign will send messages that contain links
:param bool has_embedded_phone: Indicates that this SMS campaign will send messages that contain phone numbers
:param unicode messaging_service_sid: MessagingService SID
:returns: The created CampaignInstance
:rtype: twilio.rest.messaging.v1.campaign.CampaignInstance
"""
data = values.of({
'BrandRegistrationSid': brand_registration_sid,
'UseCase': use_case,
'Description': description,
'MessageSamples': serialize.map(message_samples, lambda e: e),
'HasEmbeddedLinks': has_embedded_links,
'HasEmbeddedPhone': has_embedded_phone,
'MessagingServiceSid': messaging_service_sid,
})
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return CampaignInstance(self._version, payload, )
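    # Hypothetical usage sketch (not part of the generated module; it assumes
    # the Twilio client exposes this list resource as
    # client.messaging.v1.campaigns, and the SIDs below are placeholders):
    #
    #   from twilio.rest import Client
    #   client = Client(account_sid, auth_token)
    #   campaign = client.messaging.v1.campaigns.create(
    #       brand_registration_sid="BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
    #       use_case="MIXED",
    #       description="Order status notifications",
    #       message_samples=["Your order has shipped."],
    #       has_embedded_links=False,
    #       has_embedded_phone=False,
    #       messaging_service_sid="MGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
    #   )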
def get(self, sid):
"""
Constructs a CampaignContext
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.messaging.v1.campaign.CampaignContext
:rtype: twilio.rest.messaging.v1.campaign.CampaignContext
"""
return CampaignContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a CampaignContext
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.messaging.v1.campaign.CampaignContext
:rtype: twilio.rest.messaging.v1.campaign.CampaignContext
"""
return CampaignContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Messaging.V1.CampaignList>'
class CampaignPage(Page):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, response, solution):
"""
Initialize the CampaignPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.messaging.v1.campaign.CampaignPage
:rtype: twilio.rest.messaging.v1.campaign.CampaignPage
"""
super(CampaignPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of CampaignInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.messaging.v1.campaign.CampaignInstance
:rtype: twilio.rest.messaging.v1.campaign.CampaignInstance
"""
return CampaignInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Messaging.V1.CampaignPage>'
class CampaignContext(InstanceContext):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, sid):
"""
Initialize the CampaignContext
:param Version version: Version that contains the resource
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.messaging.v1.campaign.CampaignContext
:rtype: twilio.rest.messaging.v1.campaign.CampaignContext
"""
super(CampaignContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/a2p/Campaigns/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch the CampaignInstance
:returns: The fetched CampaignInstance
:rtype: twilio.rest.messaging.v1.campaign.CampaignInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return CampaignInstance(self._version, payload, sid=self._solution['sid'], )
def delete(self):
"""
Deletes the CampaignInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Messaging.V1.CampaignContext {}>'.format(context)
class CampaignInstance(InstanceResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
class Status(object):
APPROVED = "approved"
PENDING = "pending"
FAILED = "failed"
def __init__(self, version, payload, sid=None):
"""
Initialize the CampaignInstance
:returns: twilio.rest.messaging.v1.campaign.CampaignInstance
:rtype: twilio.rest.messaging.v1.campaign.CampaignInstance
"""
super(CampaignInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'messaging_service_sid': payload.get('messaging_service_sid'),
'brand_registration_sid': payload.get('brand_registration_sid'),
'sid': payload.get('sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'description': payload.get('description'),
'message_samples': payload.get('message_samples'),
'status': payload.get('status'),
'failure_reason': payload.get('failure_reason'),
'use_case': payload.get('use_case'),
'has_embedded_links': payload.get('has_embedded_links'),
'has_embedded_phone': payload.get('has_embedded_phone'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CampaignContext for this CampaignInstance
:rtype: twilio.rest.messaging.v1.campaign.CampaignContext
"""
if self._context is None:
self._context = CampaignContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def messaging_service_sid(self):
"""
:returns: MessagingService SID
:rtype: unicode
"""
return self._properties['messaging_service_sid']
@property
def brand_registration_sid(self):
"""
:returns: A2P BrandRegistration Sid
:rtype: unicode
"""
return self._properties['brand_registration_sid']
@property
def sid(self):
"""
:returns: Campaign sid
:rtype: unicode
"""
return self._properties['sid']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def description(self):
"""
:returns: A short description of what this SMS campaign does
:rtype: unicode
"""
return self._properties['description']
@property
def message_samples(self):
"""
:returns: Message samples
:rtype: list[unicode]
"""
return self._properties['message_samples']
@property
def status(self):
"""
:returns: Campaign status
:rtype: CampaignInstance.Status
"""
return self._properties['status']
@property
def failure_reason(self):
"""
:returns: A reason why campaign registration has failed
:rtype: unicode
"""
return self._properties['failure_reason']
@property
def use_case(self):
"""
:returns: A2P Campaign UseCase.
:rtype: unicode
"""
return self._properties['use_case']
@property
def has_embedded_links(self):
"""
:returns: Indicate that this SMS campaign will send messages that contain links
:rtype: bool
"""
return self._properties['has_embedded_links']
@property
def has_embedded_phone(self):
"""
:returns: Indicates that this SMS campaign will send messages that contain phone numbers
:rtype: bool
"""
return self._properties['has_embedded_phone']
@property
def url(self):
"""
:returns: The absolute URL of the Campaign resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch the CampaignInstance
:returns: The fetched CampaignInstance
:rtype: twilio.rest.messaging.v1.campaign.CampaignInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the CampaignInstance
        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()
``default_weight`` or 1) will be used
for all edges.
:param float default_weight: If ``weight_fn`` is not used this can be
optionally used to specify a default weight to use for all edges.
:param float null_value: An optional float that will treated as a null
value. This is the default value in the output matrix and it is used
to indicate the absence of an edge between 2 nodes. By default this is
``0.0``.
:return: The adjacency matrix for the input dag as a numpy array
:rtype: numpy.ndarray
"""
raise TypeError("Invalid Input Type %s for graph" % type(graph))
@adjacency_matrix.register(PyDiGraph)
def _digraph_adjacency_matrix(graph, weight_fn=None, default_weight=1.0, null_value=0.0):
return digraph_adjacency_matrix(
graph,
weight_fn=weight_fn,
default_weight=default_weight,
null_value=null_value,
)
@adjacency_matrix.register(PyGraph)
def _graph_adjacency_matrix(graph, weight_fn=None, default_weight=1.0, null_value=0.0):
return graph_adjacency_matrix(
graph,
weight_fn=weight_fn,
default_weight=default_weight,
null_value=null_value,
)
@functools.singledispatch
def all_simple_paths(graph, from_, to, min_depth=None, cutoff=None):
"""Return all simple paths between 2 nodes in a PyGraph object
A simple path is a path with no repeated nodes.
:param graph: The graph to find the path in. Can either be a
class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`
:param int from_: The node index to find the paths from
:param int to: The node index to find the paths to
:param int min_depth: The minimum depth of the path to include in the
output list of paths. By default all paths are included regardless of
depth, setting to 0 will behave like the default.
:param int cutoff: The maximum depth of path to include in the output list
of paths. By default includes all paths regardless of depth, setting to
0 will behave like default.
:returns: A list of lists where each inner list is a path of node indices
:rtype: list
"""
raise TypeError("Invalid Input Type %s for graph" % type(graph))
@all_simple_paths.register(PyDiGraph)
def _digraph_all_simple_paths(graph, from_, to, min_depth=None, cutoff=None):
return digraph_all_simple_paths(graph, from_, to, min_depth=min_depth, cutoff=cutoff)
@all_simple_paths.register(PyGraph)
def _graph_all_simple_paths(graph, from_, to, min_depth=None, cutoff=None):
return graph_all_simple_paths(graph, from_, to, min_depth=min_depth, cutoff=cutoff)
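# Illustrative usage sketch for all_simple_paths (not part of the original
# module; the graph below is hypothetical):
#
#   import retworkx
#   g = retworkx.PyGraph()
#   g.add_nodes_from([0, 1, 2, 3])
#   g.add_edges_from_no_data([(0, 1), (1, 2), (0, 2), (2, 3)])
#   paths = retworkx.all_simple_paths(g, 0, 3)
#   # e.g. [[0, 1, 2, 3], [0, 2, 3]] -- each path visits a node at most once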
@functools.singledispatch
def floyd_warshall(
graph,
weight_fn=None,
default_weight=1.0,
parallel_threshold=300,
):
"""Find all-pairs shortest path lengths using Floyd's algorithm
Floyd's algorithm is used for finding shortest paths in dense graphs
or graphs with negative weights (where Dijkstra's algorithm fails).
This function is multithreaded and will launch a pool with threads equal
to the number of CPUs by default if the number of nodes in the graph is
above the value of ``parallel_threshold`` (it defaults to 300).
You can tune the number of threads with the ``RAYON_NUM_THREADS``
environment variable. For example, setting ``RAYON_NUM_THREADS=4`` would
limit the thread pool to 4 threads if parallelization was enabled.
:param graph: The graph to run Floyd's algorithm on. Can
either be a :class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`
:param callable weight_fn: A callable object (function, lambda, etc) which
will be passed the edge object and expected to return a ``float``. This
tells retworkx/rust how to extract a numerical weight as a ``float``
for edge object. Some simple examples are::
            floyd_warshall(graph, weight_fn=lambda x: 1)
to return a weight of 1 for all edges. Also::
floyd_warshall(graph, weight_fn=float)
to cast the edge object as a float as the weight. If this is not
specified a default value (either ``default_weight`` or 1) will be used
for all edges.
:param float default_weight: If ``weight_fn`` is not used this can be
optionally used to specify a default weight to use for all edges.
:param int parallel_threshold: The number of nodes to execute
the algorithm in parallel at. It defaults to 300, but this can
be tuned
:return: A read-only dictionary of path lengths. The keys are the source
node indices and the values are a dict of the target node and the
length of the shortest path to that node. For example::
{
0: {0: 0.0, 1: 2.0, 2: 2.0},
1: {1: 0.0, 2: 1.0},
2: {0: 1.0, 2: 0.0},
}
:rtype: AllPairsPathLengthMapping
"""
raise TypeError("Invalid Input Type %s for graph" % type(graph))
@floyd_warshall.register(PyDiGraph)
def _digraph_floyd_warshall(
graph,
weight_fn=None,
default_weight=1.0,
parallel_threshold=300,
):
return digraph_floyd_warshall(
graph,
weight_fn=weight_fn,
default_weight=default_weight,
parallel_threshold=parallel_threshold,
)
@floyd_warshall.register(PyGraph)
def _graph_floyd_warshall(
graph,
weight_fn=None,
default_weight=1.0,
parallel_threshold=300,
):
return graph_floyd_warshall(
graph,
weight_fn=weight_fn,
default_weight=default_weight,
parallel_threshold=parallel_threshold,
)
@functools.singledispatch
def floyd_warshall_numpy(
graph,
weight_fn=None,
default_weight=1.0,
parallel_threshold=300,
):
"""Find all-pairs shortest path lengths using Floyd's algorithm
Floyd's algorithm is used for finding shortest paths in dense graphs
or graphs with negative weights (where Dijkstra's algorithm fails).
This function is multithreaded and will launch a pool with threads equal
to the number of CPUs by default if the number of nodes in the graph is
above the value of ``parallel_threshold`` (it defaults to 300).
You can tune the number of threads with the ``RAYON_NUM_THREADS``
environment variable. For example, setting ``RAYON_NUM_THREADS=4`` would
limit the thread pool to 4 threads if parallelization was enabled.
:param graph: The graph to run Floyd's algorithm on. Can
either be a :class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`
:param callable weight_fn: A callable object (function, lambda, etc) which
will be passed the edge object and expected to return a ``float``. This
tells retworkx/rust how to extract a numerical weight as a ``float``
for edge object. Some simple examples are::
            floyd_warshall_numpy(graph, weight_fn=lambda x: 1)
    to return a weight of 1 for all edges. Also::
            floyd_warshall_numpy(graph, weight_fn=lambda x: float(x))
to cast the edge object as a float as the weight. If this is not
specified a default value (either ``default_weight`` or 1) will be used
for all edges.
:param float default_weight: If ``weight_fn`` is not used this can be
optionally used to specify a default weight to use for all edges.
:param int parallel_threshold: The number of nodes to execute
the algorithm in parallel at. It defaults to 300, but this can
be tuned
:returns: A matrix of shortest path distances between nodes. If there is no
path between two nodes then the corresponding matrix entry will be
``np.inf``.
:rtype: numpy.ndarray
"""
raise TypeError("Invalid Input Type %s for graph" % type(graph))
@floyd_warshall_numpy.register(PyDiGraph)
def _digraph_floyd_warshall_numpy(
graph, weight_fn=None, default_weight=1.0, parallel_threshold=300
):
return digraph_floyd_warshall_numpy(
graph,
weight_fn=weight_fn,
default_weight=default_weight,
parallel_threshold=parallel_threshold,
)
@floyd_warshall_numpy.register(PyGraph)
def _graph_floyd_warshall_numpy(graph, weight_fn=None, default_weight=1.0, parallel_threshold=300):
return graph_floyd_warshall_numpy(
graph,
weight_fn=weight_fn,
default_weight=default_weight,
parallel_threshold=parallel_threshold,
)
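# Illustrative usage sketch for floyd_warshall_numpy (not part of the original
# module; the graph below is hypothetical):
#
#   import retworkx
#   g = retworkx.PyGraph()
#   g.add_nodes_from(["a", "b", "c"])
#   g.add_edges_from([(0, 1, 2.0), (1, 2, 1.0)])
#   dist = retworkx.floyd_warshall_numpy(g, weight_fn=float)
#   # dist[0, 2] == 3.0 (a -> b -> c); unreachable pairs would be numpy.inf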
@functools.singledispatch
def astar_shortest_path(graph, node, goal_fn, edge_cost_fn, estimate_cost_fn):
"""Compute the A* shortest path for a graph
:param graph: The input graph to use. Can
either be a :class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`
:param int node: The node index to compute the path from
:param goal_fn: A python callable that will take in 1 parameter, a node's
data object and will return a boolean which will be True if it is the
finish node.
:param edge_cost_fn: A python callable that will take in 1 parameter, an
edge's data object and will return a float that represents the cost
of that edge. It must be non-negative.
:param estimate_cost_fn: A python callable that will take in 1 parameter, a
node's data object and will return a float which represents the
estimated cost for the next node. The return must be non-negative. For
the algorithm to find the actual shortest path, it should be
admissible, meaning that it should never overestimate the actual cost
to get to the nearest goal node.
:returns: The computed shortest path between node and finish as a list
of node indices.
:rtype: NodeIndices
"""
raise TypeError("Invalid Input Type %s for graph" % type(graph))
@astar_shortest_path.register(PyDiGraph)
def _digraph_astar_shortest_path(graph, node, goal_fn, edge_cost_fn, estimate_cost_fn):
return digraph_astar_shortest_path(graph, node, goal_fn, edge_cost_fn, estimate_cost_fn)
@astar_shortest_path.register(PyGraph)
def _graph_astar_shortest_path(graph, node, goal_fn, edge_cost_fn, estimate_cost_fn):
return graph_astar_shortest_path(graph, node, goal_fn, edge_cost_fn, estimate_cost_fn)
@functools.singledispatch
def dijkstra_shortest_paths(
graph,
source,
target=None,
weight_fn=None,
default_weight=1.0,
as_undirected=False,
):
"""Find the shortest path from a node
This function will generate the shortest path from a source node using
Dijkstra's algorithm.
:param graph: The input graph to use. Can either be a
:class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`
:param int source: The node index to find paths from
:param int target: An optional target to find a path to
:param weight_fn: An optional weight function for an edge. It will accept
a single argument, the edge's weight object and will return a float
which will be used to represent the weight/cost of the edge
:param float default_weight: If ``weight_fn`` isn't specified this optional
float value will be used for the weight/cost of each edge.
:param bool as_undirected: If set to | |
import os
import importlib
import operator
import yaml
from core.checks import APTPackageChecksBase
from core import constants
from core.cli_helpers import CLIHelper
from core.log import log
from core.utils import mktemp_dump
from core.ystruct import YAMLDefOverrideBase, YAMLDefSection
class CallbackHelper(object):
def __init__(self):
self.callbacks = {}
def callback(self, f):
def callback_inner(*args, **kwargs):
return f(*args, **kwargs)
self.callbacks[f.__name__] = callback_inner
# we don't need to return but we leave it so that we can unit test
# these methods.
return callback_inner
YOverridesCollection = []
def ydef_override(c):
YOverridesCollection.append(c)
return c
class YDefsSection(YAMLDefSection):
def __init__(self, name, content, extra_overrides=None,
checks_handler=None):
"""
@param name: name of defs group
@param content: defs tree of type dict
@param extra_overrides: optional extra overrides
@param checks_handler: handler object used by some overrides to locate
callback methods.
"""
overrides = [] + YOverridesCollection
if extra_overrides:
overrides += extra_overrides
if checks_handler:
for c in overrides:
if hasattr(c, 'EVENT_CHECK_OBJ'):
c.EVENT_CHECK_OBJ = checks_handler
super().__init__(name, content, override_handlers=overrides)
class YAMLDefOverrideBaseX(YAMLDefOverrideBase):
def get_cls(self, import_str):
mod = import_str.rpartition('.')[0]
class_name = import_str.rpartition('.')[2]
return getattr(importlib.import_module(mod), class_name)
def get_property(self, import_str):
mod = import_str.rpartition('.')[0]
property = import_str.rpartition('.')[2]
class_name = mod.rpartition('.')[2]
mod = mod.rpartition('.')[0]
cls = getattr(importlib.import_module(mod), class_name)
try:
ret = getattr(cls(), property)
except Exception:
if constants.DEBUG_MODE:
log.error("failed to get property %s", import_str)
raise
return ret
def get_attribute(self, import_str):
mod = import_str.rpartition('.')[0]
attr = import_str.rpartition('.')[2]
try:
ret = getattr(importlib.import_module(mod), attr)
except Exception as exc:
if constants.DEBUG_MODE:
log.exception("failed to get module attribute %s", import_str)
# ystruct.YAMLDefOverrideBase swallows AttributeError so need to
# convert to something else.
if type(exc) == AttributeError:
raise ImportError from exc
raise
return ret
def get_import(self, import_str):
"""
First attempt to treat import string as a class property then try
module attribute.
"""
try:
return self.get_property(import_str)
except Exception:
pass
return self.get_attribute(import_str)
@ydef_override
class YAMLDefChecks(YAMLDefOverrideBaseX):
KEYS = ['checks']
@ydef_override
class YAMLDefConclusions(YAMLDefOverrideBaseX):
KEYS = ['conclusions']
@ydef_override
class YAMLDefPriority(YAMLDefOverrideBaseX):
KEYS = ['priority']
def __int__(self):
return int(self.content)
@ydef_override
class YAMLDefDecision(YAMLDefOverrideBaseX):
KEYS = ['decision']
@property
def is_singleton(self):
"""
A decision can be based off a single check or combinations of checks.
If the value is a string and not a dict then it is assumed to be a
single check with no boolean logic applied.
"""
return type(self.content) is str
def __iter__(self):
for _bool, val in self.content.items():
yield _bool, val
@ydef_override
class YAMLDefExpr(YAMLDefOverrideBaseX):
"""
An expression can be a string or a list of strings and can be provided
as a single value or dict (with keys start, body, end etc) e.g.
An optional passthrough-results key is provided and used with events type
    definitions to indicate that search results should be passed to
their handler as a raw core.searchtools.SearchResultsCollection. This is
typically so that they can be parsed with core.analytics.LogEventStats.
Defaults to False.
params:
expr|hint:
<str>
start|body|end:
expr: <int>
hint: <int>
usage:
If value is a string:
str(expr|hint)
If using keys start|body|end:
<key>.expr
<key>.hint
Note that expressions can be a string or list of strings.
"""
KEYS = ['start', 'body', 'end', 'expr', 'hint', 'passthrough-results']
@property
def expr(self):
"""
Subkey e.g for start.expr, body.expr. Expression defs that are just
a string or use subkey 'expr' will rely on __getattr__.
"""
return self.content.get('expr', self.content)
def __getattr__(self, name):
"""
This is a fallback for when the value is not a key and we just want
to return the contents e.g. a string or list.
        If the value is a string or list you can use a non-existent key e.g.
        'value' to retrieve it.
"""
if type(self.content) == dict:
return super().__getattr__(name)
else:
return self.content
@ydef_override
class YAMLDefRaises(YAMLDefOverrideBaseX):
KEYS = ['raises']
@property
def message(self):
""" Optional """
return self.content.get('message')
@property
def format_dict(self):
"""
Optional dict of key/val pairs used to format the message string.
"""
_format_dict = self.content.get('format-dict')
if not _format_dict:
return {}
return {k: self.get_import(v) for k, v in _format_dict.items()}
@property
def format_groups(self):
""" Optional """
return self.content.get('search-result-format-groups')
@property
def type(self):
""" Imports and returns class object. """
return self.get_cls(self.content['type'])
@ydef_override
class YAMLDefSettings(YAMLDefOverrideBaseX):
KEYS = ['settings']
def __iter__(self):
for key, val in self.content.items():
yield key, val
@ydef_override
class YAMLDefInput(YAMLDefOverrideBaseX):
KEYS = ['input']
TYPE_COMMAND = 'command'
TYPE_FILESYSTEM = 'filesystem'
# NOTE: this must be set by the handler object that is using the overrides.
# we need a better way to do this but we can't use __init__ because
# this class must be provided uninstantiated.
EVENT_CHECK_OBJ = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._path = None
@property
def meta(self):
defaults = {'allow-all-logs': True,
'args': [],
'kwargs': {},
'args-callback': None}
_meta = self.content.get('meta', defaults)
defaults.update(_meta)
return defaults
@property
def path(self):
if self.type == self.TYPE_FILESYSTEM:
path = os.path.join(constants.DATA_ROOT, self.value)
if constants.USE_ALL_LOGS and self.meta['allow-all-logs']:
path = "{}*".format(path)
return path
elif self.type == self.TYPE_COMMAND:
if self._path:
return self._path
args_callback = self.meta['args-callback']
if args_callback:
args, kwargs = getattr(self.EVENT_CHECK_OBJ, args_callback)()
else:
args = self.meta['args']
kwargs = self.meta['kwargs']
# get command output
out = getattr(CLIHelper(), self.value)(*args,
**kwargs)
# store in temp file to make it searchable
            # NOTE: we don't need to delete this at the end since they are
            # created in the plugin tmp dir which is wiped at the end of the
            # plugin run.
self._path = mktemp_dump(''.join(out))
return self._path
@ydef_override
class YAMLDefContext(YAMLDefOverrideBaseX):
KEYS = ['context']
def __getattr__(self, name):
name = name.replace('_', '-')
return self._load()[name]
def _load(self):
ctxt = {}
for key, val in self.content.items():
ctxt[key] = self.get_import(val)
return ctxt
@ydef_override
class YAMLDefRequires(YAMLDefOverrideBaseX):
KEYS = ['requires']
@property
def apt(self):
return self.content.get('apt', None)
@property
def _property(self):
return self.content.get('property', None)
@property
def value(self):
"""
An optional value to match against. If no value is provided this will
return True by default.
"""
return self.content.get('value', True)
@property
def op(self):
""" Operator used for value comparison. Default is eq. """
return getattr(operator, self.content.get('op', 'eq'))
def _passes(self, apt, property, value):
""" Assert whether the requirement is met.
Returns True if met otherwise False.
"""
if apt:
pkg = apt
result = APTPackageChecksBase(core_pkgs=[pkg]).is_installed(pkg)
log.debug('requirement check: apt %s (result=%s)', pkg, result)
return result
elif property:
result = self.op(self.get_property(property), value)
log.debug('requirement check: property %s %s %s (result=%s)',
property, self.op.__name__, value, result)
return result
log.debug('unknown requirement check')
return False
def _has_groups(self):
if set(self.content.keys()).intersection(['and', 'or']):
return True
return False
def _is_valid_requirement(self, entry):
apt = entry.get('apt')
property = entry.get('property')
if not any([apt, property]):
return False
return True
@property
def passes(self):
"""
Content can either be a single requirement or a list of requirements.
Returns True if any requirement is met.
"""
if not self._has_groups():
log.debug("single requirement provided")
if self._is_valid_requirement(self.content):
return self._passes(self.apt, self._property, self.value)
else:
log.debug("invalid requirement: %s - fail", self.content)
return False
else:
log.debug("requirements provided as groups")
results = {}
# list of requirements
for op, requirements in self.content.items():
if op not in results:
results[op] = []
log.debug("op=%s has %s requirement(s)", op, len(requirements))
for entry in requirements:
if not self._is_valid_requirement(entry):
log.debug("invalid requirement: %s - fail", entry)
_result = False
else:
_result = self._passes(entry.get('apt'),
entry.get('property'),
entry.get('value', True))
results[op].append(_result)
if op == 'or' and any(results[op]):
return True
elif op == 'and' and not all(results[op]):
return False
# Now AND all groups for the final result
final_results = []
for op in results:
if op == 'and':
final_results.append(all(results[op]))
else:
final_results.append(any(results[op]))
return all(final_results)
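    # Illustrative requirements content for the grouped evaluation above
    # (hypothetical package and property names):
    #
    #   requires:
    #     and:
    #       - apt: some-package
    #       - property: path.to.SomeClass.some_property
    #         value: expected-value
    #     or:
    #       - apt: other-package
    #       - apt: another-package
    #
    # Evaluation short-circuits: an 'or' group returns True as soon as one of
    # its entries passes and an 'and' group returns False as soon as one of
    # its entries fails; otherwise the per-group results (all() for 'and',
    # any() for 'or') are AND-ed together for the final outcome.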
@ydef_override
class YAMLDefConfig(YAMLDefOverrideBaseX):
KEYS = ['config']
def actual(self, key, section=None):
obj = self.get_cls(self.handler)
if hasattr(self, 'path'):
self.cfg = obj(self.path)
else:
self.cfg = obj()
if section:
actual = self.cfg.get(key, section=section)
else:
actual = self.cfg.get(key)
return actual
def check(self, actual, value, op, allow_unset=False):
if value is not None and actual is None:
if allow_unset:
return True
else:
return False
# Apply the type from the yaml to that of the config
if value is not None and type(value) != type(actual):
actual = type(value)(actual)
if getattr(operator, op)(actual, value):
return True
return False
class YDefsLoader(object):
def __init__(self, ytype):
self.ytype = ytype
def _is_def(self, path):
return path.endswith('.yaml')
def _get_yname(self, path):
return os.path.basename(path).partition('.yaml')[0]
def _get_defs_recursive(self, path):
""" Recursively find all yaml/files beneath a directory. """
defs = {}
for entry in os.listdir(path):
_path = os.path.join(path, entry)
if os.path.isdir(_path):
defs[os.path.basename(_path)] = self._get_defs_recursive(_path)
else:
if not self._is_def(entry):
continue
if self._get_yname(_path) == os.path.basename(path):
with open(_path) as fd:
defs.update(yaml.safe_load(fd.read()) or {})
continue
with open(_path) as fd:
_content = yaml.safe_load(fd.read()) or {}
defs[self._get_yname(_path)] = _content
return defs
@property
def plugin_defs(self):
path = os.path.join(constants.PLUGIN_YAML_DEFS, self.ytype,
constants.PLUGIN_NAME)
if os.path.isdir(path):
return | |
import os
import numpy as np
import h5py
import utils.normalization as normalization
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import random
from scipy.spatial import ConvexHull
import functools
from scipy.spatial import Delaunay
import traceback
import sys
import tensorflow as tf
from tensorflow import keras
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score
from scipy import stats
from pandas_plink import read_plink
import csv
class data_generator_ae:
'''
Class to generate data for training and evaluation.
If get_data is False then only ind pop list will be generated
'''
def __init__(self,
filebase,
normalization_mode = "smartPCAstyle",
normalization_options= {"flip":False, "missing_val":0.0},
get_genotype_data=True,
impute_missing = True):
'''
:param filebase: path + filename prefix of eigenstrat (eigenstratgeno/snp/fam) or plink (bed/bim/fam) data
:param normalization_mode: how to normalize the genotypes. corresponds to functions in utils/normalization
:param normalization_options: options for normalization
        :param get_genotype_data: whether or not to compute and return genotypes (otherwise only metadata such as the number of samples is generated; faster)
:param impute_missing: if true, genotypes that are missing in the original data are set to the most frequent genotype per marker.
'''
self.filebase = filebase
self.missing_val = normalization_options["missing_val"]
self.normalization_mode = normalization_mode
self.normalization_options = normalization_options
self.train_batch_location = 0
self.impute_missing = impute_missing
self._define_samples()
if get_genotype_data:
self._normalize()
def _impute_missing(self, genotypes):
'''
Replace missing values in genotypes with the most frequent value per SNP.
:param genotypes: (n_markers x n_samples) numpy array of genotypes, missing values represented by 9.
'''
for m in genotypes:
m[m == 9.0] = get_most_frequent(m)
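        # Worked example (hypothetical genotypes): for a marker row
        # [0., 2., 9., 2.], get_most_frequent (assumed to be defined elsewhere
        # in this module) picks 2.0 as the most common value, so the missing
        # entry 9.0 is replaced and the row becomes [0., 2., 2., 2.].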
def _define_samples(self):
ind_pop_list = get_ind_pop_list(self.filebase)
self.sample_idx_all = np.arange(len(ind_pop_list))
self.sample_idx_train = np.arange(len(ind_pop_list))
self.n_train_samples_orig = len(self.sample_idx_all)
self.n_train_samples = self.n_train_samples_orig
self.ind_pop_list_train_orig = ind_pop_list[self.sample_idx_all]
self.train_set_indices = np.array(range(self.n_train_samples))
self.n_valid_samples = 0
def _sparsify(self, mask, keep_fraction):
'''
Sparsify a mask defining data that is missing / present.
0 represents missing
1 represents present
:param mask: int array (n x m)
:param keep_fraction: probability to keep data
'''
mask[np.random.random_sample(mask.shape) > keep_fraction] = 0
def _normalize(self):
'''
Normalize the genotype data.
'''
ind_pop_list = get_ind_pop_list(self.filebase)
n_samples = len(ind_pop_list)
try:
genotypes = np.genfromtxt(self.filebase + ".eigenstratgeno", delimiter = np.repeat(1, n_samples))
self.n_markers = len(genotypes)
except:
(genotypes, self.n_markers) = genfromplink(self.filebase)
if self.impute_missing:
self._impute_missing(genotypes)
genotypes = np.array(genotypes, order='F') # Cheeky, this will then be transposed, so we have individual-major order
genotypes_train = genotypes[:, self.sample_idx_all]
normalization_method = getattr(normalization, "normalize_genos_"+self.normalization_mode)
genotypes_train_normed, _, scaler = normalization_method(genotypes_train, np.array([]),
get_scaler = True,
**self.normalization_options)
self.scaler = scaler
self.genotypes_train_orig = np.array(genotypes_train_normed, dtype = np.dtype('f4'), order='C')
def get_nonnormalized_data(self):
'''
        Get the non-normalized training data.
Missing data represented by missing_val.
:return: train data (n_samples x n_markers)
'''
ind_pop_list = get_ind_pop_list(self.filebase)
n_samples = len(ind_pop_list)
try:
genotypes = np.genfromtxt(self.filebase + ".eigenstratgeno", delimiter = np.repeat(1, n_samples))
self.n_markers = len(genotypes)
except:
(genotypes, self.n_markers) = genfromplink(self.filebase)
if self.impute_missing:
self._impute_missing(genotypes)
else:
genotypes[genotypes == 9.0] = self.missing_val
genotypes_train = np.array(genotypes[:, self.sample_idx_train].T, order='C')
return genotypes_train
def define_validation_set(self, validation_split):
'''
Define a set of validation samples from original train samples.
Stratified by population.
Re-defines self.sample_idx_train and self.sample_idx_valid
:param validation_split: proportion of samples to reserve for validation set
        If validation_split corresponds to fewer samples than there are populations, one sample per population is returned.
'''
# reset index of fetching train batch samples
self.train_batch_location = 0
_, _, self.sample_idx_train, self.sample_idx_valid = get_test_samples_stratified(self.genotypes_train_orig, self.ind_pop_list_train_orig, validation_split)
self.train_set_indices = np.array(range((len(self.sample_idx_train))))
self.n_valid_samples = len(self.sample_idx_valid)
self.n_train_samples = len(self.sample_idx_train)
def get_valid_set(self, sparsify):
'''
        :param sparsify: probability that each genotype is masked as missing
:return: input_data_valid (n_valid_samples x n_markers x 2): sparsified validation genotypes with mask specifying missing values.
originally missing + removed by sparsify are indicated by value 0
target_data_valid (n_valid_samples x n_markers): original validation genotypes
ind_pop_list valid (n_valid_samples x 2) : individual and population IDs of validation samples
'''
# n_valid_samples x n_markers x 2
input_data_valid = np.full((len(self.sample_idx_valid),self.genotypes_train_orig.shape[1],2),1.0)
genotypes_valid = np.copy(self.genotypes_train_orig[self.sample_idx_valid])
mask_valid = np.full(input_data_valid[:,:,0].shape, 1)
if not self.impute_missing:
# set the ones that are originally missing to 0 in the mask (since we haven't imputed them with RR)
mask_valid[np.where(genotypes_valid == self.missing_val)] = 0
if sparsify > 0.0:
self._sparsify(mask_valid, 1.0 - sparsify)
missing_idx_valid = np.where(mask_valid == 0)
genotypes_valid[missing_idx_valid] = self.missing_val
input_data_valid[:,:,0] = genotypes_valid
input_data_valid[:,:,1] = mask_valid
targets = self.genotypes_train_orig[self.sample_idx_valid]
return input_data_valid, targets, self.ind_pop_list_train_orig[self.sample_idx_valid]
def reset_batch_index(self):
'''
Reset the internal sample counter for batches so that the next batch starts with sample 0.
'''
self.train_batch_location = 0
def shuffle_train_samples(self):
'''
Shuffle the order of the train samples that batches are taken from
'''
p = np.random.permutation(len(self.sample_idx_train))
self.train_set_indices = self.train_set_indices[p]
def _get_indices_looped(self, n_samples):
'''
Get indices of n_samples from the train set. Start over at 0 if reaching the end.
:param n_samples: number of samples to get
:return: indices
'''
if self.train_batch_location + n_samples < len(self.sample_idx_train):
idx = list(range(self.train_batch_location, self.train_batch_location + n_samples, 1))
self.train_batch_location = (self.train_batch_location + n_samples) % len(self.sample_idx_train)
else:
idx = list(range(self.train_batch_location, len(self.sample_idx_train), 1)) + list(range(0, n_samples - (len(self.sample_idx_train)-self.train_batch_location) , 1))
self.train_batch_location = n_samples - (len(self.sample_idx_train)-self.train_batch_location)
return self.train_set_indices[idx]
def get_train_batch(self, sparsify, n_samples_batch):
'''
Get n_samples_batch train samples, with genotypes randomly set to missing with probability sparsify.
Fetch n_samples_batch samples sequentially starting at index self.train_batch_location, looping over the current train set.
If a validation set has been defined, return train samples excluding the validation samples.
:param sparsify: probability of setting a genotype to missing
:param n_samples_batch: number of samples in the batch
:return: input_data_train_batch (n_samples x n_markers x 2): sparsified genotypes with mask specifying missing values of the train batch.
originally missing + removed by sparsify are indicated by value 0
target_data_train_batch (n_samples x n_markers): original genotypes of this train batch
ind_pop_list_train_batch (n_samples x 2) : individual and population IDs of train batch samples
'''
input_data_train = np.full((n_samples_batch, self.genotypes_train_orig.shape[1], 2), 1.0, dtype=np.dtype('f4'))
indices_this_batch = self._get_indices_looped(n_samples_batch)
genotypes_train = np.copy(self.genotypes_train_orig[self.sample_idx_train[indices_this_batch]])
mask_train = np.full(input_data_train[:,:,0].shape, 1)
if not self.impute_missing:
# set the ones that are originally missing to 0 in the mask (since we haven't imputed them with RR)
mask_train[np.where(genotypes_train == self.missing_val)] = 0
if sparsify > 0.0:
self._sparsify(mask_train, 1.0 - sparsify)
# indices of originally missing data + artificially sparsified data
missing_idx_train = np.where(mask_train == 0)
# fill genotypes with original valid genotypes and sparsify according to binary_mask_train
genotypes_train[missing_idx_train] = self.missing_val
input_data_train[:,:,0] = genotypes_train
input_data_train[:,:,1] = mask_train
targets = self.genotypes_train_orig[self.sample_idx_train[indices_this_batch]]
return input_data_train, targets, self.ind_pop_list_train_orig[self.sample_idx_train[indices_this_batch]]
def get_train_set(self, sparsify):
'''
Get all train samples, with genotypes randomly set to missing with probability sparsify.
If a validation set has been defined, the returned train samples exclude the validation samples.
:param sparsify: fraction of data to remove
:return: input_data_train (n_train_samples x n_markers x 2): sparsified genotypes with mask specifying missing values of all train samples.
originally missing + removed by sparsify are indicated by value 0
target_data_train (n_train_samples x n_markers): original genotypes of train samples
ind_pop_list_train (n_train_samples x 2) : individual and population IDs of train samples
'''
# n_train_samples x n_markers x 2
input_data_train = np.full((self.n_train_samples, self.genotypes_train_orig[self.sample_idx_train].shape[1], 2), 1.0)
genotypes_train = np.copy(self.genotypes_train_orig[self.sample_idx_train])
# the mask is all ones
mask_train = np.full(input_data_train[:,:,0].shape, 1)
if not self.impute_missing:
# set the ones that are originally missing to 0 in the mask (since we haven't imputed them with RR)
mask_train[np.where(genotypes_train == self.missing_val)] = 0
if sparsify > 0.0:
self._sparsify(mask_train, 1.0 - sparsify)
# indices of originally missing data + artificially sparsified data
missing_idx_train = np.where(mask_train == 0)
# fill genotypes with original valid genotypes and sparsify according to binary_mask_train
genotypes_train[missing_idx_train] = self.missing_val
input_data_train[:,:,0] = genotypes_train
input_data_train[:,:,1] = mask_train
print("In DG.get_train_set: number of "+str(self.missing_val)+" genotypes in train: " + str( len(np.where(input_data_train[:,:,0] == self.missing_val)[0])))
print("In DG.get_train_set: number of -9 genotypes in train: " + str( len(np.where(input_data_train[:,:,0] == -9)[0])))
print("In DG.get_train_set: number of 0 values in train mask: " + str( len(np.where(input_data_train[:,:,1] == 0)[0])))
targets = self.genotypes_train_orig[self.sample_idx_train]
return input_data_train, targets, self.ind_pop_list_train_orig[self.sample_idx_train]
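# Illustrative usage sketch of the generator methods above (the class name and
# filebase path are assumptions; only methods defined in this file are called):
#
#     dg = DataGenerator("path/to/filebase")   # hypothetical constructor
#     dg.define_validation_set(validation_split=0.2)
#     dg.shuffle_train_samples()
#     for _ in range(dg.n_train_samples // 32):
#         batch_in, batch_targets, batch_inds = dg.get_train_batch(sparsify=0.1,
#                                                                  n_samples_batch=32)
#     valid_in, valid_targets, valid_inds = dg.get_valid_set(sparsify=0.1)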
def in_hull(p, hull):
"""
from https://stackoverflow.com/questions/16750618/whats-an-efficient-way-to-find-if-a-point-lies-in-the-convex-hull-of-a-point-cl
Test if points in `p` are in `hull`
`p` should be a `NxK` coordinates of `N` points in `K` dimensions
`hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the
coordinates of `M` points in `K`dimensions for which Delaunay triangulation
will be computed
"""
if not isinstance(hull,Delaunay):
hull = Delaunay(hull)
return hull.find_simplex(p, bruteforce=True)>=0
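# Minimal example of in_hull (illustrative only): points inside the unit
# square are reported as True, points outside as False.
#
#     import numpy as np
#     square = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
#     queries = np.array([[0.5, 0.5], [2.0, 2.0]])
#     print(in_hull(queries, square))        # -> [ True False]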
def convex_hull_error(coords_by_pop, plot = False, min_points_required = 3):
'''
Calculate the hull error of projected coordinates of the populations defined by coords_by_pop
For every population: calculate the fraction that other populations' points make up of all the points inside THIS population's convex hull
Return the average of this over populations.
:param coords_by_pop: dict mapping population to list of coordinates (n_samples x n_dim)
:param min_points_required: min number of points needed to define a simplex for the convex hull.
:return: the hull error
'''
all_pop_coords = functools.reduce(lambda a,b : a+b, [coords_by_pop[pop] for pop in coords_by_pop.keys()])
num_pops = len(coords_by_pop.keys())
frac_sum = 0.0
try:
num_pops = len(list(coords_by_pop.keys()))
for pop in coords_by_pop.keys():
other_pops = list(coords_by_pop.keys())
assert len(other_pops) == num_pops
other_pops.remove(pop)
this_pop_coords = np.array(coords_by_pop[pop])
num_points_in_pop = float(len(this_pop_coords))
other_pop_coords = np.concatenate([np.array(coords_by_pop[popu]) for popu in other_pops])
assert len(other_pop_coords) + num_points_in_pop == len(all_pop_coords), "Number of inds does not match up!"
if num_points_in_pop >= min_points_required:
num_other_points_in_hull = sum(in_hull(other_pop_coords, this_pop_coords))
frac_other_pop_points_in_hull = num_other_points_in_hull / (num_points_in_pop + num_other_points_in_hull)
frac_sum += frac_other_pop_points_in_hull
if plot:
hull = ConvexHull(this_pop_coords)
plt.scatter(this_pop_coords[:,0], this_pop_coords[:,1], color="red")
plt.scatter(other_pop_coords[:,0], other_pop_coords[:,1], color="black", alpha=0.8)
for simplex in hull.simplices:
plt.plot(this_pop_coords[simplex, 0], this_pop_coords[simplex, 1], 'k-')
plt.show()
plt.close()
else:
print("Too few for hull: " + str(pop) )
if num_points_in_pop < 3:
print("-- shape: " + str(this_pop_coords.shape) + ": skipping")
continue
elif num_points_in_pop == 3:
n_dim = int(num_points_in_pop) - 1
else:
n_dim = int(num_points_in_pop) - 2
print("-- shape: " + str(this_pop_coords.shape) + ": using " + str(n_dim) + " dimensions instead")
num_other_points_in_hull = sum(in_hull(other_pop_coords[:,0:n_dim], this_pop_coords[:,0:n_dim]))
frac_other_pop_points_in_hull = num_other_points_in_hull / (num_points_in_pop + num_other_points_in_hull)
frac_sum += frac_other_pop_points_in_hull
hull_error = frac_sum / num_pops
except Exception as e:
print("Exception in calculating hull error: {0}".format(e))
traceback.print_exc(file=sys.stdout)
hull_error = -1.0
return hull_error
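# Illustrative call of convex_hull_error (populations and coordinates are made
# up): each population maps to a list of projected 2D coordinates, and two
# well-separated populations give a hull error close to 0.
#
#     coords_by_pop = {
#         "pop_A": [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]],
#         "pop_B": [[5.0, 5.0], [5.0, 6.0], [6.0, 5.0], [6.0, 6.0]],
#     }
#     print(convex_hull_error(coords_by_pop))   # -> 0.0, hulls do not overlap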
def get_baseline_gc(genotypes):
'''
Get the genotype concordance of guessing the most frequent genotype per SNP for every sample.
:param genotypes: n_samples x n_genos array of genotypes coded as 0,1,2
:return: genotype concordance value
'''
n_samples = float(genotypes.shape[0])
modes = stats.mode(genotypes)
most_common_genos = modes.mode
counts = modes.count
# num with most | |
k in (self.dyn_values[aname].dependencies or []):
self.debug("In update_dependencies(%s): "
" (%s,last read at %s, KeepTime is %s)"
%(aname,k,self._last_read.get(k,0),self.KeepTime))
old = self._locals.get(k)
updated = self.dyn_values[k].updated
if self.KeepTime and (
not updated or now>(updated+(self.KeepTime/1e3))):
self.debug("In update_dependencies(%s): read value"%(k,))
if USE_STATIC_METHODS:
self.read_dyn_attr(self,tango.fakeAttributeValue(k))
else:
self.read_dyn_attr(tango.fakeAttributeValue(k))
v = self.dyn_values[k]
if (k.lower().strip()!=aname.lower().strip()
and isinstance(v.value,Exception)):
self.warning('evalAttr(%s): An exception is rethrown '
'from attribute %s'%(aname,k))
print(k,aname,v.value)
#Exceptions are passed to dependent attributes
raise RethrownException(v.value)
else:
self._locals[k]=v.value #.value
try:
if self._locals[k] != old:
changed = True
except:
changed = True
return changed
#@Catched #Catched decorator is not compatible with PyTango_Throw_Exception
@self_locked
def read_dyn_attr(self,attr,fire_event=True):
"""
Method to evaluate attributes from external clients.
Internally, evalAttr is used instead, triggering push_event if needed
"""
#if not USE_STATIC_METHODS: self = self.myClass.DynDev
attr = tango.fakeAttributeValue(attr) if isString(attr) else attr
aname = self.get_attr_name(attr.get_name())
result = None
tstart=time.time()
self.debug("DynamicDS(%s)::read_dyn_atr(%s), entering at %s\n%s"
% (self.get_name(),aname,time2str(tstart),'<'*80))
v = self.get_attr_cache(aname)
if v is not None:
self.debug('Returning cached (%s) value for %s: %s(%s)'%(
time2str(v.updated),aname,type(v.value),shortstr(v.value)))
return attr.set_value_date_quality(v.value,v.date,v.quality)
try:
self.debug("DynamicDS(%s)::read_dyn_atr(%s) => evalAttr()"
% (self.get_name(),aname))
result = self.evalAttr(aname) #push is done here
quality = getattr(result,'quality',
self.get_attr_quality(aname,result))
date = self.get_attr_date(aname,result)
result = self.dyn_types[aname].pytype(result)
attr.set_value_date_quality(result,date,quality)
text_result = (type(result) is list and result and '%s[%s]'
%(type(result[0]),len(result))) or str(result)
now=time.time()
self._last_period[aname]=now-self._last_read.get(aname,0)
self._last_read[aname]=now
self._read_times[aname]=now-self._hook_epoch
self._total_usage += now-self._hook_epoch
self.debug('DynamicDS('+self.get_name()+
").read_dyn_attr("+aname+")="+text_result+
", ellapsed %1.2e"%(self._eval_times[aname])+" seconds.\n")
if 'debug' in str(self.getLogLevel()) and \
(time.time()>(self._cycle_start+self.PollingCycle*1e-3) \
if hasattr(self,'PollingCycle') \
else aname==sorted(self.dyn_values.keys())[-1]):
self.attribute_polling_report()
except Exception as e:
now=time.time()
self.dyn_values[aname].update(e,now,AttrQuality.ATTR_INVALID) #Exceptions always kept!
self._last_period[aname]=now-self._last_read.get(aname,0)
self._last_read[aname]=now
self._read_times[aname]=now-self._hook_epoch #Internal debugging
self._eval_times[aname]=now-tstart #Internal debugging
if aname==self.dyn_values.keys()[-1]: self._cycle_start = now
last_exc = str(e)
self.error('DynamicDS_read_%s_Exception: %s\n\tresult=%s'
% (aname,last_exc,result))
if not isinstance(e,RethrownException):
print(traceback.format_exc())
raise Exception('DynamicDS_read_%s_Exception: %s' % (aname,last_exc))
## This hook has been used to force self to always be passed as an argument and avoid dynattr mismatching
if USE_STATIC_METHODS: read_dyn_attr=staticmethod(read_dyn_attr)
#@Catched
@self_locked
def write_dyn_attr(self,attr,fire_event=True):
aname = attr.get_name()
self.info("DynamicDS("+self.get_name()+")::write_dyn_atr("+aname+"), entering at "+time.ctime()+"...")
#THIS CHANGE MUST BE TESTED AGAINST PYTANGO7!!!! For Scalar/Spectrum/Image R/W Attrs!!
try: #PyTango8
data = attr.get_write_value()
except:
data = []
attr.get_write_value(data)
if fun.isSequence(data) and self.dyn_types[aname].dimx==1:
data = data[0]
elif self.dyn_types[aname].dimy!=1:
x = attr.get_max_dim_x()
data = [data[i:i+x] for i in range(len(data))[::x]]
self.setAttr(aname,data)
#self.dyn_values[aname].update(result,time.time(),PyTango.AttrQuality.ATTR_VALID)
##if fire_event: self.fireAttrEvent(aname,data)
## This hook has been used to force self to always be passed as an argument and avoid dynattr mismatching
if USE_STATIC_METHODS: write_dyn_attr=staticmethod(write_dyn_attr)
def push_dyn_attr(self,aname,value=None,date=None,quality=None,
events=None,changed=None,queued=False):
try:
aname = self.get_attr_name(aname)
queued = queued and self.MaxEventStream
if fun.clmatch('state$',aname):
aname = 'State'
events,changed = True,True
value = value if value is not None else self.get_state()
date,quality = time.time(),AttrQuality.ATTR_VALID
t = self.dyn_values.get(aname,DynamicAttribute())
value = notNone(value,t.value)
date = notNone(date,fun.now())
quality = notNone(quality,t.quality)
if events is None:
# That call parses the contents of UseEvents property
events = self.check_attribute_events(aname)
if changed is None:
changed = self.check_changed_event(aname,value,events)
if not events or not changed:
return 'nothing to do'
self.info('push_dyn_attr(%s,%s)=%s(%s))\n%s'%(aname,
queued and 'queued' or 'pushed',type(value),
shortstr(value),'<'*80))
if queued:
try:
self._events_lock.acquire()
self._events_queue.put((aname,value,date,quality,events))
finally:
self._events_lock.release()
return 'queued'
else:
if aname.lower() in ('state','status'):
self.push_change_event(aname)
else:
self.push_change_event(aname,value,date,quality)
if fun.clsearch('archive',events):
if aname.lower() in ('state','status'):
self.push_archive_event(aname)
else:
self.push_archive_event(aname,value,date,quality)
return 'pushed'
except Exception as e:
self.error('push_dyn_attr(%s,%s(%s),%s,%s) failed!\n%s' %
(aname,type(value),value,date,quality,traceback.format_exc()))
#--------------------------------------------------------------------------
# Attributes and State Evaluation Methods
#--------------------------------------------------------------------------
## DYNAMIC ATTRIBUTE EVALUATION ...
# Copy it to your device and add any method you will need
def evalAttr(self,aname,WRITE=False,VALUE=None,_locals=None, push=False):
'''
SPECIFIC METHODS DEFINITION DONE IN self._locals!!!
@remark Generators don't work inside eval! Use lists instead.
If push=True, any result is considered a change
'''
aname,formula = self.get_attr_name(aname),''
self.debug("DynamicDS(%s)::evalAttr(%s,%s): ... last value was %s"
% (self.get_name(), aname, push, shortstr(
getattr(self.dyn_values.get(aname,None),'value',None))))
tstart = time.time()
try:
aname,formula,compiled = self.get_attr_formula(aname,full=True)
##Checking attribute dependencies
# dependencies assigned at dyn_attr by self.check_dependencies()
deps = False
if (self.CheckDependencies and aname in self.dyn_values and
self.dyn_values[aname].dependencies):
deps = self.update_dependencies(aname)
else:
self.debug("In evalAttr ... updating locals from dyn_values")
for k,v in self.dyn_values.items():
if v.keep and k in formula:
self._locals[k]=v.value
cache = self.get_attr_cache(aname) if (
not WRITE and not push and not deps) else None
if cache is not None:
self.debug('Returning cached (%s) value for %s: %s(%s)'
%(fun.time2str(cache.date),aname,type(cache.value),
shortstr(cache.value)))
return cache.value
if formula in self.Lambdas:
f = self.Lambdas[formula]
self.info("In evalAttr(push=%s) ... using Lambdas[%s] = %s"
% (push,formula,f))
if fun.isString(f):
f = self._locals.get(f,self.__getattr__(f,None))
result = (f() if fun.isCallable(f) else f)
else:
self.debug("In evalAttr ... updating locals defaults")
try:
self._locals.update({
't':time.time()-self.time0,
'WRITE':WRITE,
'READ':bool(not WRITE),
'ATTRIBUTE':aname,
'NAME':self.get_name(),
'VALUE':(VALUE if VALUE is None or aname not in self.dyn_types
else self.dyn_types[aname].pytype(VALUE)),
'STATE':self.get_state(),
'LOCALS':self._locals,
'ATTRIBUTES':sorted(self.dyn_values.keys()),
'FORMULAS':dict((k,v.formula) for k,v in self.dyn_values.items()),
'XATTRS':self._external_attributes,
}) # It is important to keep these values persistent; they become available for quality/date/state/status management
# Redundant but needed
[self._locals.__setitem__(str(quality),quality) for quality in AttrQuality.values.values()]
[self._locals.__setitem__(k,dst.pytype) for k,dst in DynamicDSTypes.items()]
#Adding states for convenience evaluation
#self.TangoStates = dict((str(v),v) for k,v in PyTango.DevState.values.items())
#self._locals.update(self.TangoStates)
if _locals is not None:
#High Priority: variables passed as argument
self._locals.update(_locals)
except Exception as e:
self.error('<'*80)
self.error(traceback.format_exc())
for t in (
VALUE,type(VALUE),aname,self.dyn_types.get(aname,None),aname
in self.dyn_types and self.dyn_types[aname].pytype):
self.warning(str(t))
self.error('<'*80)
raise e
if WRITE:
self.debug('%s::evalAttr(WRITE): Attribute=%s; formula=%s; VALUE=%s'%(self.get_name(),aname,formula,shortstr(VALUE)))
elif aname in self.dyn_values:
self.debug('%s::evalAttr(READ): Attribute=%s; formula=%s;'%(self.get_name(),aname,formula,))
else:
self.info('%s::evalAttr(COMMAND): formula=%s;'%(self.get_name(),formula,))
###################################################################
result = eval(compiled or formula,self._globals,self._locals)
###################################################################
self.debug('eval result: '+str(result))
if aname not in self.dyn_values:
return result
elif WRITE:
if self.ReadOnWrite:
self.evalAttr(aname,WRITE=False,_locals=_locals,push=push)
return result
###################################################################
#Push/Keep Read Attributes
quality = self.get_attr_quality(aname,result)
if hasattr(result,'quality'):
result.quality = quality
date = self.get_attr_date(aname,result)
value = self.dyn_types[aname].pytype(result)
#UseEvents must be checked before updating the cache
events = self.check_attribute_events(aname)
check = events and (
push or self.check_changed_event(aname,result,events))
et = 1e3*(fn.now()-tstart)
cached = events or self.dyn_values[aname].keep
(self.debug if et < 1. else self.warning)(
'evalAttr(%s): events = %s, check = %s, cached = %s, '
'eval_ms = %d' % (aname,events,check,cached,et))
if events and check:
self.push_dyn_attr(aname,value=value,
events=events,changed=1,queued=1)
#Updating the cache
if cached:
old = self.dyn_values[aname].value
self.dyn_values[aname].update(value,date,quality)
self._locals[aname] = value
self.debug('evalAttr(%s):Value kept for reuse' % (aname,))
#Updating state if needed:
try:
if old!=value and self.dyn_values.get(aname).states_queue:
self.check_state()
except:
self.warning('Unable to check state!')
self.warning(traceback.format_exc())
return result
except PyTango.DevFailed as e:
if self.trace:
print('-'*80)
print('\n'.join(['DynamicDS_evalAttr(%s)_WrongFormulaException:'%aname,'\t"%s"'%(formula,),str(traceback.format_exc())]))
print('\n'.join([str(e.args[0])]) + '\n'+'*'*80)
print('-'*80)
err = e.args[0]
self.error(e)
raise e #Exception,';'.join([err.origin,err.reason,err.desc])
except Exception as e:
if self.last_attr_exception and self.last_attr_exception[0]>tstart:
e = self.last_attr_exception[-1]
if 1:
print('\n'.join(['DynamicDS_evalAttr_WrongFormulaException','%s is not a valid expression!'%(formula,),]))
s = traceback.format_exc()
#self.error(s)
print(s)
raise e #Exception(s)
finally:
self._eval_times[aname] = fun.now()-tstart
self._locals['ATTRIBUTE'] = ''
def evalCommand(self,cmd,argin=None):
"""This method will execute a command declared using DynamicCommands property"""
k = cmd if '/' in cmd else self.get_name()+'/'+cmd
assert k in self.dyn_comms.keys(),('%s command not declared in properties!'%(k,))
return self.evalAttr(self.dyn_comms[k],_locals={'ARGS':argin})
# DYNAMIC STATE EVALUATION
def evalState(self,formula,_locals={}):
"""
Overloading the eval method to be used to evaluate State expressions
... To customize it: copy it to your device and add any method you will need
@remark Generators don't work inside eval! Use lists instead.
The main difference from evalAttr is that evalState will neither check dependencies nor push events
"""
self.debug('DynamicDS.evalState/evaluateFormula(%s)'%(isinstance(formula,str) and formula or 'code'))
# MODIFIED!! to use pure DynamicAttributes
#Attr = lambda a: self.dyn_values[a].value
if formula in self.Lambdas:
self.info('DynamicDS.evalState: using Lambdas')
f = self.Lambdas[formula]
return f() if fun.isCallable(f) else f
t = time.time()-self.time0
for k,v in self.dyn_values.items(): self._locals[k]=v#.value #Updating Last Attribute Values
__locals = {}#__locals=locals().copy() #Low priority: local variables
__locals.update(self._locals) #Second priority: object statements
__locals.update(_locals) #High Priority: variables passed as argument
__locals.update(
{'STATE':self.get_state(),'t':time.time()-self.time0,'NAME': self.get_name(),
'ATTRIBUTES':sorted(self.dyn_values.keys()),#'ATTRIBUTES':dict((a,getattr(self.dyn_values[a],'value',None)) for a in self.dyn_values if a in self._locals),
'FORMULAS':dict((k,v.formula) for k,v in self.dyn_values.items()),
'WRITE':False,
'VALUE':None,
})
return eval(formula,self._globals,__locals)
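# Illustrative sketch only: a formula passed to evalState() is a plain python
# expression evaluated against the locals prepared above (t, STATE, NAME,
# ATTRIBUTES, FORMULAS, plus the dynamic attributes themselves), e.g.
#
#     self.evalState("t > 3600")              # True once uptime exceeds one hour
#     self.evalState("len(ATTRIBUTES) > 0")   # True if any dynamic attribute exists
#
# Note that dynamic attributes are injected here as DynamicAttribute objects
# rather than raw values (see the commented '#.value' above).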
def rawState(self):
self.debug('In DynamicDS.rawState(), overriding attribute-based State.')
state = self.get_state()
self.debug('In DynamicDS.State()='+str(state))
return state
def check_state(self,set_state=True,current=None):
''' The thread automatically closes if there's no activity for 5 minutes;
an always_executed_hook call or a new event will restart the thread.
'''
new_state = self.get_state()
try:
if self.state_lock.locked():
self.debug('In DynamicDS.check_state(): lock already acquired')
return new_state
self.state_lock.acquire()
if self.dyn_states:
self.info('In DynamicDS.check_state()')
old_state = new_state if current is None else current
## @remarks: the device state is not changed | |
(data) = self.dettach_pickup_with_http_info(booking_id, **kwargs)
return data
def dettach_pickup_with_http_info(self, booking_id, **kwargs):
"""
Detach a pickup location for a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.dettach_pickup_with_http_info(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int id:
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method dettach_pickup" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `dettach_pickup`")
resource_path = '/booking/remove-pickup'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'id' in params:
query_params['id'] = params['id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2003',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def edit_booking_info(self, **kwargs):
"""
Edit the information related to a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.edit_booking_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id:
:param float discount:
:param str comment:
:return: InlineResponse20014
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.edit_booking_info_with_http_info(**kwargs)
else:
(data) = self.edit_booking_info_with_http_info(**kwargs)
return data
def edit_booking_info_with_http_info(self, **kwargs):
"""
Edit the information related to a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.edit_booking_info_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id:
:param float discount:
:param str comment:
:return: InlineResponse20014
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'discount', 'comment']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method edit_booking_info" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/edit-info'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'discount' in params:
query_params['discount'] = params['discount']
if 'comment' in params:
query_params['comment'] = params['comment']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20014',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def filter_bookings(self, **kwargs):
"""
Get all bookings matching a filter
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.filter_bookings(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str reference:
:param date date:
:param str lastname:
:return: InlineResponse20013
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.filter_bookings_with_http_info(**kwargs)
else:
(data) = self.filter_bookings_with_http_info(**kwargs)
return data
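# Illustrative usage sketch (the BookingApi class name and its no-argument
# constructor are assumptions in the style of swagger-codegen clients; only
# methods defined in this generated class are called):
#
#     api = BookingApi()                       # hypothetical default ApiClient
#     bookings = api.filter_bookings(reference='ABC123', lastname='Smith')
#     api.edit_booking_info(booking_id=1, discount=10.0, comment='loyalty')
#     api.dettach_pickup(booking_id=1)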
def filter_bookings_with_http_info(self, **kwargs):
"""
Get all bookings matching a filter
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.filter_bookings_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str reference:
:param date date:
:param str lastname:
:return: InlineResponse20013
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['reference', 'date', 'lastname']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method filter_bookings" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/filter'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'reference' in params:
query_params['reference'] = params['reference']
if 'date' in params:
query_params['date'] = params['date']
if 'lastname' in params:
query_params['lastname'] = params['lastname']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20013',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_all_bookings(self, **kwargs):
"""
Retrieve all bookings
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_bookings(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Booking]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_bookings_with_http_info(**kwargs)
else:
(data) = self.get_all_bookings_with_http_info(**kwargs)
return data
def get_all_bookings_with_http_info(self, **kwargs):
"""
Retrieve all bookings
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_bookings_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Booking]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_bookings" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/all'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Booking]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_all_with_trashed_bookings(self, **kwargs):
"""
Retrieve all bookings including any deleted models
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_with_trashed_bookings(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Booking]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_with_trashed_bookings_with_http_info(**kwargs)
else:
(data) = self.get_all_with_trashed_bookings_with_http_info(**kwargs)
return data
def get_all_with_trashed_bookings_with_http_info(self, **kwargs):
"""
Retrieve all bookings including any deleted models
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_with_trashed_bookings_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Booking]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_with_trashed_bookings" % key
)
params[key] = val
del params['kwargs']
resource_path | |
<gh_stars>0
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Module for interfacing with an IBMQ Backend."""
import logging
import warnings
from typing import Dict, List, Union, Optional, Any
from datetime import datetime # pylint: disable=unused-import
from marshmallow import ValidationError
from qiskit.qobj import Qobj
from qiskit.providers import BaseBackend, JobStatus
from qiskit.providers.models import (BackendStatus, BackendProperties,
PulseDefaults, BackendConfiguration)
from .api import ApiError, IBMQConnector
from .api_v2.clients import BaseClient, AccountClient
from .apiconstants import ApiJobStatus, ApiJobKind
from .credentials import Credentials
from .exceptions import IBMQBackendError, IBMQBackendValueError
from .job import IBMQJob
from .utils import update_qobj_config
logger = logging.getLogger(__name__)
class IBMQBackend(BaseBackend):
"""Backend class interfacing with an IBMQ backend."""
def __init__(
self,
configuration: BackendConfiguration,
provider,
credentials: Credentials,
api: Union[AccountClient, IBMQConnector]
) -> None:
"""Initialize remote backend for IBM Quantum Experience.
Args:
configuration (BackendConfiguration): configuration of backend.
provider (IBMQProvider): provider.
credentials (Credentials): credentials.
api (Union[AccountClient, IBMQConnector]):
api for communicating with the Quantum Experience.
"""
super().__init__(provider=provider, configuration=configuration)
self._api = api
self._credentials = credentials
self.hub = credentials.hub
self.group = credentials.group
self.project = credentials.project
# Attributes used by caching functions.
self._properties = None
self._defaults = None
def run(self, qobj: Qobj, job_name: Optional[str] = None) -> IBMQJob:
"""Run a Qobj asynchronously.
Args:
qobj (Qobj): description of job.
job_name (str): custom name to be assigned to the job. This job
name can subsequently be used as a filter in the
``jobs()`` function call. Job names do not need to be unique.
This parameter is ignored if IBM Q Experience v1 account is used.
Returns:
IBMQJob: an instance derived from BaseJob
"""
# pylint: disable=arguments-differ
kwargs = {}
if isinstance(self._api, BaseClient):
# Default to using object storage and websockets for new API.
kwargs = {'use_object_storage': True,
'use_websockets': True}
job = IBMQJob(self, None, self._api, qobj=qobj, **kwargs)
job.submit(job_name=job_name)
return job
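# Illustrative usage sketch (not part of this class; the provider, transpile
# and assemble calls are assumptions based on Qiskit of the same era, and
# `circuit` is assumed to be a QuantumCircuit built elsewhere):
#
#     from qiskit import IBMQ, transpile
#     from qiskit.compiler import assemble
#     provider = IBMQ.get_provider()                       # IBM Q v2 account
#     backend = provider.get_backend('ibmq_qasm_simulator')
#     qobj = assemble(transpile(circuit, backend=backend), shots=1024)
#     job = backend.run(qobj, job_name='demo')             # the method above
#     print(job.job_id(), backend.status().pending_jobs)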
def properties(
self,
refresh: bool = False,
datetime: Optional[datetime] = None # pylint: disable=redefined-outer-name
) -> Optional[BackendProperties]:
"""Return the online backend properties with optional filtering.
Args:
refresh (bool): if True, the return is via a QX API call.
Otherwise, a cached version is returned.
datetime (datetime.datetime): by specifying a datetime,
this function returns an instance of the BackendProperties whose
timestamp is closest to, but older than, the specified datetime.
Returns:
BackendProperties: The properties of the backend. If the backend has
no properties to display, it returns ``None``.
"""
# pylint: disable=arguments-differ
if datetime:
if not isinstance(self._api, BaseClient):
warnings.warn('Retrieving the properties of a '
'backend at a specific datetime is '
'only available when using IBM Q v2')
return None
# Do not use cache for specific datetime properties.
api_properties = self._api.backend_properties(self.name(), datetime=datetime)
if not api_properties:
return None
return BackendProperties.from_dict(api_properties)
if refresh or self._properties is None:
api_properties = self._api.backend_properties(self.name())
self._properties = BackendProperties.from_dict(api_properties)
return self._properties
def status(self) -> BackendStatus:
"""Return the online backend status.
Returns:
BackendStatus: The status of the backend.
Raises:
LookupError: If status for the backend can't be found.
IBMQBackendError: If the status can't be formatted properly.
"""
api_status = self._api.backend_status(self.name())
try:
return BackendStatus.from_dict(api_status)
except ValidationError as ex:
raise LookupError(
"Couldn't get backend status: {0}".format(ex))
def defaults(self, refresh: bool = False) -> Optional[PulseDefaults]:
"""Return the pulse defaults for the backend.
Args:
refresh (bool): if True, the return is via a QX API call.
Otherwise, a cached version is returned.
Returns:
PulseDefaults: the pulse defaults for the backend. If the backend
does not support defaults, it returns ``None``.
"""
if not self.configuration().open_pulse:
return None
if refresh or self._defaults is None:
api_defaults = self._api.backend_defaults(self.name())
if api_defaults:
self._defaults = PulseDefaults.from_dict(api_defaults)
else:
self._defaults = None
return self._defaults
def jobs(
self,
limit: int = 10,
skip: int = 0,
status: Optional[Union[JobStatus, str]] = None,
job_name: Optional[str] = None,
db_filter: Optional[Dict[str, Any]] = None
) -> List[IBMQJob]:
"""Return the jobs submitted to this backend.
Return the jobs submitted to this backend, with optional filtering and
pagination. Note that the API has a limit for the number of jobs
returned in a single call, and this function might involve making
several calls to the API. See also the `skip` parameter for more control
over pagination.
Note that jobs submitted with earlier versions of Qiskit
(in particular, those that predate the Qobj format) are not included
in the returned list.
Args:
limit (int): number of jobs to retrieve.
skip (int): starting index for the job retrieval.
status (None or qiskit.providers.JobStatus or str): only get jobs
with this status, where status is e.g. `JobStatus.RUNNING` or
`'RUNNING'`
job_name (str): only get jobs with this job name.
db_filter (dict): `loopback-based filter
<https://loopback.io/doc/en/lb2/Querying-data.html>`_.
This is an interface to a database ``where`` filter. Some
examples of its usage are:
Filter last five jobs with errors::
job_list = backend.jobs(limit=5, status=JobStatus.ERROR)
Filter last five jobs with counts=1024, and counts for
states ``00`` and ``11`` each exceeding 400::
cnts_filter = {'shots': 1024,
'qasms.result.data.counts.00': {'gt': 400},
'qasms.result.data.counts.11': {'gt': 400}}
job_list = backend.jobs(limit=5, db_filter=cnts_filter)
Filter last five jobs from 30 days ago::
past_date = datetime.datetime.now() - datetime.timedelta(days=30)
date_filter = {'creationDate': {'lt': past_date.isoformat()}}
job_list = backend.jobs(limit=5, db_filter=date_filter)
Returns:
list(IBMQJob): list of IBMQJob instances
Raises:
IBMQBackendValueError: status keyword value unrecognized
"""
# Build the filter for the query.
backend_name = self.name()
api_filter = {'backend.name': backend_name}
if status:
if isinstance(status, str):
status = JobStatus[status]
if status == JobStatus.RUNNING:
this_filter = {'status': ApiJobStatus.RUNNING.value,
'infoQueue': {'exists': False}}
elif status == JobStatus.QUEUED:
this_filter = {'status': ApiJobStatus.RUNNING.value,
'infoQueue.status': 'PENDING_IN_QUEUE'}
elif status == JobStatus.CANCELLED:
this_filter = {'status': ApiJobStatus.CANCELLED.value}
elif status == JobStatus.DONE:
this_filter = {'status': ApiJobStatus.COMPLETED.value}
elif status == JobStatus.ERROR:
this_filter = {'status': {'regexp': '^ERROR'}}
else:
raise IBMQBackendValueError('unrecognized value for "status" keyword '
'in job filter')
api_filter.update(this_filter)
if job_name:
api_filter['name'] = job_name
if db_filter:
# status takes precedence over db_filter for same keys
api_filter = {**db_filter, **api_filter}
# Retrieve the requested number of jobs, using pagination. The API
# might limit the number of jobs per request.
job_responses = []
current_page_limit = limit
while True:
job_page = self._api.get_status_jobs(limit=current_page_limit,
skip=skip, filter=api_filter)
job_responses += job_page
skip = skip + len(job_page)
if not job_page:
# Stop if there are no more jobs returned by the API.
break
if limit:
if len(job_responses) >= limit:
# Stop if we have reached the limit.
break
current_page_limit = limit - len(job_responses)
else:
current_page_limit = 0
job_list = []
for job_info in job_responses:
kwargs = {}
try:
job_kind = ApiJobKind(job_info.get('kind', None))
except ValueError:
# Discard pre-qobj jobs.
break
if isinstance(self._api, BaseClient):
# Default to using websockets for new API.
kwargs['use_websockets'] = True
if job_kind == ApiJobKind.QOBJECT_STORAGE:
kwargs['use_object_storage'] = True
job = IBMQJob(self, job_info.get('id'), self._api,
creation_date=job_info.get('creationDate'),
api_status=job_info.get('status'),
**kwargs)
job_list.append(job)
return job_list
def retrieve_job(self, job_id: str) -> IBMQJob:
"""Return a job submitted to this backend.
Args:
job_id (str): the job id of the job to retrieve
Returns:
IBMQJob: class instance
Raises:
IBMQBackendError: if retrieval failed
"""
try:
job_info = self._api.get_job(job_id)
# Check for generic errors.
if 'error' in job_info:
raise IBMQBackendError('Failed to get job "{}": {}'
.format(job_id, job_info['error']))
# Check for jobs from a different backend.
job_backend_name = job_info['backend']['name']
if job_backend_name != self.name():
warnings.warn('Job "{}" belongs to a backend other than the one queried. '
'The query was made on backend "{}", '
'but the job actually belongs to backend "{}".'
.format(job_id, self.name(), job_backend_name))
raise IBMQBackendError('Failed to get job "{}": '
'job does not belong to backend "{}".'
.format(job_id, self.name()))
# Check for pre-qobj jobs.
kwargs = {}
try:
job_kind = ApiJobKind(job_info.get('kind', None))
if isinstance(self._api, BaseClient):
# Default to using websockets for new API.
kwargs['use_websockets'] = True
if job_kind == ApiJobKind.QOBJECT_STORAGE:
kwargs['use_object_storage'] = True
except ValueError:
warnings.warn('The result of job {} is in a no longer supported format. '
'Please send the job using Qiskit 0.8+.'.format(job_id),
DeprecationWarning)
raise IBMQBackendError('Failed to get job "{}": {}'
| |
<filename>applications/connection_search/aciConSearch_test.py
################################################################################
# #
# Copyright (c) 2015 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
"""
Search test
"""
import unittest
from acitoolkit import BridgeDomain, Filter, ContractInterface
from aciConSearch import *
import radix
import copy
LIVE_TEST = False
def get_tree():
"""
Will build an object tree with attributes in each object
:return:
"""
tenant = Tenant('tenant')
tenant.dn = '/tn-tenant'
app1 = AppProfile('app1', tenant)
app1.dn = app1.get_parent().dn + '/app-app1'
app2 = AppProfile('app2', tenant)
app2.dn = app2.get_parent().dn + '/app-app2'
epg11 = EPG('epg11', app1)
epg11.dn = epg11.get_parent().dn + '/epg-epg11'
for index in range(1, 5):
ep = Endpoint('endpoint_' + str(index), epg11)
ep.ip = '192.168.11.' + str(index)
epg12 = EPG('epg12', app1)
epg12.dn = epg12.get_parent().dn + '/epg-epg12'
for index in range(1, 5):
ep = Endpoint('endpoint_' + str(index), epg12)
ep.ip = '192.168.12.' + str(index)
epg21 = EPG('epg21', app2)
epg21.dn = epg21.get_parent().dn + '/epg-epg21'
for index in range(1, 5):
ep = Endpoint('endpoint_' + str(index), epg21)
ep.ip = '192.168.21.' + str(index)
epg22 = EPG('epg22', app2)
epg22.dn = epg22.get_parent().dn + '/epg-epg22'
for index in range(1, 5):
ep = Endpoint('endpoint_' + str(index), epg22)
ep.ip = '192.168.22.' + str(index)
bd1 = BridgeDomain('bd1', tenant)
bd1.dn = bd1.get_parent().dn + '/bd-bd1'
bd2 = BridgeDomain('bd2', tenant)
bd2.dn = bd2.get_parent().dn + '/bd-bd2'
epg11.add_bd(bd1)
epg12.add_bd(bd2)
epg21.add_bd(bd1)
epg22.add_bd(bd2)
context = Context('ctx', tenant)
context.dn = context.get_parent().dn + '/ctx-ctx'
bd1.add_context(context)
bd2.add_context(context)
outside_l3 = OutsideL3('out_l3_1', tenant)
outside_l3.add_context(context)
outside_epg_1 = OutsideEPG('out_epg_1', outside_l3)
outside_epg_2 = OutsideEPG('out_epg_2', outside_l3)
outside_epg_3 = OutsideEPG('out_epg_3', outside_l3)
subnet_11 = OutsideNetwork('subnet_11', outside_epg_1)
subnet_11.set_addr('10.10.1.0/24')
subnet_12 = OutsideNetwork('subnet_12', outside_epg_1)
subnet_12.set_addr('10.10.2.0/24')
subnet_13 = OutsideNetwork('subnet_13', outside_epg_1)
subnet_13.set_addr('10.10.1.0/16')
subnet_21 = OutsideNetwork('subnet_21', outside_epg_2)
subnet_21.set_addr('10.21.2.1/32')
subnet_22 = OutsideNetwork('subnet_22', outside_epg_2)
subnet_22.set_addr('10.22.2.1/32')
subnet_23 = OutsideNetwork('subnet_23', outside_epg_2)
subnet_23.set_addr('10.23.2.1/32')
subnet_31 = OutsideNetwork('subnet_31', outside_epg_3)
subnet_31.set_addr('10.30.2.1/32')
subnet_32 = OutsideNetwork('subnet_32', outside_epg_3)
subnet_32.set_addr('10.30.3.1/24')
subnet_33 = OutsideNetwork('subnet_33', outside_epg_3)
subnet_33.set_addr('10.30.2.1/25')
contract1 = Contract('contract-1', tenant)
contract1.dn = contract1.get_parent().dn + '/con-contract1'
entry1 = FilterEntry('entry1',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='80',
dToPort='80',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract1)
subjects = contract1.get_children(ContractSubject)
for subject in subjects:
subject.dn = subject.get_parent().dn + '/subj-' + subject.name
filters = tenant.get_children(Filter)
for atk_filter in filters:
atk_filter.dn = atk_filter.get_parent().dn + '/flt-' + atk_filter.name
entry1.dn = entry1.get_parent().dn + '/flte-entry1'
contract2 = Contract('contract-2', tenant)
contract3 = Contract('contract-3', tenant)
contract4 = Contract('contract-4', tenant)
entry2 = FilterEntry('entry2',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='443',
dToPort='443',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract2)
entry3 = FilterEntry('entry3',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='20',
dToPort='25',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract3)
entry4 = FilterEntry('entry4',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='unspecified',
dToPort='unspecified',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract4)
epg11.provide(contract1)
epg11.consume(contract1)
epg11.provide(contract4)
epg12.consume(contract1)
epg12.provide(contract2)
epg12.consume(contract3)
epg12.provide(contract4)
epg12.consume(contract4)
outside_epg_1.consume(contract2)
outside_epg_2.provide(contract3)
outside_epg_3.consume(contract4)
outside_epg_3.provide(contract4)
epg21.consume(contract2)
epg22.provide(contract4)
return [tenant]
def get_tree2():
"""
Will build an object tree with attributes in each object
:return:
"""
tenant = Tenant('tenant2')
tenant.dn = '/tn-tenant2'
app1 = AppProfile('t2_app1', tenant)
app1.dn = app1.get_parent().dn + '/app-app1'
app2 = AppProfile('t2_app2', tenant)
app2.dn = app2.get_parent().dn + '/app-app2'
epg11 = EPG('t2_epg11', app1)
epg11.dn = epg11.get_parent().dn + '/epg-epg11'
for index in range(1, 5):
ep = Endpoint('t2_endpoint_' + str(index), epg11)
ep.ip = '192.168.11.' + str(index)
epg12 = EPG('t2_epg12', app1)
epg12.dn = epg12.get_parent().dn + '/epg-epg12'
for index in range(1, 5):
ep = Endpoint('t2_endpoint_' + str(index), epg12)
ep.ip = '192.169.12.' + str(index)
epg21 = EPG('t2_epg21', app1)
epg21.dn = epg11.get_parent().dn + '/epg-epg21'
for index in range(1, 5):
ep = Endpoint('t2_endpoint_' + str(index), epg21)
ep.ip = '192.170.11.' + str(index)
epg22 = EPG('t2_epg22', app1)
epg22.dn = epg22.get_parent().dn + '/epg-epg22'
for index in range(1, 5):
ep = Endpoint('t2_endpoint_' + str(index), epg22)
ep.ip = '192.170.12.' + str(index)
bd1 = BridgeDomain('bd1', tenant)
bd1.dn = bd1.get_parent().dn + '/bd-bd1'
bd2 = BridgeDomain('bd2', tenant)
bd2.dn = bd2.get_parent().dn + '/bd-bd2'
bd3 = BridgeDomain('bd3', tenant)
bd3.dn = bd3.get_parent().dn + '/bd-bd3'
epg11.add_bd(bd1)
epg12.add_bd(bd2)
epg21.add_bd(bd3)
epg22.add_bd(bd3)
context = Context('ctx', tenant)
context.dn = context.get_parent().dn + '/ctx-ctx'
context2 = Context('ctx2', tenant)
context2.dn = context.get_parent().dn + '/ctx-ctx2'
bd1.add_context(context)
bd2.add_context(context)
bd3.add_context(context2)
outside_l3 = OutsideL3('out_l3_1', tenant)
outside_l3.add_context(context)
outside_epg_3 = OutsideEPG('out_epg_3', outside_l3)
subnet_31 = OutsideNetwork('subnet_31', outside_epg_3)
subnet_31.set_addr('10.30.2.1/32')
subnet_32 = OutsideNetwork('subnet_32', outside_epg_3)
subnet_32.set_addr('10.30.3.1/24')
subnet_33 = OutsideNetwork('subnet_33', outside_epg_3)
subnet_33.set_addr('10.30.2.1/25')
contract1 = Contract('contract-1', tenant)
contract1.dn = contract1.get_parent().dn + '/con-contract1'
contract2 = Contract('contract-2', tenant)
contract2.dn = contract2.get_parent().dn + '/con-contract2'
entry1 = FilterEntry('entry1',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='80',
dToPort='80',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract1)
subjects = contract1.get_children(ContractSubject)
for subject in subjects:
subject.dn = subject.get_parent().dn + '/subj-' + subject.name
filters = tenant.get_children(Filter)
for atk_filter in filters:
atk_filter.dn = atk_filter.get_parent().dn + '/flt-' + atk_filter.name
entry1.dn = entry1.get_parent().dn + '/flte-entry1'
contract4 = Contract('contract-4', tenant)
entry3 = FilterEntry('entry3',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='443',
dToPort='443',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract4)
entry4 = FilterEntry('entry4',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='80',
dToPort='80',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract4)
entry4 = FilterEntry('entry4',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='unspecified',
dToPort='unspecified',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract2)
epg11.provide(contract1)
epg11.consume(contract1)
epg12.consume(contract1)
epg12.provide(contract1)
epg12.provide(contract4)
epg12.consume(contract4)
outside_epg_3.consume(contract4)
outside_epg_3.provide(contract4)
epg21.consume(contract2)
epg22.provide(contract2)
return [tenant]
def get_tree3():
"""
Will build an object tree with attributes in each object
:return:
"""
tenant = Tenant('tenant3')
tenant.dn = '/tn-tenant3'
app1 = AppProfile('t3_app1', tenant)
app1.dn = app1.get_parent().dn + '/app-app1'
app2 = AppProfile('t3_app2', tenant)
app2.dn = app2.get_parent().dn + '/app-app2'
epg11 = EPG('t3_epg11', app1)
epg11.dn = epg11.get_parent().dn + '/epg-epg11'
for index in range(1, 5):
ep = Endpoint('t3_endpoint_' + str(index), epg11)
ep.ip = 'abcd:168:11::' + str(index)
epg12 = EPG('t3_epg12', app1)
epg12.dn = epg12.get_parent().dn + '/epg-epg12'
for index in range(1, 5):
ep = Endpoint('t3_endpoint_' + str(index), epg12)
ep.ip = 'abcd:169:12::' + str(index)
epg21 = EPG('t3_epg21', app1)
epg21.dn = epg11.get_parent().dn + '/epg-epg21'
for index in range(1, 5):
ep = Endpoint('t3_endpoint_' + str(index), epg21)
ep.ip = 'abcd:170:11::' + str(index)
epg22 = EPG('t3_epg22', app1)
epg22.dn = epg22.get_parent().dn + '/epg-epg22'
for index in range(1, 5):
ep = Endpoint('t3_endpoint_' + str(index), epg22)
ep.ip = 'abcd:170:12::' + str(index)
bd1 = BridgeDomain('bd1', tenant)
bd1.dn = bd1.get_parent().dn + '/bd-bd1'
bd2 = BridgeDomain('bd2', tenant)
bd2.dn = bd2.get_parent().dn + '/bd-bd2'
bd3 = BridgeDomain('bd3', tenant)
bd3.dn = bd3.get_parent().dn + '/bd-bd3'
epg11.add_bd(bd1)
epg12.add_bd(bd2)
epg21.add_bd(bd3)
epg22.add_bd(bd3)
context = Context('ctx', tenant)
context.dn = context.get_parent().dn + '/ctx-ctx'
context2 = Context('ctx2', tenant)
context2.dn = context.get_parent().dn + '/ctx-ctx2'
bd1.add_context(context)
bd2.add_context(context)
bd3.add_context(context2)
outside_l3 = OutsideL3('out_l3_1', tenant)
outside_l3.add_context(context)
outside_epg_3 = OutsideEPG('out_epg_3', outside_l3)
subnet_31 = OutsideNetwork('subnet_31', outside_epg_3)
subnet_31.set_addr('10.30.2.1/32')
subnet_32 = OutsideNetwork('subnet_32', outside_epg_3)
subnet_32.set_addr('10.30.3.1/24')
subnet_33 = OutsideNetwork('subnet_33', outside_epg_3)
subnet_33.set_addr('10.30.2.1/25')
contract1 = Contract('contract-1', tenant)
contract1.dn = contract1.get_parent().dn + '/con-contract1'
contract2 = Contract('contract-2', tenant)
contract2.dn = contract2.get_parent().dn + '/con-contract2'
entry1 = FilterEntry('entry1',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='80',
dToPort='80',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract1)
subjects = contract1.get_children(ContractSubject)
for subject in subjects:
subject.dn = subject.get_parent().dn + '/subj-' + subject.name
filters = tenant.get_children(Filter)
for atk_filter in filters:
atk_filter.dn = atk_filter.get_parent().dn + '/flt-' + atk_filter.name
entry1.dn = entry1.get_parent().dn + '/flte-entry1'
contract4 = Contract('contract-4', tenant)
entry3 = FilterEntry('entry3',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='443',
dToPort='443',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract4)
entry4 = FilterEntry('entry4',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='80',
dToPort='80',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract4)
entry4 = FilterEntry('entry4',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='unspecified',
dToPort='unspecified',
etherT='ip',
prot='tcp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract2)
epg11.provide(contract1)
epg11.consume(contract1)
epg12.consume(contract1)
epg12.provide(contract1)
epg12.provide(contract4)
epg12.consume(contract4)
outside_epg_3.consume(contract4)
outside_epg_3.provide(contract4)
epg21.consume(contract2)
epg22.provide(contract2)
return [tenant]
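# Illustrative sketch (not part of the original test suite): the tree builders
# above are consumed by loading the returned tenants into a SearchDb, exactly
# as TestImportData.setUp does below. SearchDb is assumed to be imported at the
# top of this module.
def _example_build_search_db():
    tenants = get_tree3()
    sdb = SearchDb()
    sdb.build(tenants)
    return sdb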
class TestImportData(unittest.TestCase):
"""
Checks that the object model is correctly setup
"""
def setUp(self):
self.sdb = SearchDb()
self.tenants = get_tree()
self.sdb.build(self.tenants)
def get_all_epgs(self):
"""
Will return a list of all EPG objects.
:return:
"""
result = []
for tenant in self.tenants:
app_profiles = tenant.get_children(AppProfile)
for app_profile in app_profiles:
epgs = app_profile.get_children(EPG)
for epg in epgs:
result.append(epg)
return result
def get_all_outside_epgs(self):
result = []
for tenant in self.tenants:
outside_l3s = tenant.get_children(OutsideL3)
for outside_l3 in outside_l3s:
outside_epg = outside_l3.get_children(OutsideEPG)
for epg in outside_epg:
result.append(epg)
return result
def get_all_contracts(self):
"""
Will get a list of all contracts returned as tuple (tenant, contract)
:return:
"""
result = []
for tenant in self.tenants:
contracts = tenant.get_children(Contract)
for contract in contracts:
result.append((tenant, contract))
return result
def get_all_filter_entries(self):
result = []
contracts = self.get_all_contracts()
for (tenant, contract) in contracts:
result.extend(contract.get_children(FilterEntry))
subjects = contract.get_children(ContractSubject)
for subject in subjects:
def head(self: DF, length: int = 5) -> DF:
"""
Get first N rows as DataFrame.
Parameters
----------
length
Length of the head.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3, 4, 5],
... "bar": [6, 7, 8, 9, 10],
... "ham": ["a", "b", "c", "d", "e"],
... }
... )
>>> df.head(3)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└─────┴─────┴─────┘
"""
return self._from_pydf(self._df.head(length))
def tail(self: DF, length: int = 5) -> DF:
"""
Get last N rows as DataFrame.
Parameters
----------
length
Length of the tail.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3, 4, 5],
... "bar": [6, 7, 8, 9, 10],
... "ham": ["a", "b", "c", "d", "e"],
... }
... )
>>> df.tail(3)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 9 ┆ d │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 5 ┆ 10 ┆ e │
└─────┴─────┴─────┘
"""
return self._from_pydf(self._df.tail(length))
def drop_nulls(self: DF, subset: Optional[Union[str, List[str]]] = None) -> DF:
"""
Return a new DataFrame where the null values are dropped.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, None, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.drop_nulls()
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└─────┴─────┴─────┘
This method drops rows in which any single value is null; columns are never dropped.
Below are some example snippets that show how you could drop null
values based on other conditions.
>>> df = pl.DataFrame(
... {
... "a": [None, None, None, None],
... "b": [1, 2, None, 1],
... "c": [1, None, None, 1],
... }
... )
>>> df
shape: (4, 3)
┌──────┬──────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞══════╪══════╪══════╡
│ null ┆ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 1 ┆ 1 │
└──────┴──────┴──────┘
Drop a row only if all values are null:
>>> df.filter(
... ~pl.fold(
... acc=True,
... f=lambda acc, s: acc & s.is_null(),
... exprs=pl.all(),
... )
... )
shape: (3, 3)
┌──────┬─────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞══════╪═════╪══════╡
│ null ┆ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 1 ┆ 1 │
└──────┴─────┴──────┘
Drop a column if all values are null:
>>> df[:, [not (s.null_count() == df.height) for s in df]]
shape: (4, 2)
┌──────┬──────┐
│ b ┆ c │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞══════╪══════╡
│ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 1 ┆ 1 │
└──────┴──────┘
"""
if isinstance(subset, str):
subset = [subset]
return self._from_pydf(self._df.drop_nulls(subset))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
"""
Apply a function on Self.
Parameters
----------
func
Callable.
args
Arguments.
kwargs
Keyword arguments.
Examples
--------
>>> def cast_str_to_int(data, col_name):
... return data.with_column(pl.col(col_name).cast(pl.Int64))
...
>>> df = pl.DataFrame({"a": [1, 2, 3, 4], "b": ["10", "20", "30", "40"]})
>>> df.pipe(cast_str_to_int, col_name="b")
shape: (4, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 10 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 20 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 30 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 40 │
└─────┴─────┘
"""
return func(self, *args, **kwargs)
def with_row_count(self: DF, name: str = "row_nr", offset: int = 0) -> DF:
"""
Add a column at index 0 that counts the rows.
Parameters
----------
name
Name of the column to add.
offset
Start the row count at this offset. Default = 0
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 3, 5],
... "b": [2, 4, 6],
... }
... )
>>> df.with_row_count()
shape: (3, 3)
┌────────┬─────┬─────┐
│ row_nr ┆ a ┆ b │
│ --- ┆ --- ┆ --- │
│ u32 ┆ i64 ┆ i64 │
╞════════╪═════╪═════╡
│ 0 ┆ 1 ┆ 2 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 3 ┆ 4 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 5 ┆ 6 │
└────────┴─────┴─────┘
"""
return self._from_pydf(self._df.with_row_count(name, offset))
def groupby(
self: DF,
by: Union[str, "pli.Expr", Sequence[str], Sequence["pli.Expr"]],
maintain_order: bool = False,
) -> "GroupBy[DF]":
"""
Start a groupby operation.
Parameters
----------
by
Column(s) to group by.
maintain_order
Make sure that the order of the groups remains consistent. This is more expensive than a default groupby.
Note that this only works in expression aggregations.
Examples
--------
Below we group by column `"a"`, and we sum column `"b"`.
>>> df = pl.DataFrame(
... {
... "a": ["a", "b", "a", "b", "b", "c"],
... "b": [1, 2, 3, 4, 5, 6],
... "c": [6, 5, 4, 3, 2, 1],
... }
... )
>>> df.groupby("a")["b"].sum().sort(by="a")
shape: (3, 2)
┌─────┬───────┐
│ a ┆ b_sum │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═══════╡
│ a ┆ 4 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ b ┆ 11 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ c ┆ 6 │
└─────┴───────┘
We can also loop over the grouped `DataFrame`
>>> for sub_df in df.groupby("a"):
... print(sub_df) # doctest: +IGNORE_RESULT
...
shape: (3, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ b ┆ 2 ┆ 5 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 4 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 5 ┆ 2 │
└─────┴─────┴─────┘
shape: (1, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ c ┆ 6 ┆ 1 │
└─────┴─────┴─────┘
"""
if isinstance(by, str):
by = [by]
return GroupBy(
self._df,
by, # type: ignore
dataframe_class=self.__class__,
maintain_order=maintain_order,
)
def groupby_rolling(
self: DF,
index_column: str,
period: str,
offset: Optional[str] = None,
closed: str = "right",
by: Optional[Union[str, List[str], "pli.Expr", List["pli.Expr"]]] = None,
) -> "RollingGroupBy[DF]":
"""
Create rolling groups based on a time column (or index value of type Int32, Int64).
Different from a dynamic groupby, the windows are determined by the individual values and are not of constant
intervals. For constant intervals use *groupby_dynamic*
.. seealso::
groupby_dynamic
The `period` and `offset` arguments are created with
the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
In case of a groupby_rolling on an integer column, the windows are defined by:
- **"1i" # length 1**
- **"10i" # length 10**
Parameters
----------
index_column
Column used to group based on the time window.
Often of type Date/Datetime
This column must be sorted in ascending order. If not the output will not make sense.
In case of a rolling groupby on indices, dtype needs to be one of {Int32, Int64}.
k, v in self._generators.items()})
def generate(self,
size: Optional[int] = None,
key: Optional[str] = None,
context: Optional[ContextBase] = None) -> Any:
if self._with_context:
context = FlatContext()
if size is None:
return self.generate_record(key, context)
return [self.generate_record(key, context) for _ in range(size)]
def generate_record(self, key: Optional[str], context: ContextBase) -> Any:
record = {}
if self._with_context:
context.append(record)
elif key is not None and context:
context.set(key, record)
for k, g in self._generators.items():
record[k] = g.generate(None, _GenKey(k, key), context)
return record
def result_type(self) -> type:
return Dict[str, Any]
class NestedGenerator(RecordGenerator):
"""Generates "nested" dicts - for specific set of keys, specific values."""
def __init__(self, rand: numpy.random.Generator,
generators: Dict[str, Generator]):
super().__init__(rand, generators, Names.NESTED)
class DataframeGenerator(RecordGenerator):
"""Similar w/ NestedGenerator, but generates keys to sequences."""
def __init__(self, rand: numpy.random.Generator,
generators: Dict[str, Generator]):
super().__init__(rand, generators, Names.DATAFRAME)
def generate(self,
size: Optional[int] = None,
key: Optional[str] = None,
context: Optional[ContextBase] = None) -> Dict[Any, Any]:
record = {}
if self._with_context:
context = ColumnarContext(record)
for k, g in self._generators.items():
# this is mainly to provide storage during generation
# for nested subkeys.
record[k] = []
record[k] = g.generate(1 if size is None else size, _GenKey(k, key),
context)
return record
class DataclassGenerator(RecordGenerator):
"""Similar w/ NestedGenerator, but generates classes and sets members."""
def __init__(
self,
rand: numpy.random.Generator,
generators: Dict[str, Generator],
# Specify the class:
build_class: Optional[type] = None,
# Or the module and class
module_name: Optional[str] = None,
class_name: Optional[str] = None):
super().__init__(rand, generators, Names.DATACLASS)
self.set_dataclass(build_class, module_name, class_name)
self._cls = None
def set_dataclass(self,
build_class: Optional[type] = None,
module_name: Optional[str] = None,
class_name: Optional[str] = None):
if build_class:
self._cls = build_class
elif module_name and class_name:
self._cls = getattr(sys.modules[module_name], class_name)
@classmethod
def build(cls, builder: Builder, spec):
if not isinstance(spec, (tuple, list)) or len(spec) not in [2, 3]:
raise ValueError(f'{cls} expects two or three underlying '
f'arguments. Got {spec}')
instance = super().build(builder, spec[0])
if len(spec) == 2:
if not isinstance(spec[1], type):
raise ValueError(
f'{cls} expects a class when providing two arguments')
instance.set_dataclass(build_class=spec[1])
else:
instance.set_dataclass(module_name=spec[1], class_name=spec[2])
return instance
def generate_record(self, key: Optional[str], context: ContextBase) -> Any:
record = super().generate_record(key, context)
return self._cls(**record)
def result_type(self) -> type:
return self._cls
def save(self) -> Any:
return (Names.DATACLASS,
({k: v.save() for k, v in self._generators.items()},
self._cls.__module__, self._cls.__qualname__))
class FakerGenerator(Generator):
"""Generates values using the faker library."""
def __init__(self,
rand: numpy.random.Generator,
fake_generator: str,
locale: Optional[Union[str, List[str]]] = None):
super().__init__(rand)
if faker is None:
raise NotImplementedError('faker module not available')
self._faker = faker.Faker(locale)
self._fake_generator = fake_generator
self._locale = locale
self._generator = getattr(self._faker, fake_generator)
if not callable(self._generator):
raise ValueError(
f'The faker generator `{fake_generator}` is not callable.')
self._result_type = type(self._generator())
@classmethod
def build(cls, builder: Builder, spec):
if isinstance(spec, (tuple, list)):
if len(spec) != 2:
raise ValueError(f'{cls} expects two underlying arguments for '
'parametrize creation')
return cls(builder.rand, spec[0], spec[1])
return cls(builder.rand, spec)
def result_type(self) -> type:
return self._result_type
def save(self) -> Any:
return (Names.FAKER, (self._fake_generator, self._locale))
def generate(self,
size: Optional[int] = None,
key: Optional[str] = None,
context: Optional[ContextBase] = None) -> Any:
if size is None:
return self._generator()
return [self._generator() for _ in range(size)]
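# Hedged usage sketch (not part of the original module): FakerGenerator simply
# wraps a named faker provider ("name" is a standard one). Requires the faker
# package; the seed and sample size below are arbitrary.
def _example_faker_generator():
    rng = numpy.random.default_rng(0)
    gen = FakerGenerator(rng, "name")
    return gen.generate(size=3)  # e.g. three fake person names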
class JointGenerator(Generator):
"""Generates same values across various calls, from different destination."""
class IdData:
"""Helper that holds information about per key index."""
def __init__(self, index, is_choice):
self.index = index
self.is_choice = is_choice
def next_index(self):
self.index += 1
return self.index - 1
def __init__(self,
rand: numpy.random.Generator,
child: Generator,
choice_keys: Optional[Set[str]] = None):
super().__init__(rand)
self._child = child
self._ids = {}
self._id_data = [] # List[IdData]
self._values = []
if isinstance(choice_keys, list):
self._choice_keys = set(choice_keys)
elif isinstance(choice_keys, set):
self._choice_keys = choice_keys
elif choice_keys is None:
self._choice_keys = set()
else:
raise ValueError(
f'Invalid `choice_keys` argument provided: {choice_keys}')
def add_choice(self, key):
self._choice_keys.add(key)
def generate(self,
size: Optional[int] = None,
key: Optional[str] = None,
context: Optional[ContextBase] = None) -> Any:
if key not in self._ids:
self._ids[key] = len(self._id_data)
id_data = JointGenerator.IdData(0, key in self._choice_keys)
self._id_data.append(id_data)
else:
id_data = self._id_data[self._ids[key]]
next_id = id_data.next_index()
while len(self._values) <= next_id:
self._values.append(self._child.generate(size, key, context))
if id_data.is_choice:
return self._values[self._rand.choice(len(self._values))]
return self._values[next_id]
def result_type(self) -> type:
return self._child.result_type()
@classmethod
def build(cls, builder: Builder, spec):
if not isinstance(spec, (tuple, list)) or len(spec) != 2:
raise ValueError(f'Expecting a tuple argument to {cls}. Got {spec}')
return cls(builder.rand, BuildGenerator(builder, spec[0]), spec[1])
def save(self) -> Any:
# Save choices as a sorted list - easier to test and serialize.
choices = list(self._choice_keys)
choices.sort()
return (Names.JOINT, (self._child.save(), choices))
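# Hedged usage sketch (not part of the original module): JointGenerator caches
# generated values and replays them for other keys. ValueGenerator is assumed to
# return the constant it was constructed with, as its use for defaults suggests.
def _example_joint_generator():
    rng = numpy.random.default_rng(0)
    joint = JointGenerator(rng, ValueGenerator(rng, 42))
    first = joint.generate(key="a")   # generates and caches the first value
    second = joint.generate(key="b")  # a new key replays the cached value
    return first == second            # True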
class FieldGenerator(Builder):
"""Generates a field from the currently generated record."""
def __init__(self, rand: numpy.random.Generator, field_name: str,
default: Generator, delta: Generator, result_type: type):
super().__init__(rand)
if (default.result_type() != type(None) and
default.result_type() != result_type):
raise ValueError(
f'{type(self)} has default value of a different type than the '
f'one discovered for field {field_name}: '
f'`{default.result_type()}` vs. `{result_type}`')
if delta.result_type() != int:
raise ValueError(f'{type(self)} expects int from delta generator. '
f'found: {delta.result_type()}')
self._field_name = field_name
self._field = ContextField(field_name)
self._default = default
self._delta = delta
self._result_type = result_type
self._last_context = None
def generate(self,
size: Optional[int] = None,
key: Optional[str] = None,
context: Optional[ContextBase] = None) -> Any:
if not context:
return self._default.generate(size, key, context)
if context != self._last_context:
self._last_context = context
self._index = 0
if not size:
return self._generate_value(key, context)
elif key is not None and context and key == self._field_name:
# Self reference - generate and update the context value:
record = []
for _ in range(size):
value = self._generate_value(key, context)
context.set(key, value)
record.append(value)
return record
else:
return [self._generate_value(key, context) for _ in range(size)]
def _generate_value(self, key: Optional[str], context: ContextBase):
delta = self._delta.generate(None, key, context)
value = None
if delta is not None and delta >= 0:
value = context.get(self._index - delta, self._field)
self._index += 1
if value is None:
return self._default.generate(None, key, context)
return value
@classmethod
def build(cls, builder: Builder, spec):
default = ValueGenerator(builder.rand, None)
delta = ValueGenerator(builder.rand, 0)
if not isinstance(spec, (tuple, list)):
field_name = spec
elif len(spec) not in (1, 2, 3):
raise ValueError(
f'{cls} expects one to three arguments got `{spec}`.')
else:
field_name = spec[0]
if len(spec) > 1:
default = spec[1]
if len(spec) > 2:
delta = spec[2]
default_gen = BuildGenerator(builder, default)
if field_name not in builder.record_structure:
if default_gen is None:
raise ValueError(
f'Cannot reference field `{field_name}` under existing generators. '
'Make sure that this generator is created after the one '
'for the referenced field. This may mean to rearrange the '
'fields in the dataclass, or to shuffle the order of keys in '
'the dictionary passed to the top record generator.')
result_type = default_gen.result_type()
else:
result_type = builder.record_structure[field_name].result_type()
if (default_gen.result_type() != type(None) and
result_type != default_gen.result_type()):
raise ValueError(
f'The field `{field_name}` under existing generator '
'produces a field of a different type than the '
f'default generator: `{result_type}` vs. '
f'`{default_gen.result_type()}`')
return cls(builder.rand, field_name, default_gen,
BuildGenerator(builder, delta), result_type)
def save(self) -> Any:
return (Names.FIELD, (self._field_name, self._default.save(),
self._delta.save()))
def result_type(self) -> type:
return self._result_type
class ApplyGenerator(Generator):
"""Combines input using actual code from the combiner function"""
def __init__(self,
rand: numpy.random.Generator,
combiner: Callable,
children: List[Generator],
result_type: Optional[type] = None):
super().__init__(rand)
self._combiner = combiner
self._children = children
if result_type is None:
# Try to find the result type from function annotation.
if 'return' not in combiner.__annotations__:
raise ValueError(
f'{type(self)} needs a result type, either annotate '
f'your function or pass a `result_type` parameter')
self._result_type = combiner.__annotations__['return']
else:
self._result_type = result_type
def result_type(self) -> type:
return self._result_type
def save(self) -> Any:
return (Names.APPLY, (encoder_module.dumps(self._combiner),
[child.save() for child in self._children],
encoder_module.dumps(self._result_type)))
@classmethod
def build(cls, builder: Builder, spec):
if not isinstance(spec, (tuple, list)) or len(spec) not in (2, 3):
raise ValueError(f'{cls} expects two underlying arguments')
if isinstance(spec[0], bytes):
combiner = encoder_module.loads(spec[0])
else:
combiner = spec[0]
result_type = None
if len(spec) > 2:
if isinstance(spec[2], bytes):
result_type = encoder_module.loads(spec[2])
else:
result_type = spec[2]
# pylint: disable=no-value-for-parameter
return cls(builder.rand, combiner,
[BuildGenerator(builder, s) for s in spec[1]], result_type)
def generate(self,
size: Optional[int] = None,
key: Optional[str] = None,
context: Optional[ContextBase] = None) -> str:
if size is None:
return self._combiner(*[
child.generate(None, key, context) for child in self._children
])
return [
self._combiner(*elems) for elems in zip(*[
child.generate(size, key, context) for child in self._children
])
]
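# Hedged usage sketch (not part of the original module): ApplyGenerator feeds the
# outputs of its child generators into a plain Python callable. result_type is
# passed explicitly because the lambda carries no return annotation; ValueGenerator
# is assumed to yield the constants it was built with.
def _example_apply_generator():
    rng = numpy.random.default_rng(0)
    gen = ApplyGenerator(rng, lambda a, b: a + b,
                         [ValueGenerator(rng, 1), ValueGenerator(rng, 2)],
                         result_type=int)
    return gen.generate()  # 3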
_CLASS_MAP
"""
FIN module customisations for RLPPTM
License: MIT
"""
from collections import OrderedDict
from gluon import current, A, DIV, IS_EMPTY_OR, IS_INT_IN_RANGE, TAG
from core import FS, IS_ONE_OF, s3_str
ISSUER_ORG_TYPE = "pe_id$pe_id:org_organisation.org_organisation_organisation_type.organisation_type_id"
# -------------------------------------------------------------------------
def fin_voucher_resource(r, tablename):
T = current.T
auth = current.auth
has_role = auth.s3_has_role
s3db = current.s3db
table = s3db.fin_voucher
# Determine form mode
resource = r.resource
group_voucher = resource.tablename == "fin_voucher" and \
r.get_vars.get("g") == "1"
# Customise fields
field = table.pe_id
field.label = T("Issuer##fin")
from core import WithAdvice
field = table.bearer_dob
if group_voucher:
label = T("Group Representative Date of Birth")
intro = "GroupDoBIntro"
else:
label = T("Beneficiary Date of Birth")
intro = "BearerDoBIntro"
field.label = label
field.widget = WithAdvice(field.widget,
text = ("fin", "voucher", intro),
)
if not has_role("VOUCHER_ISSUER"):
field.readable = field.writable = False
field = table.initial_credit
field.label = T("Number of Beneficiaries")
if group_voucher:
field.default = None
field.requires = IS_INT_IN_RANGE(1, 51,
error_message = T("Enter the number of beneficiaries (max %(max)s)"),
)
field.readable = field.writable = True
field = table.comments
field.label = T("Memoranda")
field.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Memoranda"),
T("Notes of the Issuer"),
),
)
if not has_role("VOUCHER_PROVIDER"):
field.readable = field.writable = False
# Custom list fields
if has_role("VOUCHER_ISSUER"):
list_fields = ["program_id",
"signature",
(T("Beneficiary/Representative Date of Birth"), "bearer_dob"),
"initial_credit",
"credit_spent",
(T("Status"), "status"),
"date",
#"valid_until",
"comments",
]
else:
list_fields = ["program_id",
"signature",
(T("Status"), "status"),
"pe_id",
#(T("Issuer Type"), ISSUER_ORG_TYPE),
"eligibility_type_id",
"initial_credit",
"credit_spent",
"date",
#"valid_until",
]
# Report Options
if r.method == "report":
facts = ((T("Credit Redeemed"), "sum(credit_spent)"),
(T("Credit Issued"), "sum(initial_credit)"),
(T("Remaining Credit"), "sum(balance)"),
(T("Number of Vouchers"), "count(id)"),
)
axes = [ISSUER_ORG_TYPE,
"eligibility_type_id",
"program_id",
"status",
"pe_id",
]
report_options = {
"rows": axes,
"cols": axes,
"fact": facts,
"defaults": {"rows": axes[0],
"cols": axes[1],
"fact": facts[0],
"totals": True,
},
}
s3db.configure("fin_voucher",
report_options = report_options,
)
s3db.configure("fin_voucher",
list_fields = list_fields,
orderby = "fin_voucher.date desc",
)
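# Hedged note (assumption, not taken from this file): in a Sahana Eden template
# these customisations are normally wired up in the template's config.py, e.g.
#
#     settings.customise_fin_voucher_resource = fin_voucher_resource
#     settings.customise_fin_voucher_controller = fin_voucher_controller
#
# so that the framework applies them whenever the fin_voucher table is accessed.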
# -------------------------------------------------------------------------
def fin_voucher_controller(**attr):
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
# Enable bigtable features
settings.base.bigtable = True
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
result = standard_prep(r) if callable(standard_prep) else True
# Restrict data formats
settings.ui.export_formats = None
representation = r.representation
ALLOWED_FORMATS = ("html", "iframe", "popup", "aadata", "json")
if representation not in ALLOWED_FORMATS and \
not(r.record and representation == "card"):
r.error(403, current.ERROR.NOT_PERMITTED)
is_program_manager = current.auth.s3_has_role("PROGRAM_MANAGER")
db = current.db
s3db = current.s3db
# Check which programs and organisations the user can issue vouchers for
program_ids, org_ids, pe_ids = s3db.fin_voucher_permitted_programs(mode="issuer")
resource = r.resource
table = resource.table
if program_ids and org_ids:
etypes = s3db.fin_voucher_eligibility_types(program_ids, org_ids)
program_ids = list(etypes.keys())
if not program_ids or not org_ids:
# User is not permitted to issue vouchers for any programs/issuers
resource.configure(insertable = False)
else:
# Limit the program selector to permitted+active programs
field = table.program_id
ptable = s3db.fin_voucher_program
dbset = db(ptable.id.belongs(program_ids))
field.requires = IS_ONE_OF(dbset, "fin_voucher_program.id",
field.represent,
sort = True,
)
# Default the program selector if only one program can be chosen
if len(program_ids) == 1:
program_id = program_ids[0]
field.default = program_id
field.writable = False
# Limit the eligibility type selector to applicable types
allow_empty = False
if len(program_ids) == 1:
etype_ids = etypes[program_ids[0]]
else:
etype_ids = []
for item in etypes.values():
if item:
etype_ids += item
else:
allow_empty = True
etype_ids = list(set(etype_ids)) if etype_ids else None
field = table.eligibility_type_id
if etype_ids is None:
# No selectable eligibility types => hide selector
field.readable = field.writable = False
elif len(etype_ids) == 1 and not allow_empty:
# Only one type selectable => default
field.default = etype_ids[0]
field.writable = False
else:
# Multiple types selectable
ttable = s3db.fin_voucher_eligibility_type
etset = db(ttable.id.belongs(etype_ids))
field.requires = IS_ONE_OF(etset, "fin_voucher_eligibility_type.id",
field.represent,
sort = True,
)
if allow_empty:
field.requires = IS_EMPTY_OR(field.requires)
# Limit the issuer selector to permitted entities
etable = s3db.pr_pentity
field = table.pe_id
dbset = db(etable.pe_id.belongs(pe_ids))
field.requires = IS_ONE_OF(dbset, "pr_pentity.pe_id",
field.represent,
)
# Hide the issuer selector if only one entity can be chosen
if len(pe_ids) == 1:
field.default = pe_ids[0]
field.readable = field.writable = False
if r.interactive:
if r.get_vars.get("g") == "1":
s3.crud_strings["fin_voucher"]["label_create"] = T("Create Group Voucher")
# Hide valid_until from create-form (will be set onaccept)
field = table.valid_until
field.readable = bool(r.record)
field.writable = False
# Always show number of beneficiaries
if r.record:
field = table.initial_credit
field.readable = True
# Filter Widgets
from core import DateFilter, TextFilter
text_fields = ["signature", "comments", "program_id$name"]
if is_program_manager:
text_fields.append("pe_id$pe_id:org_organisation.name")
filter_widgets = [
TextFilter(text_fields,
label = T("Search"),
),
DateFilter("date",
),
]
if is_program_manager:
from core import OptionsFilter, get_filter_options
filter_widgets.extend([
OptionsFilter("eligibility_type_id",
hidden = True,
label = T("Type of Eligibility"),
),
OptionsFilter(ISSUER_ORG_TYPE,
hidden = True,
label = T("Issuer Type"),
options = lambda: get_filter_options("org_organisation_type"),
),
])
resource.configure(filter_widgets = filter_widgets,
)
elif r.representation == "card":
# Configure ID card layout
from ..vouchers import VoucherCardLayout
resource.configure(pdf_card_layout = VoucherCardLayout,
pdf_card_suffix = lambda record: \
s3_str(record.signature) \
if record and record.signature else None,
)
return result
s3.prep = prep
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if not r.component and isinstance(output, dict):
if r.record and r.method in (None, "update", "read"):
# Custom CRUD buttons
if "buttons" not in output:
buttons = output["buttons"] = {}
else:
buttons = output["buttons"]
# PDF-button
pdf_download = A(T("Download PDF"),
_href = "/%s/fin/voucher/%s.card" % (r.application, r.record.id),
_class="action-btn",
)
# Render in place of the delete-button
buttons["delete_btn"] = TAG[""](pdf_download,
)
return output
s3.postp = custom_postp
# Custom rheader
from ..rheaders import rlpptm_fin_rheader
attr["rheader"] = rlpptm_fin_rheader
return attr
# -------------------------------------------------------------------------
def fin_voucher_debit_resource(r, tablename):
T = current.T
auth = current.auth
has_role = auth.s3_has_role
s3db = current.s3db
table = s3db.fin_voucher_debit
# Determine form mode
resource = r.resource
group_voucher = resource.tablename == "fin_voucher_debit" and \
r.get_vars.get("g") == "1"
# Customise fields
field = table.comments
field.label = T("Memoranda")
field.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Memoranda"),
T("Notes of the Provider"),
),
)
if not has_role("VOUCHER_PROVIDER"):
field.readable = field.writable = False
field = table.bearer_dob
if group_voucher:
label = T("Group Representative Date of Birth")
else:
label = T("Beneficiary Date of Birth")
field.label = label
if not has_role("VOUCHER_PROVIDER"):
field.readable = field.writable = False
field = table.quantity
if group_voucher:
field.default = None
field.requires = IS_INT_IN_RANGE(1,
error_message = T("Enter the service quantity"),
)
field.readable = field.writable = True
field = table.balance
field.label = T("Remaining Compensation Claims")
# Custom list_fields
list_fields = [(T("Date"), "date"),
"program_id",
"voucher_id$signature",
"quantity",
"status",
]
if current.auth.s3_has_roles(("PROGRAM_MANAGER", "PROGRAM_ACCOUNTANT")):
# Include issuer and provider
list_fields[3:3] = ["voucher_id$pe_id",
"pe_id",
]
if has_role("VOUCHER_PROVIDER"):
# Include provider notes
list_fields.append("comments")
s3db.configure("fin_voucher_debit",
list_fields = list_fields,
)
# Filters
if r.interactive:
from core import DateFilter, TextFilter
filter_widgets = [TextFilter(["program_id$name",
"signature",
],
label = T("Search"),
),
DateFilter("date",
label = T("Date"),
),
]
s3db.configure("fin_voucher_debit",
filter_widgets = filter_widgets,
)
# Report options
if r.method == "report":
field = table.created_by
field.represent = s3db.auth_UserRepresent(show_name = True,
show_email = False,
)
facts = ((T("Total Services Rendered"), "sum(quantity)"),
(T("Number of Accepted Vouchers"), "count(id)"),
(T("Remaining Compensation Claims"), "sum(balance)"),
)
axes = ["program_id",
"status",
]
has_role = auth.s3_has_role
if has_role("PROGRAM_MANAGER"):
axes.insert(0, "pe_id")
if has_role("VOUCHER_PROVIDER"):
axes.append((T("User"), "created_by"))
report_options = {
"rows": axes,
"cols": axes,
"fact": facts,
"defaults": {"rows": axes[0],
"cols": None,
"fact": facts[0],
"totals": True,
},
}
s3db.configure("fin_voucher_debit",
report_options = report_options,
)
# -------------------------------------------------------------------------
def fin_voucher_debit_controller(**attr):
T = current.T
s3 = current.response.s3
# Enable bigtable features
current.deployment_settings.base.bigtable = True
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
result = standard_prep(r) if callable(standard_prep) else True
db = current.db
s3db = current.s3db
resource = r.resource
# Catch inappropriate cancel-attempts
record = r.record
if record and not r.component and r.method == "cancel":
from ..helpers import can_cancel_debit
if not can_cancel_debit(record):
r.unauthorised()
has_role = current.auth.s3_has_role
if has_role("PROGRAM_ACCOUNTANT") and not has_role("PROGRAM_MANAGER"):
# PROGRAM_ACCOUNTANT can only see debits where they are assigned
# for the billing process
from ..helpers import get_role_realms
role_realms = get_role_realms("PROGRAM_ACCOUNTANT")
if role_realms is not None:
query = FS("billing_id$organisation_id$pe_id").belongs(role_realms)
resource.add_filter(query)
# PROGRAM_ACCOUNTANT does not (need to) see cancelled debits
resource.add_filter(FS("cancelled") == False)
# Check which programs and organisations the user can accept vouchers for
program_ids, org_ids, pe_ids = s3db.fin_voucher_permitted_programs(
mode = "provider",
partners_only = True,
)
table = resource.table
if not program_ids or not org_ids:
# User is not permitted to accept vouchers for any programs/providers
| |
output_INT_string = ' '.join([str(elem) for elem in output_INT_list])
print('Converted TXT to INTs:', npi, ' / ', nsi)
return output_INT_list, output_INT_string, npi, nsi
###################################################################################
def Tegridy_INT_to_TXT_Converter(input_INT_list):
'''Tegridy Integers to TXT Converter
Input: List of integers in TMIDI-TXT-INT format
Output: Decoded TXT string in TMIDI-TXT format
Project Los Angeles
Tegridy Code 2020'''
output_TXT_string = ''
for i in input_INT_list:
output_TXT_string += chr(int(i))
return output_TXT_string
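# Illustrative sketch (not part of the original module): the converter simply maps
# integers back to characters, so chr()/ord() round trips are lossless.
def _example_int_to_txt():
    ints = [ord(c) for c in 'TMIDI']           # [84, 77, 73, 68, 73]
    return Tegridy_INT_to_TXT_Converter(ints)  # 'TMIDI'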
###################################################################################
def Tegridy_INT_String_to_TXT_Converter(input_INT_String, line_by_line_input=True):
'''Tegridy Integers String to TXT Converter
Input: List of integers in TMIDI-TXT-INT-String format
Output: Decoded TXT string in TMIDI-TXT format
Project Los Angeles
Tegridy Code 2020'''
print('Tegridy Integers String to TXT Converter')
if line_by_line_input:
input_string = input_INT_String.split('\n')
else:
input_string = input_INT_String.split(' ')
output_TXT_string = ''
for i in input_string:
try:
output_TXT_string += chr(abs(int(i)))
except:
print('Bad note:', i)
continue
print('Done!')
return output_TXT_string
###################################################################################
def Tegridy_SONG_to_MIDI_Converter(SONG,
output_signature = 'Tegridy TMIDI Module',
track_name = 'Composition Track',
number_of_ticks_per_quarter = 425,
list_of_MIDI_patches = [0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 0, 0, 0, 0, 0, 0],
output_file_name = 'TMIDI-Composition',
text_encoding='ISO-8859-1'):
'''Tegridy SONG to MIDI Converter
Input: Input SONG in TMIDI SONG/MIDI.py Score format
Output MIDI Track 0 name / MIDI Signature
Output MIDI Track 1 name / Composition track name
Number of ticks per quarter for the output MIDI
List of 16 MIDI patch numbers for output MIDI. Def. is MuseNet compatible patches.
Output file name w/o .mid extension.
Optional text encoding if you are working with text_events/lyrics. This is especially useful for Karaoke. Please note that anything but ISO-8859-1 is a non-standard way of encoding text_events according to MIDI specs.
Output: MIDI File
Detailed MIDI stats
Project Los Angeles
Tegridy Code 2020'''
print('Converting to MIDI. Please stand-by...')
output_header = [number_of_ticks_per_quarter,
[['track_name', 0, bytes(output_signature, text_encoding)]]]
patch_list = [['patch_change', 0, 0, list_of_MIDI_patches[0]],
['patch_change', 0, 1, list_of_MIDI_patches[1]],
['patch_change', 0, 2, list_of_MIDI_patches[2]],
['patch_change', 0, 3, list_of_MIDI_patches[3]],
['patch_change', 0, 4, list_of_MIDI_patches[4]],
['patch_change', 0, 5, list_of_MIDI_patches[5]],
['patch_change', 0, 6, list_of_MIDI_patches[6]],
['patch_change', 0, 7, list_of_MIDI_patches[7]],
['patch_change', 0, 8, list_of_MIDI_patches[8]],
['patch_change', 0, 9, list_of_MIDI_patches[9]],
['patch_change', 0, 10, list_of_MIDI_patches[10]],
['patch_change', 0, 11, list_of_MIDI_patches[11]],
['patch_change', 0, 12, list_of_MIDI_patches[12]],
['patch_change', 0, 13, list_of_MIDI_patches[13]],
['patch_change', 0, 14, list_of_MIDI_patches[14]],
['patch_change', 0, 15, list_of_MIDI_patches[15]],
['track_name', 0, bytes(track_name, text_encoding)]]
output = output_header + [patch_list + SONG]
midi_data = score2midi(output, text_encoding)
detailed_MIDI_stats = score2stats(output)
with open(output_file_name + '.mid', 'wb') as midi_file:
midi_file.write(midi_data)
midi_file.close()
print('Done! Enjoy! :)')
return detailed_MIDI_stats
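# Hedged usage sketch (not part of the original module): a SONG here is a list of
# MIDI.py score events; for notes the assumed layout is
# ['note', start_time_ms, duration_ms, channel, pitch, velocity].
def _example_song_to_midi():
    song = [['note', 0, 400, 0, 60, 90],    # middle C
            ['note', 400, 400, 0, 64, 90]]  # E above it
    return Tegridy_SONG_to_MIDI_Converter(song, output_file_name='TMIDI-Demo')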
###################################################################################
def Tegridy_File_Time_Stamp(input_file_name='File_Created_on_', ext = ''):
'''Tegridy File Time Stamp
Input: Full path and file name without extension
File extension
Output: File name string with time-stamp and extension (time-stamped file name)
Project Los Angeles
Tegridy Code 2021'''
print('Time-stamping output file...')
now = ''
now_n = str(datetime.now())
now_n = now_n.replace(' ', '_')
now_n = now_n.replace(':', '_')
now = now_n.replace('.', '_')
fname = input_file_name + str(now) + ext
return(fname)
###################################################################################
def Tegridy_Any_Pickle_File_Writer(Data, input_file_name='TMIDI_Pickle_File'):
'''Tegridy Pickle File Writer
Input: Data to write (I.e. a list)
Full path and file name without extension
Output: Named Pickle file
Project Los Angeles
Tegridy Code 2021'''
print('Tegridy Pickle File Writer')
full_path_to_output_dataset_to = input_file_name + '.pickle'
if os.path.exists(full_path_to_output_dataset_to):
os.remove(full_path_to_output_dataset_to)
print('Removing old Dataset...')
else:
print("Creating new Dataset file...")
with open(full_path_to_output_dataset_to, 'wb') as filehandle:
# store the data as binary data stream
pickle.dump(Data, filehandle, protocol=pickle.HIGHEST_PROTOCOL)
print('Dataset was saved as:', full_path_to_output_dataset_to)
print('Task complete. Enjoy! :)')
###################################################################################
def Tegridy_Any_Pickle_File_Reader(input_file_name='TMIDI_Pickle_File', ext='.pickle'):
'''Tegridy Pickle File Loader
Input: Full path and file name without extension
File extension if different from default .pickle
Output: Standard Python 3 unpickled data object
Project Los Angeles
Tegridy Code 2021'''
print('Tegridy Pickle File Loader')
print('Loading the pickle file. Please wait...')
with open(input_file_name + ext, 'rb') as pickle_file:
content = pickle.load(pickle_file)
return content
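# Illustrative sketch (not part of the original module): the writer and reader are
# symmetric, so any picklable object survives a round trip.
def _example_pickle_round_trip():
    data = {'notes': [60, 64, 67]}
    Tegridy_Any_Pickle_File_Writer(data, 'Demo_Pickle')
    return Tegridy_Any_Pickle_File_Reader('Demo_Pickle')  # == data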
###################################################################################
# TMIDI X Code is below
###################################################################################
def Optimus_MIDI_TXT_Processor(MIDI_file,
line_by_line_output=True,
chordify_TXT=False,
dataset_MIDI_events_time_denominator=1,
output_velocity=True,
output_MIDI_channels = False,
MIDI_channel=0,
MIDI_patch=[0, 1],
char_offset = 30000,
transpose_by = 0,
flip=False,
melody_conditioned_encoding=False,
melody_pitch_baseline = 0,
number_of_notes_to_sample = -1,
sampling_offset_from_start = 0,
karaoke=False,
karaoke_language_encoding='utf-8',
song_name='Song',
perfect_timings=False,
musenet_encoding=False,
transform=0,
zero_token=False,
reset_timings=False):
'''Project Los Angeles
Tegridy Code 2021'''
###########
debug = False
ev = 0
chords_list_final = []
chords_list = []
events_matrix = []
melody = []
melody1 = []
itrack = 1
min_note = 0
max_note = 0
ev = 0
patch = 0
score = []
rec_event = []
txt = ''
txtc = ''
chords = []
melody_chords = []
karaoke_events_matrix = []
karaokez = []
sample = 0
start_sample = 0
bass_melody = []
INTS = []
bints = 0
###########
def list_average(num):
sum_num = 0
for t in num:
sum_num = sum_num + t
avg = sum_num / len(num)
return avg
###########
#print('Loading MIDI file...')
midi_file = open(MIDI_file, 'rb')
if debug: print('Processing File:', MIDI_file)
try:
opus = midi2opus(midi_file.read())
except:
print('Problematic MIDI. Skipping...')
print('File name:', MIDI_file)
midi_file.close()
return txt, melody, chords
midi_file.close()
score1 = to_millisecs(opus)
score2 = opus2score(score1)
# score2 = opus2score(opus) # TODO Improve score timings when it will be possible.
if MIDI_channel == 16: # Process all MIDI channels
score = score2
if MIDI_channel >= 0 and MIDI_channel <= 15: # Process only a selected single MIDI channel
score = grep(score2, [MIDI_channel])
if MIDI_channel == -1: # Process all channels except drums (except channel 9)
score = grep(score2, [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15])
#print('Reading all MIDI events from the MIDI file...')
while itrack < len(score):
for event in score[itrack]:
if perfect_timings:
if event[0] == 'note':
event[1] = round(event[1], -1)
event[2] = round(event[2], -1)
if event[0] == 'text_event' or event[0] == 'lyric' or event[0] == 'note':
if perfect_timings:
event[1] = round(event[1], -1)
karaokez.append(event)
if event[0] == 'text_event' or event[0] == 'lyric':
if perfect_timings:
event[1] = round(event[1], -1)
try:
event[2] = str(event[2].decode(karaoke_language_encoding, 'replace')).replace('/', '').replace(' ', '').replace('\\', '')
except:
event[2] = str(event[2]).replace('/', '').replace(' ', '').replace('\\', '')
continue
karaoke_events_matrix.append(event)
if event[0] == 'patch_change':
patch = event[3]
if event[0] == 'note' and patch in MIDI_patch:
if len(event) == 6: # Checking for bad notes...
eve = copy.deepcopy(event)
eve[1] = int(event[1] / dataset_MIDI_events_time_denominator)
eve[2] = int(event[2] / dataset_MIDI_events_time_denominator)
eve[4] = int(event[4] + transpose_by)
if flip == True:
eve[4] = int(127 - (event[4] + transpose_by))
if number_of_notes_to_sample > -1:
if sample <= number_of_notes_to_sample:
if start_sample >= sampling_offset_from_start:
events_matrix.append(eve)
sample += 1
ev += 1
else:
start_sample += 1
else:
events_matrix.append(eve)
ev += 1
start_sample += 1
itrack +=1 # Going to next track...
#print('Doing some heavy pythonic sorting...Please stand by...')
fn = os.path.basename(MIDI_file)
song_name = song_name.replace(' ', '_').replace('=', '_').replace('\'', '-')
if song_name == 'Song':
sng_name = fn.split('.')[0].replace(' ', '_').replace('=', '_').replace('\'', '-')
song_name = sng_name
# Zero token
if zero_token:
txt += chr(char_offset) + chr(char_offset)
if output_MIDI_channels:
txt += chr(char_offset)
if output_velocity:
txt += chr(char_offset) + chr(char_offset)
else:
txt += chr(char_offset)
txtc += chr(char_offset) + chr(char_offset)
if output_MIDI_channels:
txtc += chr(char_offset)
if output_velocity:
txtc += chr(char_offset) + chr(char_offset)
else:
txtc += chr(char_offset)
txt += '=' + song_name + '_with_' + str(len(events_matrix)-1) + '_notes'
txtc += '=' + song_name + '_with_' + str(len(events_matrix)-1) + '_notes'
else:
# Song stamp
txt += 'SONG=' + song_name + '_with_' + str(len(events_matrix)-1) + '_notes'
txtc += 'SONG=' + song_name + '_with_' + str(len(events_matrix)-1) + '_notes'
if line_by_line_output:
txt += chr(10)
txtc += chr(10)
else:
txt += chr(32)
txtc += chr(32)
#print('Sorting input by start time...')
events_matrix.sort(key=lambda x: x[1]) # Sorting input by start time
#print('Timings converter')
if reset_timings:
ev_matrix = Tegridy_Timings_Converter(events_matrix)[0]
else:
ev_matrix = events_matrix
chords.extend(ev_matrix)
#print(chords)
#print('Extracting melody...')
melody_list = []
#print('Grouping by start time. This will take a while...')
values = set(map(lambda x:x[1], ev_matrix)) # Non-multithreaded function version just in case
groups = [[y for y in ev_matrix if y[1]==x and len(y) == 6] for x in values] # Grouping notes into chords while discarting bad notes...
#print('Sorting events...')
for items in groups:
items.sort(reverse=True,
message.author != bot.user:
msg = message.content
if msg == "Within on stem.":
await asyncio.sleep(average_typing_speed * 3)
await message.channel.send("And dreadfully distinct.")
else:
pass
else:
pass
@bot.listen("on_message")
async def earth_diameter(message):
"""
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
if msg == "What is Earth's diameter in miles?" \
or msg == "What is the diameter of the Earth in miles?" \
or msg == "What is the Earth's diameter in miles?":
await asyncio.sleep(average_typing_speed * 2)
await message.channel.send("7,917.5 miles")
elif msg == "What is Earth's diameter in kilometres?" \
or msg == "What is the diameter of the Earth in kilometres?" \
or msg == "What is the Earth's diameter in kilometres?":
await asyncio.sleep(average_typing_speed * 2)
await message.channel.send("12,742 kilometres")
elif msg == "What is Earth's diameter in kilometers?" \
or msg == "What is the diameter of the Earth in kilometers?" \
or msg == "What is the Earth's diameter in kilometers?":
await asyncio.sleep(average_typing_speed * 2)
await message.channel.send("12,742 kilometers")
else:
pass
else:
pass
@bot.listen("on_message")
async def equal_rights(message):
"""
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
if any(name in msg for name in names) \
and "you want anything" in msg \
and "?" in msg:
await asyncio.sleep(average_typing_speed * 2)
await message.channel.send("Equal rights?")
else:
pass
else:
pass
@bot.listen("on_message")
async def fountain(message):
"""
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
if msg == "Have you ever seen the fountain in Lincoln center?" \
or msg == "Have you seen fountains out in the wild?" \
or msg == "Have you seen the Trevi fountain in Rome?" \
or msg == "What's it like when you have an orgasm?" \
or msg == "What's your favorite part of the moon?":
await asyncio.sleep(average_typing_speed)
await message.channel.send("Fountain.")
elif msg == "Have you read the Fountainhead?" \
or msg == "How did the white Fountain make you feel?" \
or msg == "Is it pure white?" \
or msg == "Is that a metaphor?":
await asyncio.sleep(average_typing_speed * 2)
await message.channel.send("White Fountain.")
else:
pass
else:
pass
@bot.listen("on_message")
async def rock_paper_scissors_prompt(message):
"""
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
if any(name in msg for name in names) \
and "paper" in msg \
and "rock" in msg \
and "scissors" in msg \
and "?" in msg:
await asyncio.sleep(average_typing_speed * 7)
await message.channel.send("On go, okay? One, two, three, go!")
else:
pass
else:
pass
@bot.listen("on_message")
async def rock_paper_scissors_response(message):
"""
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
possible_actions = [
"rock",
"paper",
"scissors"
]
deme_throw = random.choice(possible_actions)
if msg == deme_throw:
await asyncio.sleep(average_typing_speed * 7)
await message.channel.send(f"We both selected {msg}. It's a tie!")
elif msg == "rock":
if deme_throw == "scissors":
await asyncio.sleep(average_typing_speed * 5)
await message.channel.send("Rock smashes scissors! You win!")
else:
await asyncio.sleep(average_typing_speed * 5)
await message.channel.send("Paper covers rock! You lose.")
elif msg == "paper":
if deme_throw == "rock":
await asyncio.sleep(average_typing_speed * 5)
await message.channel.send("Paper covers rock! You win!")
else:
await asyncio.sleep(average_typing_speed * 5)
await message.channel.send("Scissors cuts paper! You lose.")
elif msg == "scissors":
if deme_throw == "paper":
await asyncio.sleep(average_typing_speed * 5)
await message.channel.send("Scissors cuts paper! You win!")
else:
await asyncio.sleep(average_typing_speed * 5)
await message.channel.send("Rock smashes scissors! You lose.")
else:
pass
else:
pass
@bot.listen("on_message")
async def hmm_response(message):
"""
This function listens of Deme's name, when asked in the form of a
question.
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
if msg == "Deme?":
await asyncio.sleep(average_typing_speed)
await message.channel.send("Hmm?")
else:
pass
else:
pass
@bot.listen("on_message")
async def i_dont_know(message):
"""
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
if any(name in msg for name in names) \
and "am I here" in msg \
and "?" in msg:
await asyncio.sleep(average_typing_speed * 8)
await message.channel.send("It's inherent to the programming of the matrix.")
else:
pass
else:
pass
@bot.listen("on_message")
async def interlinked(message):
"""
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
if msg == "Do they teach you how to feel finger to finger?" \
or msg == "Do you dream about being interlinked?" \
or msg == "Do you feel that there's a part of you that's missing?" \
or msg == "Do you like to connect to things?" \
or msg == "Do you long to have your heart interlinked?" \
or msg == "Have they left a place for you where you can dream?" \
or msg == "Have they let you feel heartbreak?" \
or msg == "What happens when that linkage is broken?" \
or msg == "What's it like to hold the hand of someone you love?" \
or msg == "What's it like to hold your child in your arms?" \
or msg == "What's it like to play with your dog?" \
or msg == """
When you're not performing your duties do they keep you in a little box?
""":
await asyncio.sleep(average_typing_speed)
await message.channel.send("Interlinked.")
else:
pass
else:
pass
@bot.listen("on_message")
async def mystery(message):
"""
This function listens for the initial phrase of the Baseline Test from
Blade Runner 2049.
"""
if message.author != bot.user:
msg = message.content
if msg == "Could you be any more cryptic?":
await asyncio.sleep(average_typing_speed * 6)
await message.channel.send("It's a mystery. You'll love it.")
else:
pass
else:
pass
@bot.listen("on_message")
async def no_response(message):
"""
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
if any(name in msg for name in names) \
and "you marry me" in msg \
and "?" in msg:
await asyncio.sleep(average_typing_speed)
await message.channel.send("No.")
else:
pass
else:
pass
@bot.event
async def on_message(message):
"""
This function checks who sent the message.
"""
if message.author == bot.user:
return
else:
pass
@bot.event
async def on_ready():
"""
This function sends a message to the terminal when Deme has finished
setting up with Discord.
"""
print(f"{bot.user} (Deme v0.0.0.01-48), at your service.")
@bot.listen("on_message")
async def phrasing_boom(message):
"""
This function listens for certain phrases (sexual innuendos).
Inspired by the adult animation Archer.
"""
global archer_phrasing_phrases, average_typing_speed
if message.author != bot.user:
msg = message.content
if any(phrase in msg for phrase in archer_phrasing_phrases):
phrasing_response = [
"Phrasing!",
"Phrasing! Boom!",
"So are we just done with 'phrasing'?"
]
response = random.choice(phrasing_response)
await asyncio.sleep(average_typing_speed * 3.33)
await message.channel.send(response)
else:
pass
else:
pass
@bot.listen("on_message")
async def a_tall_white_fountain(message):
"""
This function checks for one of the prompts from the Baseline Test from
Blade Runner 2049.
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
if msg == "Do you like fire, earth, air or water?" \
or msg == "Do you like skipping around in the water?" \
or msg == "When you were little did you ever fall into a Fountain?":
await asyncio.sleep(average_typing_speed * 4)
await message.channel.send("A Tall White Fountain.")
elif msg == "Against the dark." \
or msg == "How did the white Fountain make you feel?":
await asyncio.sleep(average_typing_speed * 5)
await message.channel.send("A tall white fountain played.")
else:
pass
else:
pass
@bot.listen("on_message")
async def stem(message):
"""
"""
global average_typing_speed
if message.author != bot.user:
msg = message.content
if msg == "Did you pick asparagus stems?" \
or msg == "Do you have a heart?" \
or msg == "Have you been to the source of a river?" \
or msg == "Have you ever been in a legal battle?" \
or msg == "Have you planted things in the ground?" \
or msg == "Is it a slang word for people's legs?" \
or msg == "What comes from something else?" \
or msg == "What did she look like?" \
or msg == "When's the first time you gave a flower to a girl?":
await asyncio.sleep(average_typing_speed)
await message.channel.send("Stem.")
else:
pass
else:
pass
@bot.listen("on_message")
async def style_guide_e125(message):
"""
"""
global average_typing_speed, names
if message.author != bot.user:
msg = message.content
if any(name in msg for name in names) \
and "you look up" in msg \
and "E125" in msg \
and "style guide" in msg \
and "?" in msg:
await asyncio.sleep(average_typing_speed * 15)
await message.channel.send("""
In PEP8, E125 refers to a continuation line with same indent as next logical line.
""")
elif any(name in
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 9 14:10:25 2020.
@author: pielsticker
"""
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.python.keras import backend as K
#%%
class EmptyModel(models.Model):
"""Base Model class."""
def __init__(
self,
inputs,
outputs,
inputshape,
num_classes,
no_of_inputs=1,
name="New_Model",
):
"""
Initialize an empty keras model.
Aside from the inputs and outputs for the instantiation of the
Model class from Keras, the EmptyModel class also gets as
parameters the input shape of the data, the no. of classes
of the labels as well as how many times the input shall be
used.
Parameters
----------
inputs : keras.Input object or list of keras.Input objects.
Inputs for the instantiation of the Model class from Keras.
outputs : Outputs of the last layer.
Outputs for the instantiation of the Model class from Keras.
inputshape : ndarray
Shape of the features of the training data set.
num_classes : ndarray
Shape of the labels of the training data set.
no_of_inputs : int, optional
Number of times the input shall be used in the Model.
The default is 1.
name : str, optional
Name of the model.
The default is 'New_Model'.
Returns
-------
None.
"""
self.inputshape = inputshape
self.num_classes = num_classes
self.no_of_inputs = no_of_inputs
super(EmptyModel, self).__init__(
inputs=inputs, outputs=outputs, name=name
)
def get_config(self):
"""
Overwrite get_config method.
For serialization, all input parameters of the model are added to
the get_config method from the keras.Model class.
Returns
-------
config : dict
Configuration of the model.
"""
# For serialization with 'custom_objects'
config = super(EmptyModel, self).get_config()
config["inputshape"] = self.inputshape
config["num_classes"] = self.num_classes
config["no_of_inputs"] = self.no_of_inputs
return config
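# Hedged sketch (assumption, not part of the original module): because get_config()
# returns all constructor arguments, a saved model is intended to be reloaded by
# passing the subclass via custom_objects. The file name is arbitrary and the exact
# reload behaviour depends on the Keras version.
def _example_reload(path="custom_mlp.h5"):
    return models.load_model(path, custom_objects={"CustomMLP": CustomMLP})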
class CustomMLP(EmptyModel):
"""A neural net with some hidden layers, but no convolutions."""
def __init__(self, inputshape, num_classes):
"""
Initialize model with dense and batch norm layers.
Parameters
----------
inputshape : ndarray
Shape of the features of the training data set.
num_classes : ndarray
Shape of the labels of the training data set.
Returns
-------
None.
"""
self.input_1 = layers.Input(shape=inputshape, name="input_1")
self.flatten_1 = layers.Flatten(name="flatten1")(self.input_1)
self.drop_1 = layers.Dropout(rate=0.5, name="drop_1")(self.flatten_1)
self.dense_1 = layers.Dense(
units=64, activation="relu", name="dense1"
)(self.drop_1)
self.batch_norm_1 = layers.BatchNormalization(name="batch_norm_1")(
self.dense_1
)
self.dense_2 = layers.Dense(
units=num_classes, activation="softmax", name="dense2"
)(self.batch_norm_1)
super(CustomMLP, self).__init__(
inputs=self.input_1,
outputs=self.dense_2,
inputshape=inputshape,
num_classes=num_classes,
name="Custom_MLP",
)
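# Hedged usage sketch (not part of the original module): the models defined here
# take the spectrum shape and the number of label classes; (1121, 1) is an
# arbitrary example shape for a 1-D XPS spectrum.
def _example_build_mlp():
    model = CustomMLP(inputshape=(1121, 1), num_classes=4)
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model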
class ClassificationCNN(EmptyModel):
"""A CNN for XPS data with only one phase."""
def __init__(self, inputshape, num_classes):
"""
Initialize model with convolutional layers.
A CNN with three convolutional layers of different kernel size
at the beginning. Works well for learning across scales.
This is to be used for classification -> softmax activation in
the last layer.
Parameters
----------
inputshape : ndarray
Shape of the features of the training data set.
num_classes : ndarray
Shape of the labels of the training data set.
Returns
-------
None.
"""
self.input_1 = layers.Input(shape=inputshape)
self.conv_1_short = layers.Conv1D(
filters=4,
kernel_size=5,
strides=1,
padding="same",
activation="relu",
name="conv_1_short",
)(self.input_1)
self.conv_1_medium = layers.Conv1D(
filters=4,
kernel_size=10,
strides=1,
padding="same",
activation="relu",
name="conv_1_medium",
)(self.input_1)
self.conv_1_long = layers.Conv1D(
filters=4,
kernel_size=15,
strides=1,
padding="same",
activation="relu",
name="conv_1_long",
)(self.input_1)
sublayers = [self.conv_1_short, self.conv_1_medium, self.conv_1_long]
merged_sublayers = layers.concatenate(sublayers)
self.conv_2 = layers.Conv1D(
filters=4,
kernel_size=10,
strides=1,
padding="valid",
activation="relu",
name="conv_2",
)(merged_sublayers)
self.conv_3 = layers.Conv1D(
filters=10,
kernel_size=10,
strides=1,
padding="valid",
activation="relu",
name="conv_3",
)(self.conv_2)
self.average_pool_1 = layers.AveragePooling1D(name="average_pool_1")(
self.conv_3
)
self.flatten_1 = layers.Flatten(name="flatten1")(self.average_pool_1)
self.drop_1 = layers.Dropout(rate=0.2, name="drop_1")(self.flatten_1)
self.dense_1 = layers.Dense(
units=1000, activation="relu", name="dense_1"
)(self.drop_1)
self.dense_2 = layers.Dense(
units=num_classes, activation="softmax", name="dense_2"
)(self.dense_1)
no_of_inputs = len(sublayers)
super(ClassificationCNN, self).__init__(
inputs=self.input_1,
outputs=self.dense_2,
inputshape=inputshape,
num_classes=num_classes,
no_of_inputs=no_of_inputs,
name="ClassificationCNN",
)
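# Hedged usage sketch: the three parallel convolutions are concatenated into
# 12 feature maps before conv_2. Input length, class count, and the compile
# settings below are assumptions for illustration only.
#
#     cnn = ClassificationCNN(inputshape=(1121, 1), num_classes=4)
#     cnn.compile(optimizer="adam", loss="categorical_crossentropy")
#     cnn.summary()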
class RegressionCNN(EmptyModel):
"""A CNN for XPS data with mixed phases."""
def __init__(self, inputshape, num_classes):
"""
Initialize model with convolutional layers.
A CNN with three convolutional layers of different kernel size
at the beginning. Works well for learning across scales.
This is to be used for regression on all labels. -> sigmoid
activation in the last layer.
"""
self.input_1 = layers.Input(shape=inputshape)
self.conv_1_short = layers.Conv1D(
filters=12,
kernel_size=5,
strides=1,
padding="same",
activation="relu",
name="conv_1_short",
)(self.input_1)
self.conv_1_medium = layers.Conv1D(
filters=12,
kernel_size=10,
strides=1,
padding="same",
activation="relu",
name="conv_1_medium",
)(self.input_1)
self.conv_1_long = layers.Conv1D(
filters=12,
kernel_size=15,
strides=1,
padding="same",
activation="relu",
name="conv_1_long",
)(self.input_1)
sublayers = [self.conv_1_short, self.conv_1_medium, self.conv_1_long]
merged_sublayers = layers.concatenate(sublayers)
self.conv_2 = layers.Conv1D(
filters=10,
kernel_size=5,
strides=1,
padding="valid",
activation="relu",
name="conv_2",
)(merged_sublayers)
self.conv_3 = layers.Conv1D(
filters=10,
kernel_size=5,
strides=1,
padding="valid",
activation="relu",
name="conv_3",
)(self.conv_2)
self.average_pool_1 = layers.AveragePooling1D(name="average_pool_1")(
self.conv_3
)
self.flatten_1 = layers.Flatten(name="flatten1")(self.average_pool_1)
self.drop_1 = layers.Dropout(rate=0.2, name="drop_1")(self.flatten_1)
self.dense_1 = layers.Dense(
units=4000, activation="relu", name="dense_1"
)(self.drop_1)
self.dense_2 = layers.Dense(
units=num_classes, activation="sigmoid", name="dense_2"
)(self.dense_1)
self.output_norm = layers.Lambda(
lambda x: x / tf.reshape(K.sum(x, axis=-1), (-1, 1)),
name="output_normalization",
)(self.dense_2)
no_of_inputs = len(sublayers)
super(RegressionCNN, self).__init__(
inputs=self.input_1,
outputs=self.output_norm,
inputshape=inputshape,
num_classes=num_classes,
no_of_inputs=no_of_inputs,
name="RegressionCNN",
)
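# Hedged usage sketch: the Lambda layer divides the sigmoid outputs by their
# row sum, so each prediction can be read as phase fractions that sum to one.
# The shapes and the loss below are assumptions for illustration only.
#
#     reg = RegressionCNN(inputshape=(1121, 1), num_classes=4)
#     reg.compile(optimizer="adam", loss="mean_absolute_error")
#     # fractions = reg.predict(X)   # rows of `fractions` sum to ~1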
class ClassificationCNN2D(EmptyModel):
"""
A CNN with three convolutional layers of different kernel size at
the beginning. Works well for learning across scales.
This is to be used for classification -> softmax activation in the
last layer.
2D model for e.g. MNIST.
"""
def __init__(self, inputshape, num_classes):
self.input_1 = layers.Input(shape=inputshape)
self.conv_1_short = layers.Conv2D(
filters=4,
kernel_size=5,
strides=1,
padding="same",
activation="relu",
name="conv_1_short",
)(self.input_1)
self.conv_1_medium = layers.Conv2D(
filters=4,
kernel_size=10,
padding="same",
activation="relu",
name="conv_1_medium",
)(self.input_1)
self.conv_1_long = layers.Conv2D(
filters=4,
kernel_size=15,
padding="same",
activation="relu",
name="conv_1_long",
)(self.input_1)
sublayers = [self.conv_1_short, self.conv_1_medium, self.conv_1_long]
merged_sublayers = layers.concatenate(sublayers)
self.conv_2 = layers.Conv2D(
filters=4,
kernel_size=5,
padding="valid",
activation="relu",
name="conv_2",
)(merged_sublayers)
self.average_pool_1 = layers.AveragePooling2D(name="average_pool_1")(
self.conv_2
)
self.flatten_1 = layers.Flatten(name="flatten1")(self.average_pool_1)
self.drop_1 = layers.Dropout(rate=0.2, name="drop_1")(self.flatten_1)
self.dense_1 = layers.Dense(
units=1000, activation="relu", name="dense_1"
)(self.drop_1)
self.dense_2 = layers.Dense(
units=num_classes, activation="softmax", name="dense_2"
)(self.dense_1)
no_of_inputs = len(sublayers)
super(ClassificationCNN2D, self).__init__(
inputs=self.input_1,
outputs=self.dense_2,
inputshape=inputshape,
num_classes=num_classes,
no_of_inputs=no_of_inputs,
name="ClassificationCNN2D",
)
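# Hedged usage sketch (MNIST-like input is an assumption): 28x28 grayscale
# images with ten classes.
#
#     cnn2d = ClassificationCNN2D(inputshape=(28, 28, 1), num_classes=10)
#     cnn2d.compile(optimizer="adam", loss="sparse_categorical_crossentropy")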
### RESNET50 implementation ###
class IdentityBlock(models.Model):
"""Model implementing the IdentityBlock in ResNets."""
def __init__(self, filters, kernel_size_2, stage, block, strides=1):
"""
Initialize the layers.
IdentityBlocks contain a main path (3 convolutional layers with
subsequent batch norm layers) and a shortcut path
without convolutional layers.
Parameters
----------
filters : tuple
Filter sizes for the 3 convolutional layers on the main path.
The third filter size must match the number of channels of the
block input so that the shortcut addition is valid.
kernel_size_2 : int
The kernel size of the middle convolutional layer on the
main path.
stage : int
Current stage label, used for generating layer names.
1, 2, ...
block : str
Current block label, used for generating layer names.
"a", "b", ...
strides : int, optional
Strides for the first convolutional layer in the block.
The default is 1.
Returns
-------
None.
"""
name = str(stage) + str(block) + "_ID"
super(IdentityBlock, self).__init__(name=name)
# Store filters
filter1, filter2, filter3 = filters
### Main Path ###
# Component 1
self.conv_1 = layers.Conv1D(
filters=filter1,
kernel_size=1,
strides=strides,
padding="valid",
kernel_initializer=glorot_uniform(seed=0),
name=name + "_conv1",
)
self.batch_1 = layers.BatchNormalization(axis=1, name=name + "_bn1")
# Component 2
self.conv_2 = layers.Conv1D(
filters=filter2,
kernel_size=kernel_size_2,
strides=strides,
padding="same",
kernel_initializer=glorot_uniform(seed=0),
name=name + "_conv2",
)
self.batch_2 = layers.BatchNormalization(axis=1, name=name + "_bn2")
# Component 3
self.conv_3 = layers.Conv1D(
filters=filter3,
kernel_size=1,
strides=strides,
padding="valid",
kernel_initializer=glorot_uniform(seed=0),
name=name + "_conv3",
)
self.batch_3 = layers.BatchNormalization(axis=1, name=name + "_bn3")
def call(self, x, training=False):
"""
Call the model on new inputs.
There is no convolution in the shortcut path.
Parameters
----------
x : A tensor or list of tensors.
Inputs for the forward pass.
training : bool, optional
Boolean or boolean scalar tensor, indicating whether to run
the model in training mode or inference mode.
Returns
-------
tensor or list of tensors
Model output.
"""
# Intermediate save of the input.
x_shortcut = x
### Main Path ###
x = self.conv_1(x)
x = self.batch_1(x, training=training)
x = tf.nn.relu(x)
x = self.conv_2(x)
x = self.batch_2(x, training=training)
x = tf.nn.relu(x)
x = self.conv_3(x)
x = self.batch_3(x, training=training)
# Final step: Add shortcut to main path.
x += x_shortcut
return tf.nn.relu(x)
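# Hedged usage sketch (shapes and filter sizes are assumptions): because the
# shortcut has no convolution, the third filter size must equal the channel
# count of the input so the residual addition works.
#
#     block = IdentityBlock(filters=(16, 16, 64), kernel_size_2=3,
#                           stage=2, block="b")
#     y = block(tf.ones((8, 100, 64)))   # output shape (8, 100, 64)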
class ConvBlock(models.Model):
"""Model implementing the ConvBlock in ResNets."""
def __init__(self, filters, kernel_size_2, stage, block, strides=2):
"""
Initialize the layers.
ConvBlocks contain a main path (3 convolutional layers with
subsequent batch norm layers) and a shortcut path
(3 convolutional layers with subsequent batch norm layers).
Parameters
----------
filters : tuple
Filter sizes for the 3 convolutional layers on the main path.
The third filter is used in the shortcut path, too.
kernel_size_2 : int
The kernel size of the middle convolutional layer on the
main path.
stage : int
| |
<gh_stars>0
"""
This module generates OpenQL programs for randomized benchmarking and
character benchmarking experiments.
"""
from os.path import join
import numpy as np
from pycqed.measurement.randomized_benchmarking import \
randomized_benchmarking as rb
from pycqed.measurement.openql_experiments import openql_helpers as oqh
from pycqed.measurement.randomized_benchmarking.two_qubit_clifford_group \
import SingleQubitClifford, TwoQubitClifford, common_cliffords
def randomized_benchmarking(qubits: list, platf_cfg: str,
nr_cliffords, nr_seeds: int,
net_cliffords: list=[0],
max_clifford_idx: int=11520,
flux_codeword: str='cz',
simultaneous_single_qubit_RB=False,
initialize: bool=True,
interleaving_cliffords=[None],
program_name: str='randomized_benchmarking',
cal_points: bool=True,
f_state_cal_pts: bool=True,
sim_cz_qubits: list = None,
recompile: bool=True):
'''
Input pars:
qubits: list of ints specifying qubit indices.
Based on its length this function detects whether it should
generate a single- or two-qubit RB sequence.
platf_cfg: filename of the platform config file
nr_cliffords: list nr_cliffords for which to generate RB seqs
nr_seeds: int nr_seeds for which to generate RB seqs
net_cliffords: list of ints index of net clifford the sequence
should perform. See examples below on how to use this.
Important clifford indices
0 -> Idx
3 -> rx180
3*24+3 -> {rx180 q0 | rx180 q1}
4368 -> CZ
max_clifford_idx: Sets the maximum clifford group index from which
to sample random cliffords.
Important clifford indices
24 -> Size of the single qubit Cl group
576 -> Size of the single qubit like class
contained in the two qubit Cl group
11520 -> Size of the complete two qubit Cl group
initialize: if True initializes qubits to 0, disable for restless
tuning
interleaving_cliffords: list of integers which specifies which cliffords
to interleave the sequence with (for interleaved RB)
program_name: some string that can be used as a label.
cal_points: bool whether to replace the last two elements with
calibration points, set to False if you want
to measure a single element (for e.g. optimization)
sim_cz_qubits:
A list of qubit indices on which a simultaneous cz
instruction must be applied. This is for characterizing
CZ gates that are intended to be performed in parallel
with other CZ gates.
recompile: True -> compiles the program,
'as needed' -> compares the program to the timestamp of the config
and checks that it exists; recompiles if required.
False -> compares the program to the timestamp of the config;
raises a ValueError if compilation would be required.
If the program is more recent than the config
it returns an empty OpenQL program object with
the intended filename that can be used to upload the
previously compiled file.
Returns:
p: OpenQL Program object
***************************************************************************
Examples:
1. Single qubit randomized benchmarking:
p = cl_oql.randomized_benchmarking(
qubits=[0],
nr_cliffords=[2, 4, 8, 16, 32, 128, 512, 1024],
nr_seeds=1, # for CCL memory reasons
platf_cfg=qubit.cfg_openql_platform_fn(),
program_name='RB_{}'.format(i))
2. Two qubit simultaneous randomized benchmarking:
p = cl_oql.randomized_benchmarking(
qubits=[0, 1], # simultaneous RB on both qubits
simultaneous_single_qubit_RB=True,
nr_cliffords=[2, 4, 8, 16, 32, 128, 512, 1024],
nr_seeds=1, # for CCL memory reasons
platf_cfg=qubit.cfg_openql_platform_fn(),
program_name='RB_{}'.format(i))
3. Single qubit interleaved randomized benchmarking:
p = cl_oql.randomized_benchmarking(
qubits=[0],
interleaving_cliffords=[None, 0, 16, 3],
cal_points=False # relevant here because of data binning
nr_cliffords=[2, 4, 8, 16, 32, 128, 512, 1024],
nr_seeds=1,
platf_cfg=qubit.cfg_openql_platform_fn(),
program_name='Interleaved_RB_s{}_int{}_ncl{}_{}'.format(i))
'''
p = oqh.create_program(program_name, platf_cfg)
# attribute gets added to the program to help find the output files
p.filename = join(p.output_dir, p.name + '.qisa') # FIXME: platform dependency
if not oqh.check_recompilation_needed(
program_fn=p.filename, platf_cfg=platf_cfg, recompile=recompile):
return p
if len(qubits) == 1:
qubit_map = {'q0': qubits[0]}
number_of_qubits = 1
Cl = SingleQubitClifford
elif len(qubits) == 2 and not simultaneous_single_qubit_RB:
qubit_map = {'q0': qubits[0],
'q1': qubits[1]}
number_of_qubits = 2
Cl = TwoQubitClifford
elif len(qubits) == 2 and simultaneous_single_qubit_RB:
qubit_map = {'q0': qubits[0],
'q1': qubits[1]}
# arguments used to generate 2 single qubit sequences
number_of_qubits = 2
Cl = SingleQubitClifford
else:
raise NotImplementedError()
for seed in range(nr_seeds):
for j, n_cl in enumerate(nr_cliffords):
for interleaving_cl in interleaving_cliffords:
if not simultaneous_single_qubit_RB:
cl_seq = rb.randomized_benchmarking_sequence(
n_cl, number_of_qubits=number_of_qubits,
desired_net_cl=None, # net_clifford,
max_clifford_idx=max_clifford_idx,
interleaving_cl=interleaving_cl
)
net_cl_seq = rb.calculate_net_clifford(cl_seq, Cl)
cl_seq_decomposed = []
for cl in cl_seq:
# FIXME: hacking in exception for benchmarking only CZ
# (not as a member of CNOT group)
if cl == -4368:
cl_seq_decomposed.append([('CZ', ['q0', 'q1'])])
else:
cl_seq_decomposed.append(Cl(cl).gate_decomposition)
for net_clifford in net_cliffords:
recovery_to_idx_clifford = net_cl_seq.get_inverse()
recovery_clifford = Cl(
net_clifford)*recovery_to_idx_clifford
cl_seq_decomposed_with_net = cl_seq_decomposed + \
[recovery_clifford.gate_decomposition]
k = oqh.create_kernel('RB_{}Cl_s{}_net{}_inter{}'.format(
int(n_cl), seed, net_clifford, interleaving_cl), p)
if initialize:
for qubit_idx in qubit_map.values():
k.prepz(qubit_idx)
for gates in cl_seq_decomposed_with_net:
for g, q in gates:
if isinstance(q, str):
k.gate(g, [qubit_map[q]])
elif isinstance(q, list):
if sim_cz_qubits is None:
k.gate("wait", list(qubit_map.values()), 0)
k.gate(flux_codeword, list(qubit_map.values()),) # fix for QCC
k.gate("wait", list(qubit_map.values()), 0)
else:
# A simultaneous CZ is applied to characterize cz gates that
# have been calibrated to be used in parallel.
k.gate("wait", list(qubit_map.values())+sim_cz_qubits, 0)
k.gate(flux_codeword, list(qubit_map.values()),) # fix for QCC
k.gate(flux_codeword, sim_cz_qubits) # fix for QCC
k.gate("wait", list(qubit_map.values())+sim_cz_qubits, 0)
# FIXME: This hack is required to align multiplexed RO in openQL..
k.gate("wait", list(qubit_map.values()), 0)
for qubit_idx in qubit_map.values():
k.measure(qubit_idx)
k.gate("wait", list(qubit_map.values()), 0)
p.add_kernel(k)
elif simultaneous_single_qubit_RB:
for net_clifford in net_cliffords:
k = oqh.create_kernel('RB_{}Cl_s{}_net{}_inter{}'.format(
int(n_cl), seed, net_clifford, interleaving_cl), p)
if initialize:
for qubit_idx in qubit_map.values():
k.prepz(qubit_idx)
# FIXME: Gate seqs is a hack for failing openql scheduling
gate_seqs = [[], []]
for gsi, q_idx in enumerate(qubits):
cl_seq = rb.randomized_benchmarking_sequence(
n_cl, number_of_qubits=1,
desired_net_cl=net_clifford,
interleaving_cl=interleaving_cl)
for cl in cl_seq:
gates = Cl(cl).gate_decomposition
# for g, q in gates:
# k.gate(g, q_idx)
# FIXME: THIS is a hack because of OpenQL
# scheduling issues #157
gate_seqs[gsi] += gates
# OpenQL #157 HACK
l = max([len(gate_seqs[0]), len(gate_seqs[1])])
for gi in range(l):
for gj, q_idx in enumerate(qubits):
# gj = 0
# q_idx = 0
try: # for possible different lengths in gate_seqs
g = gate_seqs[gj][gi]
k.gate(g[0], [q_idx])
except IndexError as e:
pass
# end of #157 HACK
# FIXME: This hack is required to align multiplexed RO in openQL..
k.gate("wait", list(qubit_map.values()), 0)
for qubit_idx in qubit_map.values():
k.measure(qubit_idx)
k.gate("wait", list(qubit_map.values()), 0)
p.add_kernel(k)
if cal_points:
if number_of_qubits == 1:
p = oqh.add_single_qubit_cal_points(
p, qubit_idx=qubits[0],
f_state_cal_pts=f_state_cal_pts)
elif number_of_qubits == 2:
if f_state_cal_pts:
combinations = ['00', '01', '10', '11', '02', '20', '22']
else:
combinations = ['00', '01', '10', '11']
p = oqh.add_multi_q_cal_points(p, qubits=qubits,
combinations=combinations)
p = oqh.compile(p)
return p
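# Hedged usage sketch (not from the original module): a minimal single-qubit RB
# call; the qubit index, platform file, and program name are assumptions.
#
#     p = randomized_benchmarking(
#         qubits=[2], platf_cfg='config_cc_light.json',
#         nr_cliffords=[2, 4, 8, 16, 32], nr_seeds=5,
#         program_name='RB_q2')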
def character_benchmarking(
qubits: list, platf_cfg: str,
nr_cliffords, nr_seeds: int,
interleaving_cliffords=[None],
program_name: str='character_benchmarking',
cal_points: bool=True, f_state_cal_pts: bool=True,
flux_codeword='cz',
recompile: bool=True):
"""
Create OpenQL program to perform two-qubit character benchmarking.
Character benchmarking is described in:
https://arxiv.org/abs/1808.00358 (theory)
https://arxiv.org/abs/1811.04002 (implementation in Si/SiGe spins)
Two-qubit character benchmarking:
q0: P - C1 - C2 - C3 - ... - Cn- R - M
q1: P - C1 - C2 - C3 - ... - Cn- R - M
P -> Single qubit Paulis. Single qubit Paulis are chosen so as to
prepare in |00>, |01>, |10> and |11>.
N.B. data should be averaged over all single qubit Paulis.
Ci -> Single qubit Cliffords, different seqs for both qubits.
R -> Recovery Clifford so that seq of C1 - Cn correspond to Idx.
M -> Measurement in Z-basis.
Outcomes should be averaged according to the "character function".
Averaging scheme:
seeds (average over different randomizations)
nr of cliffords (perform for different nr of cliffords)
paulis (perform for different Paulis)
"""
assert len(qubits) == 2
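# Hedged usage sketch (qubit indices, platform file, and program name are
# assumptions for illustration):
#
#     p = character_benchmarking(
#         qubits=[0, 1], platf_cfg='config_cc_light.json',
#         nr_cliffords=[2, 4, 8], nr_seeds=10,
#         program_name='char_bench_q0_q1')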
p = oqh.create_program(program_name, platf_cfg)
# attribute gets added to the program to help find the output files
p.filename = join(p.output_dir, p.name + '.qisa')
if not oqh.check_recompilation_needed(
program_fn=p.filename, platf_cfg=platf_cfg, recompile=recompile):
return p
qubit_map = {'q0': qubits[0], 'q1': qubits[1]}
Cl = TwoQubitClifford
paulis = {'00': ['II', 'IZ', 'ZI', 'ZZ'],
'01': ['IX', 'IY', 'ZX', 'ZY'],
'10': ['XI', 'XZ', 'YI', 'YZ'],
'11': ['XX', 'XY', 'YX', 'YY']}
for seed in range(nr_seeds):
for j, n_cl in enumerate(nr_cliffords):
for interleaving_cl in interleaving_cliffords:
cl_seq = rb.randomized_benchmarking_sequence(
n_cl, number_of_qubits=2,
desired_net_cl=0, # desired to do identity
max_clifford_idx=567,
# The benchmarking group is the single qubit Clifford group
# for two qubits this corresponds to all single qubit like
# Cliffords.
interleaving_cl=interleaving_cl)
cl_seq_decomposed = []
# first element not included in decomposition because it will
# be merged with the character paulis
for cl in cl_seq[1:]:
# hacking in exception for benchmarking only CZ
# (not as a member of | |
<reponame>Lingfeng-Wei/smart
import smart
import numpy as np
import sys, os, os.path, time
from astropy.table import Table
from astropy.io import fits
from numpy.linalg import inv, det
from ..utils.interpolations import trilinear_interpolation
##############################################################################################################
def InterpModel(teff, logg=4, metal=0, alpha=0, modelset='phoenix-aces-agss-cond-2011', instrument='nirspec', order='O33'):
#print('Parameters', teff, logg, modelset, instrument, order)
FULL_PATH = os.path.realpath(__file__)
BASE, NAME = os.path.split(FULL_PATH)
# Check the model set and instrument
if instrument.lower() == 'nirspec':
path = BASE + '/../libraries/%s/%s-O%s/'%(smart.ModelSets[modelset.lower()], instrument.upper(), order.upper())
else:
path = BASE + '/../libraries/%s/%s-%s/'%(smart.ModelSets[modelset.lower()], instrument.upper(), order.upper())
Gridfile = BASE + '/../libraries/%s/%s_gridparams.csv'%(smart.ModelSets[modelset.lower()], smart.ModelSets[modelset.lower()])
if modelset.lower() == 'btsettl08':
path = BASE + '/../libraries/btsettl08/NIRSPEC-O%s-RAW/'%order
Gridfile = BASE + '/../libraries/btsettl08/btsettl08_gridparams.csv'
# Read the grid file
T1 = Table.read(Gridfile)
###################################################################################
def GetModel(temp, wave=False, **kwargs):
logg = kwargs.get('logg', 4.5)
metal = kwargs.get('metal', 0)
alpha = kwargs.get('alpha', 0)
gridfile = kwargs.get('gridfile', None)
instrument = kwargs.get('instrument', 'nirspec')
order = kwargs.get('order', None)
#print(temp, logg, metal, alpha)
if gridfile is None:
raise ValueError('Model gridfile must be provided.')
if modelset.lower() == 'btsettl08':
filename = 'btsettl08_t'+ str(int(temp.data[0])) + '_g' + '{0:.2f}'.format(float(logg)) + '_z-' + '{0:.2f}'.format(float(metal)) + '_en' + '{0:.2f}'.format(float(alpha)) + '_NIRSPEC-O' + str(order) + '-RAW.txt'
elif modelset.lower() == 'sonora':
if instrument.lower() == 'nirspec':
filename = '%s'%smart.ModelSets[modelset.lower()] + '_t{0:03d}'.format(int(temp.data[0])) + '_g{0:.2f}'.format(float(logg)) + '_FeH0.00_Y0.28_CO1.00' + '_%s-O%s.fits'%(instrument.upper(), order.upper())
else:
filename = '%s'%smart.ModelSets[modelset.lower()] + '_t{0:03d}'.format(int(temp.data[0])) + '_g{0:.2f}'.format(float(logg)) + '_FeH0.00_Y0.28_CO1.00' + '_%s-%s.fits'%(instrument.upper(), order.upper())
elif modelset.lower() == 'marcs-apogee-dr15':
cm = kwargs.get('cm', 0)
nm = kwargs.get('nm', 0)
filename = '%s'%smart.ModelSets[modelset.lower()] + '_t{0:03d}'.format(int(temp.data[0])) + '_g{0:.2f}'.format(float(logg)) + '_z{0:.2f}'.format(float(metal)) + '_en{0:.2f}'.format(float(alpha)) + '_cm{0:.2f}'.format(float(cm)) + '_nm{0:.2f}'.format(float(nm)) + '_%s-%s.fits'%(instrument.upper(), order.upper())
else:
filename = '%s'%smart.ModelSets[modelset.lower()] + '_t{0:03d}'.format(int(temp.data[0])) + '_g{0:.2f}'.format(float(logg)) + '_z{0:.2f}'.format(float(metal)) + '_en{0:.2f}'.format(float(alpha)) + '_%s-%s.fits'%(instrument.upper(), order.upper())
# Read in the model FITS file
if modelset.lower() == 'btsettl08':
Tab = Table.read(path+filename, format='ascii.tab', names=['wave', 'flux'])
else:
Tab = Table.read(path+filename)
# Return the model (wave of flux)
if wave:
return Tab['wave']
else:
return Tab['flux']
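# Hedged usage sketch (the parameter values are assumptions): with the default
# model set and instrument, an off-grid temperature is served by interpolating
# between the bracketing grid models, e.g.
#
#     wave, flux = InterpModel(3550, logg=5.0, metal=0.0, alpha=0.0)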
###################################################################################
# Check if the model already exists (grid point)
if modelset.lower() == 'sonora':
if (teff, logg) in zip(T1['teff'], T1['logg']):
metal, ys = 0, 0.28
index0 = np.where( (T1['teff'] == teff) & (T1['logg'] == logg) & (T1['FeH'] == metal) & (T1['Y'] == ys) )
#flux2 = GetModel(T1['teff'][index0], T1['logg'][index0], T1['M_H'][index0], modelset=modelset )
#waves2 = GetModel(T1['teff'][index0], T1['logg'][index0], T1['M_H'][index0], modelset=modelset, wave=True)
flux2 = GetModel(T1['teff'][index0], logg=T1['logg'][index0], metal=T1['FeH'][index0], alpha=T1['Y'][index0], instrument=instrument, order=order, gridfile=T1)
waves2 = GetModel(T1['teff'][index0], logg=T1['logg'][index0], metal=T1['FeH'][index0], alpha=T1['Y'][index0], instrument=instrument, order=order, gridfile=T1, wave=True)
return waves2, flux2
else:
if (teff, logg, metal, alpha) in zip(T1['teff'], T1['logg'], T1['M_H'], T1['en']):
index0 = np.where( (T1['teff'] == teff) & (T1['logg'] == logg) & (T1['M_H'] == metal) & (T1['en'] == alpha) )
#flux2 = GetModel(T1['teff'][index0], T1['logg'][index0], T1['M_H'][index0], modelset=modelset )
#waves2 = GetModel(T1['teff'][index0], T1['logg'][index0], T1['M_H'][index0], modelset=modelset, wave=True)
flux2 = GetModel(T1['teff'][index0], logg=T1['logg'][index0], metal=T1['M_H'][index0], alpha=T1['en'][index0], instrument=instrument, order=order, gridfile=T1)
waves2 = GetModel(T1['teff'][index0], logg=T1['logg'][index0], metal=T1['M_H'][index0], alpha=T1['en'][index0], instrument=instrument, order=order, gridfile=T1, wave=True)
return waves2, flux2
try:
if modelset.lower() == 'sonora':
metal, alpha = 0, 0.28
# Get the nearest models to the gridpoint (teff)
x0 = np.max(T1['teff'][np.where(T1['teff'] <= teff)])
x1 = np.min(T1['teff'][np.where(T1['teff'] >= teff)])
#print(x0, x1)
# Get the nearest grid point to logg
y0 = np.max(list(set(T1['logg'][np.where( (T1['teff'] == x0) & (T1['logg'] <= logg) )]) &
set(T1['logg'][np.where( (T1['teff'] == x1) & (T1['logg'] <= logg) )])))
y1 = np.min(list(set(T1['logg'][np.where( (T1['teff'] == x0) & (T1['logg'] >= logg) )]) &
set(T1['logg'][np.where( (T1['teff'] == x1) & (T1['logg'] >= logg) )])))
#print(y0, y1)
# Get the nearest grid point to [M/H]
z0 = np.max(list(set(T1['FeH'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['FeH'] <= metal) )]) &
set(T1['FeH'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['FeH'] <= metal) )])))
z1 = np.min(list(set(T1['FeH'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['FeH'] >= metal) )]) &
set(T1['FeH'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['FeH'] >= metal) )])))
#print(z0, z1)
# Get the nearest grid point to Alpha
t0 = np.max(list(set(T1['Y'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['FeH'] == z0) & (T1['Y'] <= alpha) )]) &
set(T1['Y'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['FeH'] == z1) & (T1['Y'] <= alpha) )])))
t1 = np.min(list(set(T1['Y'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['FeH'] == z0) & (T1['Y'] >= alpha) )]) &
set(T1['Y'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['FeH'] == z1) & (T1['Y'] >= alpha) )])))
#print(t0, t1)
else:
# Get the nearest models to the gridpoint (teff)
x0 = np.max(T1['teff'][np.where(T1['teff'] <= teff)])
x1 = np.min(T1['teff'][np.where(T1['teff'] >= teff)])
#print('teff:', x0, teff, x1)
# Get the nearest grid point to logg
y0 = np.max(list(set(T1['logg'][np.where( (T1['teff'] == x0) & (T1['logg'] <= logg) )]) &
set(T1['logg'][np.where( (T1['teff'] == x1) & (T1['logg'] <= logg) )])))
y1 = np.min(list(set(T1['logg'][np.where( (T1['teff'] == x0) & (T1['logg'] >= logg) )]) &
set(T1['logg'][np.where( (T1['teff'] == x1) & (T1['logg'] >= logg) )])))
#print('logg:', y0, logg, y1)
# Get the nearest grid point to [M/H]
#print(metal)
#print(list(set(T1['M_H'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) )])))
#print(list(set(T1['M_H'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) )])))
#print(list(set(T1['M_H'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['M_H'] <= metal))])))
#print(list(set(T1['M_H'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['M_H'] <= metal))])))
#print(list(set(T1['M_H'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['M_H'] >= metal))])))
#print(list(set(T1['M_H'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['M_H'] >= metal))])))
z0 = np.max(list(set(T1['M_H'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['M_H'] <= metal) )]) &
set(T1['M_H'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['M_H'] <= metal) )])))
z1 = np.min(list(set(T1['M_H'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['M_H'] >= metal) )]) &
set(T1['M_H'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['M_H'] >= metal) )])))
#print('metal:', z0, metal, z1)
# Get the nearest grid point to Alpha
#print(list(set(T1['en'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['M_H'] == z0) )])))
#print(list(set(T1['en'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['M_H'] == z1) )])))
#print(list(set(T1['en'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['M_H'] == z0) & (T1['en'] <= alpha) )])))
#print(list(set(T1['en'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['M_H'] == z1) & (T1['en'] <= alpha) )])))
#print(list(set(T1['en'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['M_H'] == z0) & (T1['en'] >= alpha) )])))
#print(list(set(T1['en'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['M_H'] == z1) & (T1['en'] >= alpha) )])))
t0 = np.max(list(set(T1['en'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['M_H'] == z0) & (T1['en'] <= alpha) )]) &
set(T1['en'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['M_H'] == z1) & (T1['en'] <= alpha) )])))
t1 = np.min(list(set(T1['en'][np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['M_H'] == z0) & (T1['en'] >= alpha) )]) &
set(T1['en'][np.where( (T1['teff'] == x1) & (T1['logg'] == y1) & (T1['M_H'] == z1) & (T1['en'] >= alpha) )])))
#print('alpha:', z0, alpha, z1)
except:
raise ValueError('Model Parameters Teff: %0.3f, logg: %0.3f, [M/H]: %0.3f, Alpha: %0.3f are outside the model grid.'%(teff, logg, metal, alpha))
if modelset.lower() == 'sonora':
# Get the 16 points
ind0000 = np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['FeH'] == z0) & (T1['Y'] == t0) ) # 0000
ind1000 = np.where( (T1['teff'] == x1) & (T1['logg'] == y0) & (T1['FeH'] == z0) & (T1['Y'] == t0) ) # 1000
ind0100 = np.where( (T1['teff'] == x0) & (T1['logg'] == y1) & (T1['FeH'] == z0) & (T1['Y'] == t0) ) # 0100
ind0010 = np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['FeH'] == z1) & (T1['Y'] == t0) ) # 0010
ind0001 = np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['FeH'] == z0) & (T1['Y'] == t1) ) # 0001
ind1001 = np.where( (T1['teff'] == x1) & (T1['logg'] == y0) & (T1['FeH'] == z0) & (T1['Y'] == t1) ) # 1001
ind0101 = np.where( (T1['teff'] == x0) & (T1['logg'] == y1) & (T1['FeH'] == z0) & (T1['Y'] == t1) ) # 0101
ind0011 = np.where( (T1['teff'] == x0) & (T1['logg'] == y0) & (T1['FeH'] == z1) & (T1['Y'] == t1) ) # 0011
ind1011 = | |
<filename>linear_model/model_pick/random_forest/triplets_wei.py<gh_stars>0
import copy
import itertools
import re
import sys
from difflib import SequenceMatcher
import numpy as np
import pandas as pd
import scipy
import scipy.spatial.distance
import scipy.stats
import statsmodels.formula.api as sm
global tax_thr
tax_thr = 0
class Graph:
def __init__(self):
self.nodes = []
self.len = 0
self.wh_len = 0
def add_node(self, n):
present = self.find(n)
if present is None:
self.nodes.append(Node(n))
self.nodes[-1].index = len(self.nodes) - 1
self.len += 1
self.wh_len += 1
return (self.nodes[-1])
else:
return (present)
def find(self, name):
for n in self.nodes:
if n.name == name:
return (n)
return (None)
def add_edge(self, name1, name2):
N = [name1, name2]
present = [self.find(el) for el in N]
for i in range(2):
if present[i] is None:
new_node = self.add_node(N[i])
present[i] = new_node
a = self.find(name1)
b = self.find(name2)
a.neigh.append(b)
b.neigh.append(a)
def add_weighted_edge(self, i, j, pairG):
if (pairG.nodes[i] in pairG.nodes[j].neigh):
self.nodes[i].neigh += [self.nodes[j]]
self.nodes[j].neigh += [self.nodes[i]]
self.nodes[j].neigh = list(set(self.nodes[j].neigh))
self.nodes[i].neigh = list(set(self.nodes[i].neigh))
self.nodes[j].weight[self.nodes[i]] += 1
self.nodes[i].weight[self.nodes[j]] += 1
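# Hedged usage sketch (taxon names are invented): the Graph keeps an
# undirected adjacency structure over taxon names.
#
#     g = Graph()
#     g.add_edge("g__Bacteroides", "g__Prevotella")
#     g.add_edge("g__Bacteroides", "g__Faecalibacterium")
#     g.len                                   # -> 3 nodes
#     len(g.find("g__Bacteroides").neigh)     # -> 2 neighbours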
class Node:
def __init__(self, name):
self.name = name
self.neigh = []
self.color = 'white'
self.index = None
self.weight = {}
# No dots in dimnames!
# in: data with meta_data on pair graph (practically, an edge list), path to counts table
# out: edges list for truplets, table with triple models parameters
def reduce_to_triplets(fstats, counts):
##################################
# Takes in a meta-df for double models!
def table_to_graph(df):
G = Graph()
for i in range(1, len(df)):
edge = df.iloc[i]
G.add_edge(edge[0], edge[1])
return (G)
def tr_neighs(root):
closest = [list(x) for x in itertools.combinations(root.neigh, 2)]
far = {k: k.neigh for k in root.neigh if k.neigh != root}
temp = []
for el in far:
for i in far[el]:
temp.append([el] + [i])
all = closest + temp
all = [el for el in all if (root not in el)]
all = [el + [root] for el in all]
return (all)
def remove_zeroes(df):
# ((df.T == 0).all() == False).all()
# is true, if no lines have zero values
if not ((df.T == 0).all() == False).all():
# Remember one zero line
zero_line = df[(df.T == 0).all()].iloc[0]
# Now remove all zeros
df = df[(df.T != 0).any()]
# And add our zero-line
df.append(zero_line)
return (df)
def md_remove_outliers(df):
# scipy's mahalanobis expects the *inverse* covariance matrix
inv_cov = np.linalg.inv(df.cov().values)
means = df.mean().values
md = df.apply((lambda x: scipy.spatial.distance.mahalanobis(x, means, inv_cov)), axis=1)
# Q =scipy.stats.chi2.cdf(md, df.shape[1])
Q = scipy.stats.chi2.ppf(0.975, df.shape[1])
df = df[md < Q]
return (df)
def remove_outliers(df):
q = df.quantile([0.025, 0.975])
filt_df = df[(df.iloc[:, 0] > q.iloc[0, 0]) &
(df.iloc[:, 1] > q.iloc[0, 1]) &
(df.iloc[:, 2] > q.iloc[0, 2]) &
(df.iloc[:, 0] < q.iloc[1, 0]) &
(df.iloc[:, 1] < q.iloc[1, 1]) &
(df.iloc[:, 2] < q.iloc[1, 2])]
return (filt_df)
all_models = pd.DataFrame([np.nan] * 6,
index=['Response', 'Predictor1', 'Predictor2', 'Coef_p1', 'Coef_p2', 'Intercept']).T
all_models = all_models.drop(0, axis=0)
# Col: species; Row: samples
do = table_to_graph(fstats)
# Recursion depth issue
# tr = copy.deepcopy(do)
tr = table_to_graph(fstats)
# Add zero weight to all edges
# Later this weight will be showing the number of 3-models
# with this particular pair of taxons
# Then erase all edges, as we are making a new graph,
# although containing all the vertices for double graph
for n in tr.nodes:
n.weight = {x: 0 for x in n.neigh}
n.neigh = []
print('Nodes in pair graph: ' + str(do.len))
# For each node see all possible triplets that contain it
# Then check if corresponding triplet linear models are better than pair-models
# If they are --
computed = []
for i in range(do.len):
sets = tr_neighs(do.nodes[i])
# Get IDs of all vertices in all triplets to quickly look them up in our graph
temp = copy.copy(sets)
# Remove set if it has 1 black vertex
# Black vertex means all triplets containing have been accounted for previously
for el in temp:
colors = [j.color for j in el]
if 'black' in colors:
sets.remove(el)
# zip(set_indices, set_names)
indices_and_names = [([el.index for el in j],[el.name for el in j]) for j in sets]
# Now calculate models for the sets
# for ind, na in zip(set_indices, set_names):
for ind, na in indices_and_names:
pairs = list(itertools.permutations([na[0],na[1],na[2]], 2))
# Make sure there are no duplicates
test = any([x[0] == x[1] for x in pairs])
taxons = ['s__','g__', 'f__', 'o__', 'c__', 'p__']
tax_name = taxons[tax_thr]
if not(test):
for pair in pairs:
temp = SequenceMatcher(None, pair[0],pair[1])
temp = temp.find_longest_match(0, len(pair[0]), 0, len(pair[1]))
lcs = pair[0][temp[0]:(temp[0] + temp[2])]
if (tax_name in lcs):
test = True
if test:
continue
# `counts` is a DataFrame with taxa as columns, so this selects the
# three taxa of the current triplet.
temp = counts[[na[0], na[1], na[2]]]
temp = remove_zeroes(temp)
temp = remove_outliers(temp)
orders = [[na[i]] + [p for p in na if p != na[i]] for i in range(len(na))]
for order in orders:
resp = order[0]
pred1 = order[1]
pred2 = order[2]
text = resp + ' ~ ' + pred1 + ' + ' + pred2
if ([resp,pred1,pred2] in computed) or ([resp,pred2,pred1] in computed):
continue
computed.append(order)
if not (temp.empty):
model = sm.ols(formula=text, data=temp).fit()
# Pick a threshold
# First get all F-stats for all 6 possible pair models within a triplet
sub_meta = fstats[
((fstats['Response'] == na[0]) & (fstats['Predictor'] == na[1])) |
((fstats['Response'] == na[0]) & (fstats['Predictor'] == na[2])) |
((fstats['Response'] == na[1]) & (fstats['Predictor'] == na[2])) |
((fstats['Response'] == na[1]) & (fstats['Predictor'] == na[0])) |
((fstats['Response'] == na[2]) & (fstats['Predictor'] == na[0])) |
((fstats['Response'] == na[2]) & (fstats['Predictor'] == na[1]))
]
# Now pick the smallest one
# It is now your threshold for letting the triplet model in
cut = min(sub_meta.loc[:, 'F_Stat'])
if model.f_pvalue < cut:
new_row = pd.DataFrame(
[resp, pred1, pred2, model.f_pvalue, model.params[pred1], model.params[pred2], model.params['Intercept']],
index=['Response', 'Predictor1', 'Predictor2', 'F_Stat', 'Coef_p1', 'Coef_p2', 'Intercept']).T
all_models = all_models.append(new_row)
poss_edges = itertools.combinations(ind, 2)
# Add weights to our graph
for el in poss_edges:
tr.add_weighted_edge(el[0], el[1], do)
# Finally, paint the seed vertex black: we are not coming back here
do.nodes[i].color = 'black'
# Remove zero-neighbor vertices
tr.nodes = [el for el in tr.nodes if len(el.neigh) > 1]
# Remove zero-weight edges from graph
for el in tr.nodes:
el.weight = {x: y for x, y in el.weight.items() if y > 0}
return (tr, all_models)
# in: graph for triplets
# out: adjacency table w/out weights
def turn_3_graph_to_adj_table(graph):
adj = []
for n in graph.nodes:
adj.append([n.name])
for el in n.neigh:
adj[-1] += [el.name]
return (adj)
# in: graph for triplets
# out: list of edges with weights
def turn_3_graph_to_edge_list(graph):
edges = []
for el in graph.nodes:
edges += [sorted([el.name, i.name]) + [el.weight[i]] for i in el.neigh]
temp = [i[0] + '\t' + i[1] + '\t' + str(i[2]) for i in edges]
temp = sorted(list(set(temp)))
return (temp)
def return_tax_names(ID_to_taxon, profiles):
tax_code = {}
profiles = pd.read_table(profiles, header=0, index_col=0, sep='\t', engine = 'python')
with open(ID_to_taxon) as f:
for line in f.read().splitlines():
temp = line.split()
# tax ID is in the 1st column tax name -- 2nd
tax_code[temp[0]] = temp[1]
profiles.index = [tax_code[x] for x in profiles.index]
return (profiles)
def code_tax_names(taxon_to_ID, profiles):
tax_code = {}
profiles = pd.read_table(profiles, header=0, index_col=0, sep='\t', engine = 'python')
with open(taxon_to_ID) as f:
for line in f.read().splitlines():
temp = line.split()
# tax ID is in the 1st column tax name -- 2nd
tax_code[temp[1]] = temp[0]
profiles.index = [tax_code[x] for x in profiles.index]
return (profiles)
def fstat_to_triplet_edges(pair_mod, counts, path_out):
# Make replacements in data to avoid further confusion
# E.g. KEGG considers Ruminococcus to be in Ruminococcaceae
# While Greengenes -- in Lachnospiraceae
def prepair_counts_and_edges(counts, pair_mod):
# counts.index = [re.sub(r'_x(\w+)x$', r'_\1', x) for x in counts.index]
# counts.index = [re.sub(r'_x(\w+)x(?=,)', r'_\1', x) for x in counts.index]
# counts.index = [re.sub(r'c__Erysipelotrichi$|c__Erysipelotrichi(?=,)', 'c__Erysipelotrichia', x) for x in
# counts.index]
# counts.index = [re.sub(r'f__Lachnospiraceae(?=,g__Ruminococcus)', 'f__Ruminococcaceae', x) for x in
# counts.index]
# pair_mod = pair_mod.replace({r'_x(\w+)x$': r'_\1'}, regex=True)
# pair_mod = pair_mod.replace({r'_x(\w+)x(?=,)': r'_\1'}, regex=True)
# pair_mod = pair_mod.replace({r'c__Erysipelotrichi$|c__Erysipelotrichi(?=,)': 'c__Erysipelotrichia'}, regex=True)
# pair_mod = pair_mod.replace({r'f__Lachnospiraceae(?=,g__Ruminococcus)': 'f__Ruminococcaceae'}, regex=True)
# Model calling can't work with commas or brackets in variable names
counts.index = [x.replace(',', '00') for x in counts.index]
counts = counts.T
pair_mod['Response'] = pair_mod['Response'].str.replace(',', '00')
pair_mod['Predictor'] = pair_mod['Predictor'].str.replace(',', '00')
return (counts, pair_mod)
with open(pair_mod) | |
<filename>pygsti/tools/lindbladtools.py
"""
Utility functions relevant to Lindblad forms and projections
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
import scipy.sparse as _sps
from pygsti.tools import matrixtools as _mt
from pygsti.tools.basistools import basis_matrices
def create_elementary_errorgen_dual(typ, p, q=None, sparse=False, normalization_factor='auto'):
"""
Construct a "dual" elementary error generator matrix in the "standard" (matrix-unit) basis.
The elementary error generator that is dual to the one computed by calling
:function:`create_elementary_errorgen` with the same argument. This dual element
can be used to find the coefficient of the original, or "primal" elementary generator.
For example, if `A = sum(c_i * E_i)`, where `E_i` are the elementary error generators given
by :function:`create_elementary_errorgen`), then `c_i = dot(D_i.conj(), A)` where `D_i`
is the dual to `E_i`.
There are four different types of dual elementary error generators: 'H' (Hamiltonian),
'S' (stochastic), 'C' (correlation), and 'A' (active). See arxiv:2103.01928.
Each type transforms an input density matrix differently. The action of an elementary
error generator `L` on an input density matrix `rho` is given by:
Hamiltonian: `L(rho) = -1j/(2d^2) * [ p, rho ]`
Stochastic: `L(rho) = 1/(d^2) p * rho * p`
Correlation: `L(rho) = 1/(2d^2) ( p * rho * q + q * rho * p )`
Active: `L(rho) = 1j/(2d^2) ( p * rho * q - q * rho * p )`
where `d` is the dimension of the Hilbert space, e.g. 2 for a single qubit. Square
brackets denotes the commutator and curly brackets the anticommutator.
`L` is returned as a superoperator matrix that acts on vectorized density matrices.
Parameters
----------
typ : {'H','S','C','A'}
The type of dual error generator to construct.
p : numpy.ndarray
d-dimensional basis matrix.
q : numpy.ndarray, optional
d-dimensional basis matrix; must be non-None if and only if `typ` is `'C'` or `'A'`.
sparse : bool, optional
Whether to construct a sparse or dense (the default) matrix.
normalization_factor : 'auto', 'auto_return', or float, optional
The divisor applied to the dual element. 'auto' (the default)
computes the factor from the overlap with the corresponding primal
elementary generator; 'auto_return' does the same and additionally
returns the computed factor.
Returns
-------
ndarray or Scipy CSR matrix
"""
d = p.shape[0]; d2 = d**2
pdag = p.T.conjugate()
qdag = q.T.conjugate() if (q is not None) else None
if sparse:
elem_errgen = _sps.lil_matrix((d2, d2), dtype=p.dtype)
else:
elem_errgen = _np.empty((d2, d2), dtype=p.dtype)
assert(typ in ('H', 'S', 'C', 'A')), "`typ` must be one of 'H', 'S', 'C', or 'A'"
assert((typ in 'HS' and q is None) or (typ in 'CA' and q is not None)), \
"Wrong number of basis elements provided for %s-type elementary errorgen!" % typ
# Loop through the standard basis as all possible input density matrices
for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx
# Only difference between H/S/C/A is how they transform input density matrices
if typ == 'H':
rho1 = -1j * (p @ rho0 - rho0 @ p) # -1j / (2 * d2) *
elif typ == 'S':
rho1 = (p @ rho0 @ pdag) # 1 / d2 *
elif typ == 'C':
rho1 = (p @ rho0 @ qdag + q @ rho0 @ pdag) # 1 / (2 * d2) *
elif typ == 'A':
rho1 = 1j * (p @ rho0 @ qdag - q @ rho0 @ pdag) # 1j / (2 * d2)
elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten()
return_normalization = bool(normalization_factor == 'auto_return')
if normalization_factor in ('auto', 'auto_return'):
primal = create_elementary_errorgen(typ, p, q, sparse)
if sparse:
normalization_factor = _np.vdot(elem_errgen.toarray().flatten(), primal.toarray().flatten())
else:
normalization_factor = _np.vdot(elem_errgen.flatten(), primal.flatten())
elem_errgen *= _np.asscalar(_np.real_if_close(1 / normalization_factor))
if sparse: elem_errgen = elem_errgen.tocsr()
return (elem_errgen, normalization_factor) if return_normalization else elem_errgen
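# Hedged usage sketch (not part of the original module): a single-qubit dual
# Hamiltonian-type generator built from the Pauli-X matrix.
#
#     import numpy as np
#     sigma_x = np.array([[0., 1.], [1., 0.]], dtype=complex)
#     dual_H = create_elementary_errorgen_dual('H', sigma_x)   # 4x4 superoperator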
def create_elementary_errorgen(typ, p, q=None, sparse=False):
"""
Construct an elementary error generator as a matrix in the "standard" (matrix-unit) basis.
There are four different types of elementary error generators: 'H' (Hamiltonian),
'S' (stochastic), 'C' (correlation), and 'A' (active). See arxiv:2103.01928.
Each type transforms an input density matrix differently. The action of an elementary
error generator `L` on an input density matrix `rho` is given by:
Hamiltonian: `L(rho) = -1j * [ p, rho ]`
Stochastic: `L(rho) = p * rho * p - rho`
Correlation: `L(rho) = p * rho * q + q * rho * p - 0.5 {{p,q}, rho}`
Active: `L(rho) = 1j( p * rho * q - q * rho * p + 0.5 {[p,q], rho} )`
Square brackets denotes the commutator and curly brackets the anticommutator.
`L` is returned as a superoperator matrix that acts on vectorized density matrices.
Parameters
----------
typ : {'H','S','C','A'}
The type of error generator to construct.
p : numpy.ndarray
d-dimensional basis matrix.
q : numpy.ndarray, optional
d-dimensional basis matrix; must be non-None if and only if `typ` is `'C'` or `'A'`.
sparse : bool, optional
Whether to construct a sparse or dense (the default) matrix.
Returns
-------
ndarray or Scipy CSR matrix
"""
d = p.shape[0]; d2 = d**2
if sparse:
elem_errgen = _sps.lil_matrix((d2, d2), dtype=p.dtype)
else:
elem_errgen = _np.empty((d2, d2), dtype=p.dtype)
assert(typ in ('H', 'S', 'C', 'A')), "`typ` must be one of 'H', 'S', 'C', or 'A'"
assert((typ in 'HS' and q is None) or (typ in 'CA' and q is not None)), \
"Wrong number of basis elements provided for %s-type elementary errorgen!" % typ
pdag = p.T.conjugate()
qdag = q.T.conjugate() if (q is not None) else None
if typ in 'CA':
pq_plus_qp = pdag @ q + qdag @ p
pq_minus_qp = pdag @ q - qdag @ p
# Loop through the standard basis as all possible input density matrices
for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx
# Only difference between H/S/C/A is how they transform input density matrices
if typ == 'H':
rho1 = -1j * (p @ rho0 - rho0 @ p) # Add "/2" to have PP ham gens match previous versions of pyGSTi
elif typ == 'S':
pdag_p = (pdag @ p)
rho1 = p @ rho0 @ pdag - 0.5 * (pdag_p @ rho0 + rho0 @ pdag_p)
elif typ == 'C':
rho1 = p @ rho0 @ qdag + q @ rho0 @ pdag - 0.5 * (pq_plus_qp @ rho0 + rho0 @ pq_plus_qp)
elif typ == 'A':
rho1 = 1j * (p @ rho0 @ qdag - q @ rho0 @ pdag + 0.5 * (pq_minus_qp @ rho0 + rho0 @ pq_minus_qp))
elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten()
if sparse: elem_errgen = elem_errgen.tocsr()
return elem_errgen
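# Hedged sketch of the primal/dual pairing described in the docstrings above
# (the coefficient value 0.3 is an arbitrary illustration): the dual element
# recovers the coefficient of its primal inside an error generator.
#
#     import numpy as np
#     sigma_z = np.array([[1., 0.], [0., -1.]], dtype=complex)
#     E = create_elementary_errorgen('H', sigma_z)
#     D = create_elementary_errorgen_dual('H', sigma_z)
#     np.vdot(D.flatten(), (0.3 * E).flatten())   # ~0.3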
def create_lindbladian_term_errorgen(typ, Lm, Ln=None, sparse=False): # noqa N803
"""
Construct the superoperator for a term in the common Lindbladian expansion of an error generator.
Mathematically, for d-dimensional matrices Lm and Ln, this routine
constructs the d^2-dimension Lindbladian matrix L whose action is
given by:
L(rho) = -i [Lm, rho] (when `typ == 'H'`)
or
L(rho) = Ln*rho*Lm^dag - 1/2(rho*Lm^dag*Ln + Lm^dag*Ln*rho) (`typ == 'O'`)
where rho is a density matrix. L is returned as a superoperator
matrix that acts on a vectorized density matrices.
Parameters
----------
typ : {'H', 'O'}
The type of error generator to construct.
Lm : numpy.ndarray
d-dimensional basis matrix.
Ln : numpy.ndarray, optional
d-dimensional basis matrix.
sparse : bool, optional
Whether to construct a sparse or dense (the default) matrix.
Returns
-------
ndarray or Scipy CSR matrix
"""
d = Lm.shape[0]; d2 = d**2
if sparse:
lind_errgen = _sps.lil_matrix((d2, d2), dtype=Lm.dtype)
else:
lind_errgen = _np.empty((d2, d2), dtype=Lm.dtype)
assert(typ in ('H', 'O')), "`typ` must be one of 'H' or 'O'"
assert((typ in 'H' and Ln is None) or (typ in 'O' and Ln is not None)), \
"Wrong number of basis elements provided for %s-type lindblad term errorgen!" % typ
if typ in | |
<reponame>ChrisBarker-NOAA/tamoc<gh_stars>0
"""
Unit tests for the `particle_size_model` module of ``TAMOC``
Provides testing of the classes, methods and functions defined in the
`particle_size_models` module of ``TAMOC``. These tests check the behavior
of the class objects, the results of simulations, and the related object
methods.
The ambient data used here to compute oil properties at `in situ` conditions
are from the `ctd_BM54.cnv` dataset, stored as::
./test/output/test_BM54.nc
This netCDF file is written by the `test_ambient.test_from_ctd` function,
which is run in the following as needed to ensure the dataset is available.
Since the `particle_size_models` module relies on the functions in the
`psf` module to compute results, this set of tests checks the performance
of both modules.
Notes
-----
All of the tests defined herein check the general behavior of each of the
programmed functions--this is not a comparison against measured data. The
results of the hand calculations entered below as sample solutions have been
ground-truthed for their reasonableness. However, passing these tests only
means the programs and their interfaces are working as expected, not that they
have been validated against measurements.
"""
# <NAME>, March 2020, Texas A&M University <<EMAIL>>.
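# A hedged note (not part of the original module): these tests are intended to
# be collected and run with pytest from the package root, e.g.
#
#     pytest ./test/test_particle_size_models.py
#
# where the exact file name is an assumption for illustration.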
from __future__ import (absolute_import, division, print_function)
from tamoc import ambient, blowout
from tamoc import dbm_utilities
from tamoc import particle_size_models as psm
import test_sbm
from datetime import datetime
from netCDF4 import date2num
import os
import numpy as np
from numpy.testing import assert_approx_equal
from numpy.testing import assert_array_almost_equal
# ----------------------------------------------------------------------------
# Helper Functions
# ----------------------------------------------------------------------------
def get_blowout_model():
"""
Compute the inputs defining a synthetic blowout
Create the `ambient.Profile` object, `dbm.FluidMixture` object, and
other parameters defining a synthetic blowout scenario.
Returns
-------
profile : `ambient.Profile` object
Profile containing ambient CTD data
oil : `dbm.FluidMixture` object
A `dbm.FluidMixture` object that contains the chemical description
of an oil mixture.
mass_flux : ndarray
An array of mass fluxes (kg/s) of each pseudo-component in the live-
oil composition.
z0 : float
Release point of a synthetic blowout (m)
Tj : float
Temperature of the released fluids (K)
"""
# Get the CTD data from the requested file
nc = test_sbm.make_ctd_file()
profile = ambient.Profile(nc, chem_names='all')
profile.close_nc()
# Define an oil substance to use
substance={
'composition' : ['n-hexane', '2-methylpentane', '3-methylpentane',
'neohexane', 'n-heptane', 'benzene', 'toluene',
'ethylbenzene', 'n-decane'],
'masses' : np.array([0.04, 0.07, 0.08, 0.09, 0.11, 0.12, 0.15, 0.18,
0.16])
}
# Define the atmospheric gases to track
ca = ['oxygen']
# Define the oil flow rate, gas to oil ratio, and orifice size
q_oil = 20000. # bbl/d
gor = 500. # ft^3/bbl at standard conditions
z0 = 100. # release depth (m)
Tj = profile.get_values(z0, 'temperature') # release temperature (K)
# Import the oil with the desired gas to oil ratio
oil, mass_flux = dbm_utilities.get_oil(substance, q_oil, gor, ca)
return (profile, oil, mass_flux, z0, Tj)
def get_blowout_properties():
"""
Return the properties for the base blowout case
Return the fluid properties and initial conditions for the base blowout
case of the Model Inter-comparison Study: 20,000 bbl/d, 2000 m depth,
GOR of 2000, and a 30 cm orifice.
"""
# Get the properties for the base case
d0 = 0.30
m_gas = 7.4
m_oil = 34.5
rho_gas = 131.8
mu_gas = 0.00002
sigma_gas = 0.06
rho_oil = 599.3
mu_oil = 0.0002
sigma_oil = 0.015
rho = 1037.1
mu = 0.002
return (d0, m_gas, m_oil, rho_gas, mu_gas, sigma_gas, rho_oil, mu_oil,
sigma_oil, rho, mu)
def get_blowout_ans():
"""
Report the correct answer for the base blowout case
"""
de_max_gas = 0.03294393791256645
de_max_oil = 0.007475379384955715
return (de_max_gas, de_max_oil)
def check_properties(rho_gas, mu_gas, sigma_gas, rho_oil, mu_oil, sigma_oil,
rho, mu, psd_obj):
"""
Check that the given properties match the properties inside the object
"""
assert psd_obj.rho_gas == rho_gas
assert psd_obj.mu_gas == mu_gas
assert psd_obj.sigma_gas == sigma_gas
assert psd_obj.rho_oil == rho_oil
assert psd_obj.mu_oil == mu_oil
assert psd_obj.sigma_oil == sigma_oil
assert psd_obj.rho == rho
assert psd_obj.mu == mu
# ----------------------------------------------------------------------------
# Unit Tests
# ----------------------------------------------------------------------------
def test_psm_ModelBase():
"""
Test the `ModelBase` and `PureJet` classes
Test all of the functionality in the `ModelBase` and `PureJet` classes.
These classes used fixed values of the fluid properties and do not rely
on the `dbm` module. Tests include jet of oil and gas, pure oil, and
pure gas.
"""
# Get properties for a blowout
d0, m_gas, m_oil, rho_gas, mu_gas, sigma_gas, rho_oil, mu_oil, \
sigma_oil, rho, mu = get_blowout_properties()
# Load the correct answer to the model properties
de_max_gas, de_max_oil = get_blowout_ans()
# Create a ModelBase object
spill = psm.ModelBase(rho_gas, mu_gas, sigma_gas, rho_oil, mu_oil,
sigma_oil, rho, mu)
# Check that parameters are correct
check_properties(rho_gas, mu_gas, sigma_gas, rho_oil, mu_oil, sigma_oil,
rho, mu, spill)
# Try setting the properties by using the .update() method
spill.update_properties(rho_gas, mu_gas, sigma_gas, rho_oil, mu_oil,
sigma_oil, rho, mu)
# Check that the parameters are still correct
check_properties(rho_gas, mu_gas, sigma_gas, rho_oil, mu_oil, sigma_oil,
rho, mu, spill)
# Try using the method to get a maximum gas and droplet size
de_max_gas_model = spill.get_de_max(0)
de_max_oil_model = spill.get_de_max(1)
# Compare results to the correct answer
assert de_max_gas_model == de_max_gas
assert de_max_oil_model == de_max_oil
# Simulate a given release condition
spill.simulate(d0, m_gas, m_oil)
# Create the particle size distributions
nbins_gas = 10
nbins_oil = 10
de_gas_model, vf_gas_model, de_oil_model, vf_oil_model = \
spill.get_distributions(nbins_gas, nbins_oil)
# Check that the object stores the correct attributes --------------------
assert spill.d0 == d0
assert spill.nbins_gas == nbins_gas
assert np.sum(spill.m_gas) == np.sum(spill.m_gas)
assert spill.nbins_oil == nbins_oil
assert np.sum(spill.m_oil) == np.sum(spill.m_oil)
assert spill.model_gas == 'wang_etal'
assert spill.model_oil == 'sintef'
assert spill.pdf_gas == 'lognormal'
assert spill.pdf_oil == 'rosin-rammler'
# Check that the model stores the right solution -------------------------
assert_approx_equal(spill.d50_gas, 0.01134713688939418, significant=6)
assert_approx_equal(spill.d50_oil, 0.0033149657926870454,significant=6)
assert_approx_equal(spill.de_max_gas, 0.03294393791256645,significant=6)
assert_approx_equal(spill.de_max_oil, 0.007475379384955715,
significant=6)
assert spill.sigma_ln_gas == 0.27
assert_approx_equal(spill.k_oil, -0.6931471805599453,significant=6)
assert spill.alpha_oil == 1.8
de_gas = np.array([0.0057077 , 0.00655033, 0.00751736, 0.00862716,
0.0099008, 0.01136247, 0.01303992, 0.01496502, 0.01717432,
0.01970979])
vf_gas = np.array([0.01545088, 0.0432876 , 0.09350044, 0.15570546,
0.19990978, 0.19788106, 0.15101303, 0.08885147, 0.04030462,
0.01409565])
de_oil = np.array([0.00037551, 0.00053191, 0.00075346, 0.00106728,
0.00151182, 0.0021415 , 0.00303346, 0.00429693, 0.00608665,
0.00862181])
vf_oil = np.array([0.00876514, 0.01619931, 0.02961302, 0.05303846,
0.09143938, 0.14684062, 0.20676085, 0.22874507, 0.16382356,
0.05477458])
assert_array_almost_equal(spill.de_gas, de_gas, decimal=6)
assert_array_almost_equal(spill.vf_gas, vf_gas, decimal=6)
assert_array_almost_equal(spill.de_oil, de_oil, decimal=6)
assert_array_almost_equal(spill.vf_oil, vf_oil, decimal=6)
# Try Li et al. for gas --------------------------------------------------
spill.simulate(d0, m_gas, m_oil, model_gas='li_etal')
# Create the particle size distributions
nbins_gas = 10
nbins_oil = 10
de_gas_model, vf_gas_model, de_oil_model, vf_oil_model = \
spill.get_distributions(nbins_gas, nbins_oil)
# Check whether the values were updated correctly
assert_approx_equal(spill.d50_gas, 0.006521627004747406, significant=6)
assert_approx_equal(spill.k_gas, -0.6931471805599453,significant=6)
assert spill.alpha_gas == 1.8
de_gas = np.array([0.00185333, 0.00238478, 0.00306863, 0.00394856,
0.00508083, 0.00653777, 0.00841249, 0.0108248 , 0.01392884,
0.01792298])
vf_gas = np.array([0.02515921, 0.06286577, 0.12110766, 0.17987423,
0.20597111, 0.18183759, 0.12376591, 0.0649469 , 0.02627579,
0.00819583])
assert_array_almost_equal(spill.de_gas, de_gas, decimal=6)
assert_array_almost_equal(spill.vf_gas, vf_gas, decimal=6)
assert_array_almost_equal(spill.de_oil, de_oil, decimal=6)
assert_array_almost_equal(spill.vf_oil, vf_oil, decimal=6)
# Try Li et al. for oil --------------------------------------------------
# Simulate a given release condition
spill.simulate(d0, m_gas, m_oil, model_oil='li_etal')
# Create the particle size distributions
nbins_gas = 10
nbins_oil = 10
de_gas_model, vf_gas_model, de_oil_model, vf_oil_model = \
spill.get_distributions(nbins_gas, nbins_oil)
# Check whether the values were updated correctly
assert_approx_equal(spill.d50_oil, 0.014962419470081935, significant=6)
assert_approx_equal(spill.k_oil, -0.6931471805599453,significant=6)
assert spill.alpha_oil == 1.8
de_gas = np.array([0.0057077 , 0.00655033, 0.00751736, 0.00862716,
0.0099008, 0.01136247, 0.01303992, 0.01496502, 0.01717432,
0.01970979])
vf_gas = np.array([0.01545088, 0.0432876 , 0.09350044, 0.15570546,
0.19990978, 0.19788106, 0.15101303, 0.08885147, 0.04030462,
0.01409565])
de_oil = np.array([0.00169489, 0.00240083, 0.00340081, 0.00481728,
0.00682373, 0.00966589, 0.01369183, 0.01939462, 0.02747269,
0.03891536])
vf_oil = np.array([0.00876514, 0.01619931, 0.02961302, 0.05303846,
0.09143938, 0.14684062, 0.20676085, 0.22874507, 0.16382356,
0.05477458])
assert_array_almost_equal(spill.de_gas, de_gas, decimal=6)
assert_array_almost_equal(spill.vf_gas, vf_gas, decimal=6)
assert_array_almost_equal(spill.de_oil, de_oil, decimal=6)
assert_array_almost_equal(spill.vf_oil, vf_oil, decimal=6)
# Try to run a case of a pure oil release using Sintef model -------------
spill.update_properties(None, None, None, rho_oil, mu_oil,
sigma_oil, rho, mu)
m_gas = np.array([0.])
spill.simulate(d0, m_gas, m_oil)
# Create the particle size distributions
nbins_gas = 0
nbins_oil = 10
de_gas_model, vf_gas_model, de_oil_model, vf_oil_model = \
spill.get_distributions(nbins_gas, nbins_oil)
# Check whether the values were updated correctly
assert_approx_equal(spill.d50_oil, 0.0033149657926870454, significant=6)
assert_approx_equal(spill.k_oil, -0.6931471805599453,significant=6)
assert spill.alpha_oil == 1.8
| |
import collections
import os
import re
import subprocess
import contextlib
import warnings
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio import BiopythonExperimentalWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore', BiopythonExperimentalWarning)
from Bio import SearchIO
from seqseqpan.exception import *
from seqseqpan.base import *
from seqseqpan.formatter import Splitter
class Parser:
def parse_xmfa(self, filename):
alignment = Alignment(filename)
with open(filename, "r") as xmfa:
line = xmfa.readline()
seq_parts = []
start = 0
end = 0
seq_nr = 0
ses = []
while line:
line = line.rstrip()
if line.startswith("#"): # parse comment section
# each sequence has an associated file (can be the same for more sequences -> multifasta)
m = re.match("#Sequence(\d+)File\s+(.+)", line)
if m is not None:
number = m.group(1)
fn = m.group(2)
entry = -1
line = xmfa.readline()
                        # for multifasta files, the sequence's entry number is reported on the line after the filename
m = re.match("#Sequence" + number + "Entry\s+(\d+)", line)
if m is not None:
entry = m.group(1)
line = xmfa.readline()
m = re.match("#Sequence" + number + "Format\s+(\w+)", line)
if m is not None:
file_format = m.group(1)
line = xmfa.readline()
genome = Genome(fn, file_format, entry)
alignment.add_genome(genome, number)
continue
elif line.startswith(">"): # a sequence start was encountered
if len(seq_parts) > 0 and int(end) > 0: # save previous sequence
seq = "".join(seq_parts)
ses.append(SequenceEntry(seq_nr, start, end, strand, seq))
alignment.genomes[int(seq_nr)].length = max(alignment.genomes[int(seq_nr)].length, int(end))
seq_parts = []
m = re.match(">\s*(\d+):(\d+)-(\d+) ([+-]) ", line)
if m is not None:
seq_nr = m.group(1)
start = m.group(2)
end = m.group(3)
strand = m.group(4)
else:
raise XMFAHeaderFormatError(line.strip())
elif line.startswith("="):
if len(seq_parts) > 0 and int(end) > 0:
seq = "".join(seq_parts)
ses.append(SequenceEntry(seq_nr, start, end, strand, seq))
alignment.genomes[int(seq_nr)].length = max(alignment.genomes[int(seq_nr)].length, int(end))
alignment.add_lcb_entries(ses)
seq_parts = []
ses = []
else:
seq_parts.append(line)
line = xmfa.readline()
return alignment
def _parse_consensus(self, filename):
try:
record = SeqIO.read(open(filename), "fasta")
except ValueError:
raise ConsensusFastaError()
m = re.match("^[^;]+;(\d+)\|(.*)", record.id)
if m is not None:
order = m.group(1)
xmfa_file = m.group(2)
else:
raise ConsensusFastaFormatError()
try:
cons = Consensus(str(record.seq), order, xmfa_file, filename)
except ParameterError:
raise ConsensusFastaFormatError()
return cons
def parse_block_separated_consensus(self, filename):
consensus = self._parse_consensus(filename + ".blockseparated.fasta")
consensus.block_start_indices = self._parse_consensus_separator(filename)
return consensus
def _parse_consensus_separator(self, filename):
with open(filename + ".blockseparated.idx", "r") as in_file:
# skip first two lines
line = in_file.readline()
line = in_file.readline()
line = in_file.readline()
if line != "":
return [int(idx) for idx in line.strip().split(";")]
else:
return []
def parse_consensus_index(self, filename):
with open(filename + ".idx", "r") as in_file:
line = in_file.readline()
m = re.match("#Fasta\t(.+)", line)
if m is not None:
fasta_file = m.group(1)
else:
raise ConsensusFastaIdxFormatError("Wrong format of Fasta header line.")
line = in_file.readline()
m = re.match("#XMFA\t(.+)", line)
if m is not None:
xmfa_file = m.group(1)
else:
raise ConsensusFastaIdxFormatError("Wrong format of XMFA header line.")
alignment = Alignment(xmfa_file)
lcb = LCB()
lcb_length = 0
lcb_ends_list = [0]
line = in_file.readline()
while line:
line = line.strip()
if line.startswith("#"):
# each sequence has an associated file (can be the same for more sequences -> multifasta)
m = re.match("#Sequence(\d+)File\s+(.+)", line)
if m is not None:
number = m.group(1)
fn = m.group(2)
entry = -1
line = in_file.readline()
                        # for multifasta files, the sequence's entry number is reported on the line after the filename
m = re.match("#Sequence" + number + "Entry\s+(\d+)", line)
if m is not None:
entry = m.group(1)
line = in_file.readline()
m = re.match("#Sequence" + number + "Format\s+(\w+)", line)
if m is not None:
file_format = m.group(1)
line = in_file.readline()
genome = Genome(fn, file_format, entry)
alignment.add_genome(genome, number)
continue
elif not line == "":
fields = line.split("\t")
block_id = fields[0]
nr = int(fields[1])
start = int(fields[2])
end = int(fields[3])
strand = fields[4]
if block_id == "b":
# add previous lcb with all entries
if lcb_length > 0:
lcb.length = lcb_length
alignment.add_lcb(lcb)
lcb = LCB()
# store end of current lcb
lcb_length = (end - start) + 1
lcb_ends_list.append(end)
elif block_id == "s":
# add entry to current lcb
e = SequenceEntry(nr, start, end, strand, '')
if len(fields) == 6:
gaps = fields[5]
gap_dict = {}
for interval in gaps.split(";"):
start, end = interval.split("-")
gap_dict[int(start)] = int(end)
e.gaps = gap_dict
lcb.entries.append(e)
else:
raise ConsensusFastaIdxFormatError("Lines can only start with 'b' or 's'.")
line = in_file.readline()
# add last LCB to alignment
if lcb_length > 0:
lcb.length = lcb_length
alignment.add_lcb(lcb)
consensus = Consensus(sequence="", order=0, xmfa_file=xmfa_file, fasta_file=fasta_file)
consensus.block_start_indices = lcb_ends_list
return alignment, consensus
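    # Note on the ".idx" layout parsed above (it mirrors the format strings in
    # the Writer class below): two header lines ("#Fasta\t<path>" and
    # "#XMFA\t<path>"), optional "#Sequence..." genome lines, then "b" lines
    # describing consensus blocks and "s" lines describing per-genome entries,
    # the latter optionally followed by a "start-end;start-end" gap list.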
def parse_mapping_coordinates(self, coord_f):
with open(coord_f, "r") as in_file:
header = in_file.readline().strip()
source, dest = header.split("\t")
dests = dest.split(",")
coords = [int(line.strip()) for line in in_file if line.strip() != '']
if source == "" or dests == "" or len(dests) == 0 or len(coords) == 0:
raise CoordinatesInputError()
return source, dests, coords
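    # Illustrative input for parse_mapping_coordinates (example values, not
    # taken from the original sources): a tab-separated header naming the
    # source genome and a comma-separated list of destination genomes,
    # followed by one coordinate per line, e.g.
    #
    #   1<TAB>consensus,2
    #   1500
    #   20000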
def parse_genome_description(self, genome_desc_f):
chromosome_desc = collections.defaultdict(list)
with open(genome_desc_f, "r") as in_file:
line = in_file.readline().strip()
while line:
fields = line.split("\t")
length = int(fields[2]) if len(fields) > 2 else None
chromosome_desc[int(fields[0])].append({"desc": fields[1], "length": length})
line = in_file.readline().strip()
return chromosome_desc
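def example_parse_xmfa(xmfa_path):
    """Minimal usage sketch added for illustration (the path is hypothetical;
    assumes Alignment exposes the genomes and lcbs attributes used elsewhere
    in this module): parse an XMFA file and report its size."""
    parser = Parser()
    alignment = parser.parse_xmfa(xmfa_path)
    return len(alignment.genomes), len(alignment.lcbs)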
class Writer:
_mauve_format_string = "#FormatVersion Mauve1\n"
_mauve_genome_file = '#Sequence{0}File\t{1}\n'
_mauve_genome_entry = '#Sequence{0}Entry\t{1}\n'
_mauve_genome_format = '#Sequence{0}Format\t{1}\n'
_mauve_block_header = '> {0}:{1}-{2} {3} {4}\n'
_consensus_index_fasta = '#Fasta\t{0}\n'
_consensus_index_xmfa = '#XMFA\t{0}\n'
_consensus_index_block_line = '\nb\t{0}\t{1}\t{2}\t+\n'
_consensus_index_sequence_line = 's\t{0}\t{1}\t{2}\t{3}\t{4}\n'
_maf_format_string = "##maf version=1\n"
_maf_sequence_header = "\na label={0}\n"
_maf_entry_header = "s\t{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n"
def write_xmfa(self, alignment, path, name, order=0, check_invalid=True):
if check_invalid and alignment.is_invalid():
print("\n!!!!!!!!!\n!!!!!!!\nWARNING!!!!!!: XMFA is invalid!\n!!!!!!!!!\n!!!!!!!\n")
with open(path + "/" + name + ".xmfa", "w") as output:
output.write(self._mauve_format_string)
for nr, genome in sorted(alignment.genomes.items()):
output.write(self._mauve_genome_file.format(nr, genome.file_path))
if genome.entry > 0:
output.write(self._mauve_genome_entry.format(nr, genome.entry))
output.write(self._mauve_genome_format.format(nr, genome.format))
sorted_lcbs = alignment.get_sorted_lcbs(order)
count = 0
for lcb in sorted_lcbs:
count += 1
for entry in sorted(lcb.entries, key=lambda e: e.genome_nr):
output.write(self._mauve_block_header.format(entry.genome_nr,
entry.start,
entry.end,
entry.strand,
alignment.genomes[entry.genome_nr].file_path
)
)
output.write("\n".join(re.findall(".{1,80}", entry.sequence)) + "\n")
output.write("=\n")
def write_maf(self, alignment, path, name, chromosome_desc, check_invalid=True):
if check_invalid and alignment.is_invalid():
print("\n!!!!!!!!!\n!!!!!!!\nWARNING!!!!!!: MAF is invalid!\n!!!!!!!!!\n!!!!!!!\n")
with open(path + "/" + name + ".maf", "w") as output:
output.write(self._maf_format_string)
splitter = Splitter(alignment, chromosome_desc)
split = splitter.split_alignment()
count = 0
for lcb in split.lcbs:
count += 1
output.write(self._maf_sequence_header.format(count))
for entry in sorted(lcb.entries, key=lambda e: e.genome_nr):
genome = alignment.genomes[entry.genome_nr]
chrom_starts = splitter.get_chromosomes_for_entry(entry)
if len(chrom_starts) != 1:
raise Exception("Splitting by chromosomes went wrong.")
chrom_start = chrom_starts[0]
chrom = genome.chromosomes[chrom_start]
start = entry.start - chrom_start # MAF format: This is a zero-based number. If the strand field is '-' then this is the start relative to the reverse-complemented source sequence
output.write(self._maf_entry_header.format(chrom["desc"].replace(" ", "_"), start,
((entry.end - entry.start) + 1), entry.strand,
chrom["length"], entry.sequence))
def write_mapping_coordinates(self, source, destinations, coords_dict, path, name):
with open(os.path.abspath(path + "/" + name + ".txt"), "w") as output:
output.write(''.join([str(source), " (source)", "\t"]))
output.write('\t'.join(destinations))
output.write("\n")
for coord, cur_dict in sorted(coords_dict.items()):
output.write(str(cur_dict[source]) + "\t")
new_coords = [str(cur_dict.get(dest, "-")) for dest in destinations]
output.write('\t'.join(new_coords))
output.write("\n")
def write_consensus(self, alignment, path, name, order=0):
filename = os.path.abspath(path + "/" + name + "_consensus.fasta")
consensus = Consensus()
consensus.from_alignment(alignment, filename, order)
self._write_consensus_index(alignment, filename, order)
self._write_consensus_separator(consensus, alignment, filename)
header = consensus.get_fasta_header(name)
record = SeqRecord(Seq(consensus.sequence), id=header, description='')
with open(filename + ".blockseparated.fasta", "w") as handle:
SeqIO.write(record, handle, "fasta")
record.seq = Seq(consensus.get_undelimited_sequence())
with open(filename, "w") as handle:
SeqIO.write(record, handle, "fasta")
def write_fasta(self, chromosomes, sequences, path, name):
filename = os.path.abspath(path + "/" + name + ".fasta")
with open(filename, "w") as handle:
for chr, seq in zip(sorted(chromosomes.keys()), sequences):
chromosome = chromosomes[chr]
seq_name = chromosome["desc"]
region = chromosome.get("region", None)
seq_name = ":".join(seq_name, region) if region is not None else seq_name
record = SeqRecord(Seq(seq), id=seq_name, description='')
SeqIO.write(record, handle, "fasta")
def _write_consensus_index(self, alignment, fastafile, order=0):
with open(fastafile + ".idx", "w") as output:
output.write(self._consensus_index_fasta.format(fastafile))
output.write(self._consensus_index_xmfa.format(alignment.xmfa_file))
for nr, genome in sorted(alignment.genomes.items()):
output.write(self._mauve_genome_file.format(nr, genome.file_path))
if genome.entry > 0:
output.write(self._mauve_genome_entry.format(nr, genome.entry))
output.write(self._mauve_genome_format.format(nr, genome.format))
sorted_lcbs = alignment.get_sorted_lcbs(order)
consensus_end = 0
counter = 1
for lcb in sorted_lcbs:
output.write(self._consensus_index_block_line.format(counter, consensus_end + 1,
consensus_end + lcb.length))
consensus_end += lcb.length
counter += 1
for entry in lcb.entries:
output.write(self._consensus_index_sequence_line.format
(entry.genome_nr, entry.start, entry.end, entry.strand,
';'.join(
['-'.join([str(start), str(end)]) for start, end in sorted(entry.gaps.items())])
)
)
def _write_consensus_separator(self, consensus, alignment, fasta_file):
with open(fasta_file + ".blockseparated.idx", "w") as output:
output.write(self._consensus_index_fasta.format(fasta_file))
output.write(self._consensus_index_xmfa.format(alignment.xmfa_file))
output.write(';'.join([str(idx) for idx in consensus.block_start_indices]))
class Processor:
def __init__(self, path, blat="blat"):
self.blat = blat
self.path = path
def external_blat(self, seq_one, seq_two):
filename_one = os.path.abspath(self.path + "/" + "realigner_realign_seq1.fasta")
record_one = SeqRecord(Seq(seq_one), id="seq1", description='')
with open(filename_one, "w") as handle:
SeqIO.write(record_one, handle, "fasta")
filename_two = | |
"""defines the BDF attributes"""
from __future__ import annotations
from collections import defaultdict
from typing import List, Dict, Optional, Any, Union, TYPE_CHECKING
from numpy import array # type: ignore
from pyNastran.utils import object_attributes, object_methods, deprecated
#from pyNastran.bdf.case_control_deck import CaseControlDeck
from pyNastran.bdf.cards.coordinate_systems import CORD2R
#from pyNastran.bdf.cards.constraints import ConstraintObject
from pyNastran.bdf.cards.aero.zona import ZONA
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.cards.dmig import DMIG, DMI, DMIJ, DMIK, DMIJI
class BDFAttributes:
"""defines attributes of the BDF"""
def __init__(self):
"""creates the attributes for the BDF"""
self.__init_attributes()
self._is_cards_dict = False
self.is_nx = False
self.is_msc = False
self.is_nasa95 = False
self.is_zona = False
self.save_file_structure = False
self.is_superelements = False
self.set_as_msc()
self.units = [] # type: List[str]
def set_as_msc(self):
self._nastran_format = 'msc'
self.is_nx = False
self.is_msc = True
self.is_nasa95 = False
self.is_zona = False
def set_as_nx(self):
self._nastran_format = 'nx'
self.is_nx = True
self.is_msc = False
self.is_nasa95 = False
self.is_zona = False
def set_as_zona(self):
self._nastran_format = 'zona'
self.is_nx = False
self.is_msc = False
self.is_nasa95 = False
self.is_zona = True
def __properties__(self):
"""the list of @property attributes"""
return ['nastran_format', 'is_long_ids', 'sol', 'subcases',
'nnodes', 'node_ids', 'point_ids', 'npoints',
'nelements', 'element_ids', 'nproperties', 'property_ids',
'nmaterials', 'material_ids', 'ncoords', 'coord_ids',
'ncaeros', 'caero_ids', 'wtmass', 'is_bdf_vectorized', 'nid_map']
def object_attributes(self, mode: str='public',
keys_to_skip: Optional[List[str]]=None,
filter_properties: bool=False) -> List[str]:
"""
List the names of attributes of a class as strings. Returns public
attributes as default.
Parameters
----------
mode : str
defines what kind of attributes will be listed
* 'public' - names that do not begin with underscore
* 'private' - names that begin with single underscore
* 'both' - private and public
* 'all' - all attributes that are defined for the object
keys_to_skip : List[str]; default=None -> []
names to not consider to avoid deprecation warnings
filter_properties: bool: default=False
filters the @property objects
Returns
-------
attribute_names : List[str]
sorted list of the names of attributes of a given type or None
if the mode is wrong
"""
if keys_to_skip is None:
keys_to_skip = []
my_keys_to_skip = [
#'case_control_deck',
'log',
'node_ids', 'coord_ids', 'element_ids', 'property_ids',
'material_ids', 'caero_ids', 'is_long_ids',
'nnodes', 'ncoords', 'nelements', 'nproperties',
'nmaterials', 'ncaeros', 'npoints',
'point_ids', 'subcases',
'_card_parser', '_card_parser_b', '_card_parser_prepare',
'object_methods', 'object_attributes',
]
return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip,
filter_properties=filter_properties)
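    # Usage sketch (illustrative; assumes ``model`` is a BDF instance that
    # mixes in BDFAttributes):
    #     public_names = model.object_attributes()
    #     all_names = model.object_attributes(mode='all')
    #     no_props = model.object_attributes(filter_properties=True)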
def object_methods(self, mode: str='public', keys_to_skip: Optional[List[str]]=None) -> List[str]:
"""
List the names of methods of a class as strings. Returns public methods
as default.
Parameters
----------
obj : instance
the object for checking
mode : str
defines what kind of methods will be listed
* "public" - names that do not begin with underscore
* "private" - names that begin with single underscore
* "both" - private and public
* "all" - all methods that are defined for the object
keys_to_skip : List[str]; default=None -> []
names to not consider to avoid deprecation warnings
Returns
-------
method : List[str]
sorted list of the names of methods of a given type
or None if the mode is wrong
"""
if keys_to_skip is None:
keys_to_skip = []
my_keys_to_skip = [] # type: List[str]
my_keys_to_skip = [
#'case_control_deck',
'log', #'mpcObject', 'spcObject',
'node_ids', 'coord_ids', 'element_ids', 'property_ids',
'material_ids', 'caero_ids', 'is_long_ids',
'nnodes', 'ncoords', 'nelements', 'nproperties',
'nmaterials', 'ncaeros',
'point_ids', 'subcases',
'_card_parser', '_card_parser_b',
'object_methods', 'object_attributes',
]
return object_methods(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip)
def deprecated(self, old_name: str, new_name: str, deprecated_version: str) -> None:
"""deprecates methods"""
return deprecated(old_name, new_name, deprecated_version, levels=[0, 1, 2])
def clear_attributes(self) -> None:
"""removes the attributes from the model"""
self.__init_attributes()
self.nodes = {}
self.loads = {} # type: Dict[int, List[Any]]
self.load_combinations = {} # type: Dict[int, List[Any]]
def reset_errors(self) -> None:
"""removes the errors from the model"""
self._ixref_errors = 0
self._stored_xref_errors = []
def __init_attributes(self) -> None:
"""
Creates storage objects for the BDF object.
This would be in the init but doing it this way allows for better
inheritance
References:
1. http://www.mscsoftware.com/support/library/conf/wuc87/p02387.pdf
"""
self.reset_errors()
self.bdf_filename = None
self.punch = None
self._encoding = None
self._is_long_ids = False # ids > 8 characters
#: ignore any ECHOON flags
self.force_echo_off = True
#: list of Nastran SYSTEM commands
self.system_command_lines = [] # type: List[str]
        #: list of executive control deck lines
self.executive_control_lines = [] # type: List[str]
#: list of case control deck lines
self.case_control_lines = [] # type: List[str]
# dictionary of BDFs
self.superelement_models = {}
self.initial_superelement_models = [] # the keys before superelement mirroring
self._auto_reject = False
self._solmap_to_value = {
'NONLIN': 101, # 66 -> 101 per Reference 1
'SESTATIC': 101,
'SESTATICS': 101,
'SEMODES': 103,
'BUCKLING': 105,
'SEBUCKL': 105,
'NLSTATIC': 106,
'SEDCEIG': 107,
'SEDFREQ': 108,
'SEDTRAN': 109,
'SEMCEIG': 110,
'SEMFREQ': 111,
'SEMTRAN': 112,
'CYCSTATX': 114,
'CYCMODE': 115,
'CYCBUCKL': 116,
'CYCFREQ': 118,
'NLTRAN': 129,
'AESTAT': 144,
'FLUTTR': 145,
'SEAERO': 146,
'NLSCSH': 153,
'NLTCSH': 159,
'DBTRANS': 190,
'DESOPT': 200,
# guessing
#'CTRAN' : 115,
'CFREQ' : 118,
# solution 200 names
'STATICS': 101,
'MODES': 103,
'BUCK': 105,
'DFREQ': 108,
'MFREQ': 111,
'MTRAN': 112,
'DCEIG': 107,
'MCEIG': 110,
#'HEAT' : None,
#'STRUCTURE': None,
#'DIVERGE' : None,
'FLUTTER': 145,
'SAERO': 146,
}
self.rsolmap_to_str = {
66: 'NONLIN',
101: 'SESTSTATIC', # linear static
103: 'SEMODES', # modal
105: 'BUCKLING', # buckling
106: 'NLSTATIC', # non-linear static
107: 'SEDCEIG', # direct complex frequency response
108: 'SEDFREQ', # direct frequency response
109: 'SEDTRAN', # direct transient response
110: 'SEMCEIG', # modal complex eigenvalue
111: 'SEMFREQ', # modal frequency response
112: 'SEMTRAN', # modal transient response
114: 'CYCSTATX',
115: 'CYCMODE',
116: 'CYCBUCKL',
118: 'CYCFREQ',
129: 'NLTRAN', # nonlinear transient
144: 'AESTAT', # static aeroelastic
145: 'FLUTTR', # flutter/aeroservoelastic
146: 'SEAERO', # dynamic aeroelastic
153: 'NLSCSH', # nonlinear static thermal
159: 'NLTCSH', # nonlinear transient thermal
#187 - Dynamic Design Analysis Method
190: 'DBTRANS',
200: 'DESOPT', # optimization
}
# ------------------------ bad duplicates ----------------------------
self._iparse_errors = 0
self._nparse_errors = 0
self._stop_on_parsing_error = True
self._stop_on_duplicate_error = True
self._stored_parse_errors = [] # type: List[str]
self._duplicate_nodes = [] # type: List[str]
self._duplicate_elements = [] # type: List[str]
self._duplicate_properties = [] # type: List[str]
self._duplicate_materials = [] # type: List[str]
self._duplicate_masses = [] # type: List[str]
self._duplicate_thermal_materials = [] # type: List[str]
self._duplicate_coords = [] # type: List[str]
self.values_to_skip = {} # type: Dict[str, List[int]]
# ------------------------ structural defaults -----------------------
#: the analysis type
self._sol = None
#: used in solution 600, method
self.sol_method = None
#: the line with SOL on it, marks ???
        self.sol_iline = None  # type: Optional[int]
self.case_control_deck = None # type: Optional[Any]
#: store the PARAM cards
self.params = {} # type: Dict[str, Any]
# ------------------------------- nodes -------------------------------
# main structural block
#: stores POINT cards
self.points = {} # type: Dict[int, Any]
#self.grids = {}
self.spoints = {} # type: Dict[int, Any]
self.epoints = {} # type: Dict[int, Any]
#: stores GRIDSET card
self.grdset = None # type: Optional[Any]
#: stores SEQGP cards
self.seqgp = None # type: Optional[Any]
## stores RINGAX
self.ringaxs = {} # type: Dict[int, Any]
## stores GRIDB
self.gridb = {} # type: Dict[int, Any]
#: stores elements (CQUAD4, CTRIA3, CHEXA8, CTETRA4, CROD, CONROD,
#: etc.)
self.elements = {} # type: Dict[int, Any]
#: stores CBARAO, CBEAMAO
self.ao_element_flags = {} # type: Dict[int, Any]
#: stores BAROR
self.baror = None # type: Optional[Any]
#: stores BEAMOR
self.beamor = None # type: Optional[Any]
#: stores SNORM
self.normals = {} # type: Dict[int, Any]
#: stores rigid elements (RBE2, RBE3, RJOINT, etc.)
self.rigid_elements = {} # type: Dict[int, Any]
#: stores PLOTELs
        self.plotels = {}  # type: Dict[int, Any]
#: stores CONM1, CONM2, CMASS1,CMASS2, CMASS3, CMASS4, CMASS5
self.masses = {} # type: Dict[int, Any]
#: stores PMASS
self.properties_mass = {} # type: Dict[int, Any]
#: stores NSM, NSM1, NSML, NSML1
self.nsms = {} # type: Dict[int, List[Any]]
#: stores NSMADD
self.nsmadds = {} # type: Dict[int, List[Any]]
        #: stores LOTS of properties (PBAR, PBEAM, PSHELL, PCOMP, etc.)
self.properties = {} # type: Dict[int, Any]
#: stores MAT1, MAT2, MAT3, MAT8, MAT10, MAT11
self.materials = {} # type: Dict[int, Any]
#: defines the MAT4, MAT5
self.thermal_materials = {} # type: Dict[int, Any]
#: defines the MATHE, MATHP
self.hyperelastic_materials = {} # type: Dict[int, Any]
#: stores MATSx
self.MATS1 | |
document, preserving the order of the input documents in the combined document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.merge_document_pdf_with_http_info(input_file1, input_file2, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file1: First input file to perform the operation on. (required)
:param file input_file2: Second input file to perform the operation on (more than 2 can be supplied). (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input_file1', 'input_file2'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method merge_document_pdf" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input_file1' is set
if ('input_file1' not in params or
params['input_file1'] is None):
raise ValueError("Missing the required parameter `input_file1` when calling `merge_document_pdf`") # noqa: E501
# verify the required parameter 'input_file2' is set
if ('input_file2' not in params or
params['input_file2'] is None):
raise ValueError("Missing the required parameter `input_file2` when calling `merge_document_pdf`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'input_file1' in params:
local_var_files['inputFile1'] = params['input_file1'] # noqa: E501
if 'input_file2' in params:
local_var_files['inputFile2'] = params['input_file2'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/octet-stream']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/merge/pdf', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
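    # Synchronous usage sketch (illustrative; ``api`` is assumed to be an
    # already-configured instance of this API class and the file paths are
    # hypothetical). The merged result is returned as documented above, i.e.
    # with the ``str`` response type:
    #     merged = api.merge_document_pdf('first.pdf', 'second.pdf')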
def merge_document_pdf_multi(self, input_file1, input_file2, **kwargs): # noqa: E501
"""Merge Multple PDF Files Together # noqa: E501
Combine multiple PDF files (pdf) into a single PDF document, preserving the order of the input documents in the combined document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.merge_document_pdf_multi(input_file1, input_file2, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file1: First input file to perform the operation on. (required)
:param file input_file2: Second input file to perform the operation on. (required)
:param file input_file3: Third input file to perform the operation on.
:param file input_file4: Fourth input file to perform the operation on.
:param file input_file5: Fifth input file to perform the operation on.
:param file input_file6: Sixth input file to perform the operation on.
:param file input_file7: Seventh input file to perform the operation on.
:param file input_file8: Eighth input file to perform the operation on.
:param file input_file9: Ninth input file to perform the operation on.
:param file input_file10: Tenth input file to perform the operation on.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.merge_document_pdf_multi_with_http_info(input_file1, input_file2, **kwargs) # noqa: E501
else:
(data) = self.merge_document_pdf_multi_with_http_info(input_file1, input_file2, **kwargs) # noqa: E501
return data
def merge_document_pdf_multi_with_http_info(self, input_file1, input_file2, **kwargs): # noqa: E501
"""Merge Multple PDF Files Together # noqa: E501
Combine multiple PDF files (pdf) into a single PDF document, preserving the order of the input documents in the combined document # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.merge_document_pdf_multi_with_http_info(input_file1, input_file2, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file1: First input file to perform the operation on. (required)
:param file input_file2: Second input file to perform the operation on. (required)
:param file input_file3: Third input file to perform the operation on.
:param file input_file4: Fourth input file to perform the operation on.
:param file input_file5: Fifth input file to perform the operation on.
:param file input_file6: Sixth input file to perform the operation on.
:param file input_file7: Seventh input file to perform the operation on.
:param file input_file8: Eighth input file to perform the operation on.
:param file input_file9: Ninth input file to perform the operation on.
:param file input_file10: Tenth input file to perform the operation on.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input_file1', 'input_file2', 'input_file3', 'input_file4', 'input_file5', 'input_file6', 'input_file7', 'input_file8', 'input_file9', 'input_file10'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method merge_document_pdf_multi" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input_file1' is set
if ('input_file1' not in params or
params['input_file1'] is None):
raise ValueError("Missing the required parameter `input_file1` when calling `merge_document_pdf_multi`") # noqa: E501
# verify the required parameter 'input_file2' is set
if ('input_file2' not in params or
params['input_file2'] is None):
raise ValueError("Missing the required parameter `input_file2` when calling `merge_document_pdf_multi`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'input_file1' in params:
local_var_files['inputFile1'] = params['input_file1'] # noqa: E501
if 'input_file2' in params:
local_var_files['inputFile2'] = params['input_file2'] # noqa: E501
if 'input_file3' in params:
local_var_files['inputFile3'] = params['input_file3'] # noqa: E501
if 'input_file4' in params:
local_var_files['inputFile4'] = params['input_file4'] # noqa: E501
if 'input_file5' in params:
local_var_files['inputFile5'] = params['input_file5'] # noqa: E501
if 'input_file6' in params:
local_var_files['inputFile6'] = params['input_file6'] # noqa: E501
if 'input_file7' in params:
local_var_files['inputFile7'] = params['input_file7'] # noqa: E501
if 'input_file8' in params:
local_var_files['inputFile8'] = params['input_file8'] # noqa: E501
if 'input_file9' in params:
local_var_files['inputFile9'] = params['input_file9'] # noqa: E501
if 'input_file10' in params:
local_var_files['inputFile10'] = params['input_file10'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/octet-stream']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/convert/merge/pdf/multi', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def merge_document_png(self, input_file1, input_file2, **kwargs): # noqa: E501
"""Merge Two PNG Files Together # noqa: E501
Combine two PNG files into a single PNG document, preserving the order of the input documents in the combined document by stacking them vertically # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.merge_document_png(input_file1, input_file2, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file1: First input file to perform the operation on. (required)
:param file input_file2: Second input file to perform the operation on (more than 2 can be supplied). (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.merge_document_png_with_http_info(input_file1, input_file2, **kwargs) # noqa: E501
else:
(data) = self.merge_document_png_with_http_info(input_file1, input_file2, **kwargs) # noqa: E501
return data
def merge_document_png_with_http_info(self, input_file1, input_file2, **kwargs): # noqa: E501
"""Merge Two PNG Files Together # noqa: E501
Combine two PNG files into a single PNG document, preserving the order of the input documents in the combined document by stacking them vertically # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.merge_document_png_with_http_info(input_file1, input_file2, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file1: First input file to perform the operation on. (required)
:param file input_file2: Second input file to perform the operation on (more than 2 can be supplied). (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input_file1', 'input_file2'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method | |
+= 1
# We count the deleted contributions here too since they should have a corresponding addition contribution
self.assertEqual(num_expected_new_contributions + num_expected_new_deleted_contributions,
num_actual_new_contributions)
self.assertEqual(num_expected_new_deleted_contributions, num_actual_new_deleted_contributions)
return hits_by_id
def _assert_new_bundle(self, num_expected_old_contributions=0, old_hits_by_id=None):
num_actual_old_contributions = 0
hits = self._get_all_hits()
# Six entities (two files, one project, one cell suspension, one sample and one bundle)
# One contribution and one aggregate per entity
self.assertEqual(6 + 6 + num_expected_old_contributions, len(hits))
for hit in hits:
entity_type, aggregate = config.parse_es_index_name(hit['_index'])
source = hit['_source']
version = one(source['bundles'])['version'] if aggregate else source['bundle_version']
contents = source['contents']
project = one(contents['projects'])
if not aggregate and version != self.new_bundle[1]:
self.assertLess(version, self.new_bundle[1])
num_actual_old_contributions += 1
continue
if old_hits_by_id is not None:
old_hit = old_hits_by_id[source['entity_id'], aggregate]
old_source = old_hit['_source']
old_version = one(old_source['bundles'])['version'] if aggregate else old_source['bundle_version']
self.assertLess(old_version, version)
old_contents = old_source['contents']
old_project = one(old_contents['projects'])
self.assertNotEqual(old_project["project_title"], project["project_title"])
self.assertNotEqual(old_project["project_short_name"], project["project_short_name"])
self.assertNotEqual(old_project["laboratory"], project["laboratory"])
if aggregate and entity_type != 'projects':
self.assertNotEqual(old_project["institutions"], project["institutions"])
else:
self.assertNotEqual(old_project["contributors"], project["contributors"])
self.assertNotEqual(old_contents["donors"][0]["genus_species"],
contents["donors"][0]["genus_species"])
self.assertEqual("Single cell transcriptome analysis of human pancreas reveals transcriptional "
"signatures of aging and somatic mutation patterns.",
get(project["project_title"]))
self.assertEqual("Single cell transcriptome analysis of human pancreas",
get(project["project_short_name"]))
self.assertNotIn("<NAME>", project["laboratory"])
self.assertIn("Molecular Atlas", project["laboratory"])
if aggregate and entity_type != 'projects':
self.assertNotIn('Farmers Trucks', project['institutions'])
else:
self.assertNotIn('Farmers Trucks', [c.get('institution') for c in project['contributors']])
self.assertEqual(num_expected_old_contributions, num_actual_old_contributions)
def test_concurrent_specimen_submissions(self):
"""
Index two bundles contributing to the same specimen and project, ensure that conflicts are detected and handled
"""
bundles = [("9dec1bd6-ced8-448a-8e45-1fc7846d8995", "2018-03-29T154319.834528Z"),
("56a338fe-7554-4b5d-96a2-7df127a7640b", "2018-03-29T153507.198365Z")]
original_mget = Elasticsearch.mget
latch = Latch(len(bundles))
def mocked_mget(self, body, _source_include):
mget_return = original_mget(self, body=body, _source_include=_source_include)
# all threads wait at the latch after reading to force conflict while writing
latch.decrement(1)
return mget_return
with patch.object(Elasticsearch, 'mget', new=mocked_mget):
with self.assertLogs(level='WARNING') as cm:
with ThreadPoolExecutor(max_workers=len(bundles)) as executor:
thread_results = executor.map(self._index_canned_bundle, bundles)
self.assertIsNotNone(thread_results)
self.assertTrue(all(r is None for r in thread_results))
self.assertIsNotNone(cm.records)
num_hits = sum(1 for log_msg in cm.output
if "There was a conflict with document" in log_msg
and ("azul_samples" in log_msg or "azul_projects" in log_msg))
# One conflict for the specimen and one for the project
self.assertEqual(num_hits, 2)
hits = self._get_all_hits()
file_uuids = set()
# Two bundles each with 1 sample, 1 cell suspension, 1 project, 1 bundle and 2 files
# Both bundles share the same sample and the project, so they get aggregated only once:
# 2 samples + 2 projects + 2 cell suspension + 2 bundles + 4 files +
# 1 samples agg + 1 projects agg + 2 cell suspension agg + 2 bundle agg + 4 file agg = 22 hits
self.assertEqual(22, len(hits))
for hit in hits:
entity_type, aggregate = config.parse_es_index_name(hit['_index'])
contents = hit['_source']['contents']
if aggregate:
self.assertEqual(hit['_id'], hit['_source']['entity_id'])
if entity_type == 'files':
contents = hit['_source']['contents']
self.assertEqual(1, len(contents['files']))
if aggregate:
file_uuids.add(contents['files'][0]['uuid'])
elif entity_type in ('samples', 'projects'):
if aggregate:
self.assertEqual(2, len(hit['_source']['bundles']))
                    # All four files are fastqs, so they are grouped together
self.assertEqual(4, one(contents['files'])['count'])
else:
self.assertEqual(2, len(contents['files']))
elif entity_type == 'bundles':
if aggregate:
self.assertEqual(1, len(hit['_source']['bundles']))
self.assertEqual(2, len(contents['files']))
else:
self.assertEqual(2, len(contents['files']))
elif entity_type == 'cell_suspensions':
if aggregate:
self.assertEqual(1, len(hit['_source']['bundles']))
self.assertEqual(1, len(contents['files']))
else:
self.assertEqual(2, len(contents['files']))
else:
self.fail()
file_document_ids = set()
self.assertEqual(4, len(file_uuids))
for bundle_fqid in bundles:
manifest, metadata = self._load_canned_bundle(bundle_fqid)
for file in metadata['file.json']['files']:
file_document_ids.add(file['hca_ingest']['document_id'])
self.assertEqual(file_document_ids, file_uuids)
def test_indexing_with_skipped_matrix_file(self):
# FIXME: Remove once https://github.com/HumanCellAtlas/metadata-schema/issues/579 is resolved
self._index_canned_bundle(('587d74b4-1075-4bbf-b96a-4d1ede0481b2', '2018-10-10T022343.182000Z'))
self.maxDiff = None
hits = self._get_all_hits()
file_names, aggregate_file_names = set(), set()
entities_with_matrix_files = set()
for hit in hits:
entity_type, aggregate = config.parse_es_index_name(hit["_index"])
files = hit['_source']['contents']['files']
if aggregate:
if entity_type == 'files':
aggregate_file_names.add(one(files)['name'])
else:
for file in files:
# FIXME: need for one() is odd, file_format is a group field
# https://github.com/DataBiosphere/azul/issues/612
if entity_type == 'bundles':
if file['file_format'] == 'matrix':
entities_with_matrix_files.add(hit['_source']['entity_id'])
else:
if file['file_format'] == 'matrix':
self.assertEqual(1, file['count'])
entities_with_matrix_files.add(hit['_source']['entity_id'])
else:
for file in files:
file_name = file['name']
file_names.add(file_name)
self.assertEqual(4, len(entities_with_matrix_files)) # a project, a specimen, a cell suspension and a bundle
self.assertEqual(aggregate_file_names, file_names)
matrix_file_names = {file_name for file_name in file_names if '.zarr!' in file_name}
self.assertEqual({'377f2f5a-4a45-4c62-8fb0-db9ef33f5cf0.zarr!.zattrs'}, matrix_file_names)
def test_plate_bundle(self):
self._index_canned_bundle(('d0e17014-9a58-4763-9e66-59894efbdaa8', '2018-10-03T144137.044509Z'))
self.maxDiff = None
hits = self._get_all_hits()
self.assertGreater(len(hits), 0)
counted_cell_count = 0
expected_cell_count = 380 # 384 wells in total, four of them empty, the rest with a single cell
documents_with_cell_suspension = 0
for hit in hits:
entity_type, aggregate = config.parse_es_index_name(hit["_index"])
contents = hit['_source']['contents']
cell_suspensions = contents['cell_suspensions']
if entity_type == 'files' and contents['files'][0]['file_format'] == 'pdf':
# The PDF files in that bundle aren't linked to a specimen
self.assertEqual(0, len(cell_suspensions))
else:
if aggregate:
bundles = hit['_source']['bundles']
self.assertEqual(1, len(bundles))
self.assertEqual(one(contents['protocols'])['paired_end'], [True])
else:
self.assertEqual({p.get('paired_end') for p in contents['protocols']}, {True, None})
specimens = contents['specimens']
for specimen in specimens:
self.assertEqual({'bone marrow', 'temporal lobe'}, set(specimen['organ_part']))
for cell_suspension in cell_suspensions:
self.assertEqual({'bone marrow', 'temporal lobe'}, set(cell_suspension['organ_part']))
self.assertEqual({'Plasma cells'}, set(cell_suspension['selected_cell_type']))
self.assertEqual(1 if entity_type == 'cell_suspensions' or aggregate else 384, len(cell_suspensions))
if entity_type == 'cell_suspensions':
counted_cell_count += one(cell_suspensions)['total_estimated_cells']
else:
self.assertEqual(expected_cell_count, sum(cs['total_estimated_cells'] for cs in cell_suspensions))
documents_with_cell_suspension += 1
self.assertEqual(expected_cell_count * 2, counted_cell_count) # times 2 for original document and aggregate
# Cell suspensions should be mentioned in 1 bundle, 1 project, 1 specimen, 384 cell suspensions, and 2 files
# (one per fastq). There should be one original and one aggregate document for each of those. (389 * 2 = 778)
self.assertEqual(778, documents_with_cell_suspension)
def test_well_bundles(self):
self._index_canned_bundle(('3f8176ff-61a7-4504-a57c-fc70f38d5b13', '2018-10-24T234431.820615Z'))
self._index_canned_bundle(('e2c3054e-9fba-4d7a-b85b-a2220d16da73', '2018-10-24T234303.157920Z'))
self.maxDiff = None
hits = self._get_all_hits()
self.assertGreater(len(hits), 0)
for hit in hits:
contents = hit["_source"]['contents']
entity_type, aggregate = config.parse_es_index_name(hit["_index"])
if aggregate:
cell_suspensions = contents['cell_suspensions']
self.assertEqual(1, len(cell_suspensions))
# Each bundle contributes a well with one cell. The data files in each bundle are derived from
# the cell in that well. This is why each data file and bundle should only have a cell count of 1.
# Both bundles refer to the same specimen and project, so the cell count for those should be 2.
expected_cells = 1 if entity_type in ('files', 'cell_suspensions', 'bundles') else 2
self.assertEqual(expected_cells, cell_suspensions[0]['total_estimated_cells'])
self.assertEqual(one(one(contents['protocols'])['workflow']), 'smartseq2_v2.1.0')
else:
self.assertEqual({p.get('workflow') for p in contents['protocols']}, {'smartseq2_v2.1.0', None})
def test_pooled_specimens(self):
"""
Index a bundle that combines 3 specimen_from_organism into 1 cell_suspension
"""
self._index_canned_bundle(('b7fc737e-9b7b-4800-8977-fe7c94e131df', '2018-09-12T121155.846604Z'))
self.maxDiff = None
hits = self._get_all_hits()
self.assertGreater(len(hits), 0)
for hit in hits:
entity_type, aggregate = config.parse_es_index_name(hit["_index"])
if aggregate:
contents = hit["_source"]['contents']
cell_suspensions = contents['cell_suspensions']
self.assertEqual(1, len(cell_suspensions))
                # This bundle contains three specimens which are pooled into a single cell suspension with
                # 10000 cells. Until we introduced cell suspensions as an inner entity, we used to associate cell
                # counts with specimens, which would have inflated the total cell count to 30000 in this case.
self.assertEqual(10000, cell_suspensions[0]['total_estimated_cells'])
sample = one(contents['samples'])
self.assertEqual(sample['organ'], sample['effective_organ'])
if entity_type == 'samples':
self.assertTrue(sample['effective_organ'] in {'Brain 1', 'Brain 2', 'Brain 3'})
else:
self.assertEqual(set(sample['effective_organ']), {'Brain 1', 'Brain 2', 'Brain 3'})
def test_project_contact_extraction(self):
"""
Ensure all fields related to project contacts are properly extracted
"""
self._index_canned_bundle(('d0e17014-9a58-4763-9e66-59894efbdaa8', '2018-10-03T144137.044509Z'))
hits = self._get_all_hits()
for hit in hits:
entity_type, aggregate = config.parse_es_index_name(hit['_index'])
if aggregate and entity_type == 'projects':
contributor_values = defaultdict(set)
contributors = hit['_source']['contents']['projects'][0]['contributors']
for contributor in contributors:
for k, v in contributor.items():
contributor_values[k].add(v)
self.assertEqual({'Matthew,,Green', '<NAME>', '<NAME>', '<NAME>', '<NAME>'},
contributor_values['contact_name'])
self.assertEqual({'<EMAIL>', '<EMAIL>', '<EMAIL>',
'<EMAIL>', '<EMAIL>'},
contributor_values['email'])
self.assertEqual({'EMBL-EBI European Bioinformatics Institute', 'The Weizmann Institute of Science'},
contributor_values['institution'])
self.assertEqual({'Prof. <NAME>', 'Human Cell Atlas Data Coordination Platform'},
contributor_values['laboratory'])
self.assertEqual({False, True}, contributor_values['corresponding_contributor'])
self.assertEqual({'Human Cell Atlas wrangler', config.null_keyword},
contributor_values['project_role'])
def test_diseases_field(self):
"""
Index a bundle with a specimen `diseases` value that differs from the donor `diseases` value
and assert that both values are represented in the indexed document.
"""
self._index_canned_bundle(("3db604da-940e-49b1-9bcc-25699a55b295", "2018-11-02T184048.983513Z"))
hits = self._get_all_hits()
for hit in hits:
source = hit['_source']
contents = source['contents']
specimen_diseases = contents['specimens'][0]['disease']
donor_diseases = contents['donors'][0]['diseases']
self.assertEqual(1, len(specimen_diseases))
self.assertEqual("atrophic vulva (specimen_from_organism)", specimen_diseases[0])
self.assertEqual(1, len(donor_diseases))
self.assertEqual("atrophic vulva (donor_organism)", donor_diseases[0])
def test_organoid_priority(self):
"""
Index a bundle containing an Organoid and assert that the "organ" and "organ_part"
values saved are the ones from the Organoid and not the SpecimenFromOrganism
"""
self._index_canned_bundle(('dcccb551-4766-4210-966c-f9ee25d19190', '2018-10-18T204655.866661Z'))
hits = self._get_all_hits()
inner_specimens, inner_cell_suspensions = 0, 0
for hit in hits:
contents = hit['_source']['contents']
entity_type, aggregate = config.parse_es_index_name(hit['_index'])
if entity_type != 'files' or one(contents['files'])['file_format'] != 'pdf':
inner_cell_suspensions += len(contents['cell_suspensions'])
for specimen in contents['specimens']:
inner_specimens += 1
expect_list = aggregate and entity_type != 'specimens'
self.assertEqual(['skin of body'] if expect_list else 'skin | |
<reponame>pseudozach/lnbits-legend
import psycopg2
import sqlite3
import os
# Python script to migrate an LNbits SQLite DB to Postgres
# All credits to @Fritz446 for the awesome work
# pip install psycopg2 OR psycopg2-binary
# Change these values as needed
sqfolder = "data/"
pgdb = "lnbits"
pguser = "postgres"
pgpswd = "<PASSWORD>"
pghost = "localhost"
pgport = "5432"
pgschema = ""
def get_sqlite_cursor(sqdb) -> sqlite3:
consq = sqlite3.connect(sqdb)
return consq.cursor()
def get_postgres_cursor():
conpg = psycopg2.connect(
        database=pgdb, user=pguser, password=pgpswd, host=pghost, port=pgport
)
return conpg.cursor()
def check_db_versions(sqdb):
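    """Compare the dbversions tables of the SQLite and Postgres databases and
    raise an exception if any shared extension version differs."""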
sqlite = get_sqlite_cursor(sqdb)
dblite = dict(sqlite.execute("SELECT * FROM dbversions;").fetchall())
if "lnurlpos" in dblite:
del dblite["lnurlpos"]
sqlite.close()
postgres = get_postgres_cursor()
postgres.execute("SELECT * FROM public.dbversions;")
dbpost = dict(postgres.fetchall())
for key in dblite.keys():
if key in dblite and key in dbpost and dblite[key] != dbpost[key]:
raise Exception(
f"sqlite database version ({dblite[key]}) of {key} doesn't match postgres database version {dbpost[key]}"
)
connection = postgres.connection
postgres.close()
connection.close()
print("Database versions OK, converting")
def fix_id(seq, values):
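    """Advance a Postgres id sequence past the largest migrated id.

    Rows are inserted with their original primary keys, so the backing
    sequence (e.g. tipjar.tipjars_id_seq) must be moved forward; otherwise the
    next sequence-generated id would collide with an already migrated row.
    """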
if not values or len(values) == 0:
return
postgres = get_postgres_cursor()
max_id = values[len(values) - 1][0]
postgres.execute(f"SELECT setval('{seq}', {max_id});")
connection = postgres.connection
postgres.close()
connection.close()
def insert_to_pg(query, data):
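    """Insert the given rows into Postgres one by one and commit once at the
    end; raises ValueError naming the offending row if an insert fails."""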
if len(data) == 0:
return
cursor = get_postgres_cursor()
connection = cursor.connection
for d in data:
try:
cursor.execute(query, d)
except:
raise ValueError(f"Failed to insert {d}")
connection.commit()
cursor.close()
connection.close()
def migrate_core(sqlite_db_file):
sq = get_sqlite_cursor(sqlite_db_file)
# ACCOUNTS
res = sq.execute("SELECT * FROM accounts;")
q = f"INSERT INTO public.accounts (id, email, pass) VALUES (%s, %s, %s);"
insert_to_pg(q, res.fetchall())
# WALLETS
res = sq.execute("SELECT * FROM wallets;")
q = f'INSERT INTO public.wallets (id, name, "user", adminkey, inkey) VALUES (%s, %s, %s, %s, %s);'
insert_to_pg(q, res.fetchall())
# API PAYMENTS
res = sq.execute("SELECT * FROM apipayments;")
q = f"""
INSERT INTO public.apipayments(
checking_id, amount, fee, wallet, pending, memo, "time", hash, preimage, bolt11, extra, webhook, webhook_status)
VALUES (%s, %s, %s, %s, %s::boolean, %s, to_timestamp(%s), %s, %s, %s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
# BALANCE CHECK
res = sq.execute("SELECT * FROM balance_check;")
q = f"INSERT INTO public.balance_check(wallet, service, url) VALUES (%s, %s, %s);"
insert_to_pg(q, res.fetchall())
# BALANCE NOTIFY
res = sq.execute("SELECT * FROM balance_notify;")
q = f"INSERT INTO public.balance_notify(wallet, url) VALUES (%s, %s);"
insert_to_pg(q, res.fetchall())
# EXTENSIONS
res = sq.execute("SELECT * FROM extensions;")
q = f'INSERT INTO public.extensions("user", extension, active) VALUES (%s, %s, %s::boolean);'
insert_to_pg(q, res.fetchall())
print("Migrated: core")
def migrate_ext(sqlite_db_file, schema):
sq = get_sqlite_cursor(sqlite_db_file)
if schema == "bleskomat":
# BLESKOMAT LNURLS
res = sq.execute("SELECT * FROM bleskomat_lnurls;")
q = f"""
INSERT INTO bleskomat.bleskomat_lnurls(
id, bleskomat, wallet, hash, tag, params, api_key_id, initial_uses, remaining_uses, created_time, updated_time)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
# BLESKOMATS
res = sq.execute("SELECT * FROM bleskomats;")
q = f"""
INSERT INTO bleskomat.bleskomats(
id, wallet, api_key_id, api_key_secret, api_key_encoding, name, fiat_currency, exchange_rate_provider, fee)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
elif schema == "captcha":
# CAPTCHA
res = sq.execute("SELECT * FROM captchas;")
q = f"""
INSERT INTO captcha.captchas(
id, wallet, url, memo, description, amount, "time", remembers, extras)
VALUES (%s, %s, %s, %s, %s, %s, to_timestamp(%s), %s, %s);
"""
insert_to_pg(q, res.fetchall())
elif schema == "copilot":
# OLD COPILOTS
res = sq.execute("SELECT * FROM copilots;")
q = f"""
INSERT INTO copilot.copilots(
id, "user", title, lnurl_toggle, wallet, animation1, animation2, animation3, animation1threshold, animation2threshold, animation3threshold, animation1webhook, animation2webhook, animation3webhook, lnurl_title, show_message, show_ack, show_price, amount_made, fullscreen_cam, iframe_url, "timestamp")
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, to_timestamp(%s));
"""
insert_to_pg(q, res.fetchall())
# NEW COPILOTS
q = f"""
INSERT INTO copilot.newer_copilots(
id, "user", title, lnurl_toggle, wallet, animation1, animation2, animation3, animation1threshold, animation2threshold, animation3threshold, animation1webhook, animation2webhook, animation3webhook, lnurl_title, show_message, show_ack, show_price, amount_made, fullscreen_cam, iframe_url, "timestamp")
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, to_timestamp(%s));
"""
insert_to_pg(q, res.fetchall())
elif schema == "events":
# EVENTS
res = sq.execute("SELECT * FROM events;")
q = f"""
INSERT INTO events.events(
id, wallet, name, info, closing_date, event_start_date, event_end_date, amount_tickets, price_per_ticket, sold, "time")
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, to_timestamp(%s));
"""
insert_to_pg(q, res.fetchall())
# EVENT TICKETS
res = sq.execute("SELECT * FROM ticket;")
q = f"""
INSERT INTO events.ticket(
id, wallet, event, name, email, registered, paid, "time")
VALUES (%s, %s, %s, %s, %s, %s::boolean, %s::boolean, to_timestamp(%s));
"""
insert_to_pg(q, res.fetchall())
elif schema == "example":
# Example doesn't have a database at the moment
pass
elif schema == "hivemind":
# Hivemind doesn't have a database at the moment
pass
elif schema == "jukebox":
# JUKEBOXES
res = sq.execute("SELECT * FROM jukebox;")
q = f"""
INSERT INTO jukebox.jukebox(
id, "user", title, wallet, inkey, sp_user, sp_secret, sp_access_token, sp_refresh_token, sp_device, sp_playlists, price, profit)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
# JUKEBOX PAYMENTS
res = sq.execute("SELECT * FROM jukebox_payment;")
q = f"""
INSERT INTO jukebox.jukebox_payment(
payment_hash, juke_id, song_id, paid)
VALUES (%s, %s, %s, %s::boolean);
"""
insert_to_pg(q, res.fetchall())
elif schema == "withdraw":
# WITHDRAW LINK
res = sq.execute("SELECT * FROM withdraw_link;")
q = f"""
INSERT INTO withdraw.withdraw_link (
id,
wallet,
title,
min_withdrawable,
max_withdrawable,
uses,
wait_time,
is_unique,
unique_hash,
k1,
open_time,
used,
usescsv
)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
# WITHDRAW HASH CHECK
res = sq.execute("SELECT * FROM hash_check;")
q = f"""
INSERT INTO withdraw.hash_check (id, lnurl_id)
VALUES (%s, %s);
"""
insert_to_pg(q, res.fetchall())
elif schema == "watchonly":
# WALLETS
res = sq.execute("SELECT * FROM wallets;")
q = f"""
INSERT INTO watchonly.wallets (
id,
"user",
masterpub,
title,
address_no,
balance
)
VALUES (%s, %s, %s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
# ADDRESSES
res = sq.execute("SELECT * FROM addresses;")
q = f"""
INSERT INTO watchonly.addresses (id, address, wallet, amount)
VALUES (%s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
# MEMPOOL
res = sq.execute("SELECT * FROM mempool;")
q = f"""
INSERT INTO watchonly.mempool ("user", endpoint)
VALUES (%s, %s);
"""
insert_to_pg(q, res.fetchall())
elif schema == "usermanager":
# USERS
res = sq.execute("SELECT * FROM users;")
q = f"""
INSERT INTO usermanager.users (id, name, admin, email, password)
VALUES (%s, %s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
# WALLETS
res = sq.execute("SELECT * FROM wallets;")
q = f"""
INSERT INTO usermanager.wallets (id, admin, name, "user", adminkey, inkey)
VALUES (%s, %s, %s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
elif schema == "tpos":
# TPOSS
res = sq.execute("SELECT * FROM tposs;")
q = f"""
INSERT INTO tpos.tposs (id, wallet, name, currency)
VALUES (%s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
elif schema == "tipjar":
# TIPJARS
res = sq.execute("SELECT * FROM TipJars;")
q = f"""
INSERT INTO tipjar.TipJars (id, name, wallet, onchain, webhook)
VALUES (%s, %s, %s, %s, %s);
"""
tipjars = res.fetchall()
insert_to_pg(q, tipjars)
fix_id("tipjar.tipjars_id_seq", tipjars)
# TIPS
res = sq.execute("SELECT * FROM Tips;")
q = f"""
INSERT INTO tipjar.Tips (id, wallet, name, message, sats, tipjar)
VALUES (%s, %s, %s, %s, %s, %s);
"""
insert_to_pg(q, res.fetchall())
elif schema == "subdomains":
# DOMAIN
res = sq.execute("SELECT * FROM domain;")
q = f"""
INSERT INTO subdomains.domain (
id,
wallet,
domain,
webhook,
cf_token,
cf_zone_id,
description,
cost,
amountmade,
allowed_record_types,
time
)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, to_timestamp(%s));
"""
insert_to_pg(q, res.fetchall())
# SUBDOMAIN
res = sq.execute("SELECT * FROM subdomain;")
q = f"""
INSERT INTO subdomains.subdomain (
id,
domain,
email,
subdomain,
ip,
wallet,
sats,
duration,
paid,
record_type,
time
)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s::boolean, %s, to_timestamp(%s));
"""
insert_to_pg(q, res.fetchall())
elif schema == "streamalerts":
# SERVICES
res = sq.execute("SELECT * FROM Services;")
q = f"""
INSERT INTO streamalerts.Services (
id,
state,
twitchuser,
client_id,
client_secret,
wallet,
onchain,
servicename,
authenticated,
token
)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s::boolean, %s);
"""
services = res.fetchall()
insert_to_pg(q, services)
fix_id("streamalerts.services_id_seq", services)
# DONATIONS
res = sq.execute("SELECT * FROM Donations;")
q = f"""
INSERT INTO streamalerts.Donations (
id,
wallet,
name,
message,
cur_code,
sats,
amount,
service,
posted
)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s::boolean);
"""
insert_to_pg(q, res.fetchall())
elif schema == "splitpayments":
<filename>wikigraphs/utils.py<gh_stars>1000+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Utility functions for the training script."""
import collections
import math
import random
from absl import flags
from absl import logging
import jax.numpy as jnp
import jraph
import numpy as np
import sklearn
from wikigraphs.data import paired_dataset as pd
from wikigraphs.data import tokenizers
from wikigraphs.data import wikitext as wt
from wikigraphs.model import graph_net as gn
from wikigraphs.model import sampler as transformer_sampler
from wikigraphs.model import transformer
FLAGS = flags.FLAGS
VOCAB_FILES_MAP = {
'wikitext': '/tmp/data/wikitext-vocab.csv',
'freebase2wikitext': '/tmp/data/text-vocab.csv',
}
GRAPH_VOCAB_FILE = '/tmp/data/graph-vocab.csv'
def init_tokenizer(dataset_name):
"""Initialie the tokenizer."""
logging.info('Loading tokenizer...')
tokenizer = tokenizers.WordTokenizer(VOCAB_FILES_MAP[dataset_name])
logging.info('Vocab size: %d', tokenizer.vocab_size)
return tokenizer
def init_graph_tokenizer():
"""Initialie the tokenizer."""
logging.info('Loading graph tokenizer...')
tokenizer = tokenizers.GraphTokenizer(GRAPH_VOCAB_FILE)
logging.info('Vocab size: %d', tokenizer.vocab_size)
return tokenizer
def get_dataset_class(dataset_name, model_type, job_mode='train'):
"""Get the dataset class used for all jobs."""
if dataset_name == 'freebase2wikitext':
if model_type == 'bow2text':
return pd.Bow2TextDataset
elif model_type == 'graph2text':
return pd.Graph2TextDataset
elif model_type == 'text':
if job_mode in ['train', 'eval']:
return pd.TextOnlyDataset
else:
# for sampling: taking the unique graphs for a fair comparison
return pd.Bow2TextDataset
else:
# Add other graph2text data here.
raise NotImplementedError()
else:
def dataset(graph_tokenizer, *args, **kwargs):
del graph_tokenizer
return wt.Dataset(*args, **kwargs)
return dataset
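# Typical use (illustrative sketch, not the exact call sites): the training
# script resolves the class once and then instantiates it, e.g.
#     dataset_class = get_dataset_class(FLAGS.dataset, FLAGS.model_type)
#     train_set = dataset_class(graph_tokenizer, ...)
# For the plain wikitext branch the returned callable simply drops the graph
# tokenizer argument and builds a wt.Dataset instead.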
def preprocess(batch, model_type, num_devices=1):
"""Preprocess the batch before sending to the model."""
if model_type == 'text':
if 'graphs' in batch:
del batch['graphs']
elif model_type == 'bow2text':
# Do nothing, bow2text data is already in a good form.
pass
else: # graph2text
if num_devices == 1:
graphs = gn.pad_graphs(jraph.batch(batch['graphs']))
else:
# We need to first batch graphs into num_devices batches.
graphs = gn.batch_graphs_by_device(batch['graphs'], num_devices)
# Then we pad them to the maximum graph size in the batch and concat.
# This way graphs can be distributed to each device through pmap.
graphs = gn.pad_graphs_by_device(graphs)
max_graph_size = gn.pad_size(graphs.n_node.max())
batch.update({
'graphs': graphs,
'max_graph_size': max_graph_size})
return batch
def text_model_fn(vocab_size):
return transformer.TransformerXL(
vocab_size=vocab_size,
emb_dim=FLAGS.emb_dim,
num_layers=FLAGS.num_layers,
num_heads=FLAGS.num_heads,
dropout_prob=FLAGS.dropout,
dropout_attn_prob=FLAGS.dropout_attn,
self_att_init_scale=FLAGS.self_att_init_scale,
dense_init_scale=FLAGS.dense_init_scale,
dense_dim=FLAGS.dense_dim,
tail_shrink_factor=FLAGS.tail_shrink_factor,
relative_pos_clamp_len=FLAGS.clamp_len or None)
def graph2text_model_fn(vocab_size):
"""Get graph2text transformer model."""
return transformer.Graph2TextTransformer(
vocab_size=vocab_size,
emb_dim=FLAGS.emb_dim,
num_layers=FLAGS.num_layers,
num_heads=FLAGS.num_heads,
dropout_prob=FLAGS.dropout,
dropout_attn_prob=FLAGS.dropout_attn,
self_att_init_scale=FLAGS.self_att_init_scale,
dense_init_scale=FLAGS.dense_init_scale,
dense_dim=FLAGS.dense_dim,
tail_shrink_factor=FLAGS.tail_shrink_factor,
relative_pos_clamp_len=FLAGS.clamp_len or None,
gnn_embed_dim=FLAGS.gnn_embed_dim,
gnn_num_layers=FLAGS.gnn_num_layers,
gnn_layer_norm=FLAGS.gnn_layer_norm)
def bow2text_model_fn(vocab_size):
"""Get the bow2text model."""
return transformer.Bow2TextTransformer(
vocab_size=vocab_size,
emb_dim=FLAGS.emb_dim,
num_layers=FLAGS.num_layers,
num_heads=FLAGS.num_heads,
dropout_prob=FLAGS.dropout,
dropout_attn_prob=FLAGS.dropout_attn,
self_att_init_scale=FLAGS.self_att_init_scale,
dense_init_scale=FLAGS.dense_init_scale,
dense_dim=FLAGS.dense_dim,
tail_shrink_factor=FLAGS.tail_shrink_factor,
relative_pos_clamp_len=FLAGS.clamp_len or None,
bow_embedding_dim=FLAGS.bow_embedding_dim,
bow_n_tokens=FLAGS.bow_n_tokens)
def build_loss_fn(vocab_size, cache_steps):
"""Build the appropriate loss function according to the configs."""
if FLAGS.model_type == 'text':
def loss_fn(data, is_training=True):
return text_model_fn(vocab_size=vocab_size).loss(
data['obs'], data['target'], data['mask'],
is_training=is_training,
should_reset=data['should_reset'],
cache_steps=cache_steps)
elif FLAGS.model_type == 'graph2text':
def loss_fn(data, max_graph_size, is_training=True):
return graph2text_model_fn(vocab_size=vocab_size).loss(
data['graphs'], max_graph_size, True,
data['obs'], data['target'], data['mask'],
is_training=is_training,
should_reset=data['should_reset'],
cache_steps=cache_steps)
elif FLAGS.model_type == 'bow2text':
def loss_fn(data, is_training=True):
return bow2text_model_fn(vocab_size=vocab_size).loss(
data['graphs'], data['obs'], data['target'], data['mask'],
is_training=is_training,
should_reset=data['should_reset'],
cache_steps=cache_steps)
else:
raise ValueError(f'Unknown model type "{FLAGS.model_type}".')
return loss_fn
def build_sampler(tokenizer, device=None):
"""Build the appropriate sampler according to the configs."""
if FLAGS.model_type == 'text':
model_fn = lambda prompts: text_model_fn(tokenizer.vocab_size)( # pylint: disable=g-long-lambda
prompts, is_training=False, cache_steps=FLAGS.sample_memory_size)
sampler_class = transformer_sampler.TransformerXLSampler
elif FLAGS.model_type == 'graph2text':
def model_fn(graphs, max_graph_size, prompts):
return graph2text_model_fn(tokenizer.vocab_size)(
graphs, max_graph_size, True, prompts, is_training=False,
cache_steps=FLAGS.sample_memory_size)
sampler_class = transformer_sampler.Graph2TextTransformerSampler
elif FLAGS.model_type == 'bow2text':
def model_fn(graphs, prompts):
return bow2text_model_fn(tokenizer.vocab_size)(
graphs, prompts, is_training=False,
cache_steps=FLAGS.sample_memory_size)
sampler_class = transformer_sampler.Bow2TextTransformerSampler
else:
raise ValueError(f'Unknown model type "{FLAGS.model_type}".')
sampler = sampler_class(model_fn, FLAGS.sampling_temperature, device)
return sampler
def schedule(i, lr_schedule, init_lr, min_lr_ratio, max_steps):
if lr_schedule == 'cosine':
cosine_decay = 0.5 * (1 + jnp.cos(jnp.pi * i / max_steps))
decayed = (1 - min_lr_ratio) * cosine_decay + min_lr_ratio
return init_lr * decayed
else:
return jnp.where(
i > 350000, init_lr / 3**3,
jnp.where(i > 250000, init_lr / 3**2,
jnp.where(i > 150000, init_lr / 3, init_lr)))
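# For example, with the 'cosine' schedule: at step i=0, cosine_decay is 1 and
# the rate equals init_lr; at i=max_steps, cosine_decay is 0 and the rate
# bottoms out at init_lr * min_lr_ratio. The non-cosine branch is a fixed
# staircase that drops the rate by a further factor of 3 at 150k, 250k and
# 350k steps.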
def evaluate(eval_set, initial_state, updater, eval_batch_size=1,
preprocess_fn=None, max_eval_samples=-1,
print_progress_every=None):
"""Evaluate a model on given dataset."""
total_losses = []
total_counts = []
token_accuracy = []
seq_accuracy = []
state = initial_state
step = state['step']
for i, batch in enumerate(eval_set):
state, eval_out = updater.eval_return_state(state, preprocess_fn(batch))
total_losses.append(eval_out['total_loss'])
total_counts.append(eval_out['total_count'])
token_accuracy.append(
eval_out['token_accuracy'] * eval_out['total_count'])
seq_accuracy.append(eval_out['seq_accuracy'])
if print_progress_every and (i + 1) % print_progress_every == 0:
total_loss = float(jnp.array(total_losses).sum())
total_count = float(jnp.array(total_counts).sum())
avg_loss = total_loss / total_count
bpc = avg_loss * np.log2(np.e)
perplexity = np.exp(avg_loss)
logging.info(
'Evaluated %d batches, total tokens %d, average loss %g,'
' bpc %g, perplexity %g.',
i + 1, total_count, avg_loss, bpc, perplexity)
if 0 < max_eval_samples <= (i + 1) * eval_batch_size:
break
total_loss = jnp.array(total_losses).sum()
total_count = jnp.array(total_counts).sum()
avg_loss = total_loss / total_count
eval_out = dict(total_loss=float(total_loss),
total_count=float(total_count),
loss=float(avg_loss),
token_accuracy=float(
jnp.array(token_accuracy).sum() / total_count),
seq_accuracy=float(
jnp.array(seq_accuracy).sum() / len(seq_accuracy)),
step=float(step),
bits_per_token=float(avg_loss) * np.log2(np.e),
perplexity=np.exp(float(avg_loss)))
return eval_out, state
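# Note: the losses are natural-log cross-entropies, so bits-per-token is
# loss * log2(e) (equivalently loss / ln 2) and perplexity is exp(loss).
# For instance, an average loss of 3.0 corresponds to about 4.33 bits per
# token and a perplexity of roughly 20.1.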
def extract_title(text, tokenizer):
r"""Extract the title in the text.
The wikitext articles is in the format of `\n = TITLE = \n \n...`. We extract
the title as the tokens from the start to when the `\n \n` first appears.
Args:
text: tokenized input text using `tokenizer`.
tokenizer: text tokenizer.
Returns:
title_end_idx: a numpy.array of shape (batch_size,), it indicates the index
in `text` that marks the end of the title.
"""
batch_size, text_length = text.shape
title_end_idx = np.ones(batch_size, dtype=np.int32)
newline_token = tokenizer.encode('\n')[0]
for b in range(batch_size):
prev_token = 1 # start tokens
for i in range(1, text_length): # skip start token
# when we first see '\n \n', that is the title
if prev_token == newline_token and text[b, i] == newline_token:
title_end_idx[b] = i
break
else:
prev_token = text[b, i]
return title_end_idx
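# Worked example (token ids here are hypothetical): if '\n' encodes to 5 and a
# batch row is tokenized as [<bos>, 5, '=', 'Title', '=', 5, 5, ...], the scan
# first sees two consecutive newline tokens at positions 5 and 6, so
# title_end_idx for that row is 6 (the index of the second '\n'), which is how
# construct_prompts below knows to copy text[b, 1:7] as the title prompt.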
def construct_prompts(text, batch_size, sample_length, tokenizer, prompt_title):
"""Construct prompts for text generation.
Args:
text: tokenized input text using `tokenizer`.
batch_size: the size of the batch.
sample_length: the length of the sample to be generated.
tokenizer: text tokenizer.
prompt_title: whether to return a prompt with the title of the `text`.
Returns:
prompts: a numpy.array of shape [batch_size, sample_length], in which -1
indicates tokens that need to be generated using the sampler.
"""
prompts = -np.ones((batch_size, sample_length), dtype=np.int32)
prompts[:, 0] = tokenizer.bos_token()
if prompt_title and text is not None:
title_end_idx = extract_title(text, tokenizer)
for i in range(batch_size):
prompts[i, 1:title_end_idx[i]+1] = text[i, 1:title_end_idx[i]+1]
return prompts
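# For example, with batch_size=2, sample_length=6 and prompt_title=False the
# returned array is simply
#     [[bos, -1, -1, -1, -1, -1],
#      [bos, -1, -1, -1, -1, -1]]
# whereas with prompt_title=True the leading -1s of each row are overwritten
# by that row's title tokens (up to and including the second newline), leaving
# -1 only in the positions the sampler still needs to fill.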
def generate_samples(params, tokenizer, sampler, model_type, prompts, graphs):
"""Generate a batch of samples using a sampler."""
if model_type == 'text':
samples = sampler.sample(params, prompts)
elif model_type == 'graph2text':
samples = sampler.sample(params, prompts, graphs, pad=True)
elif model_type == 'bow2text':
samples = sampler.sample(params, prompts, graphs)
else:
raise ValueError(f'Unknown model_type {model_type}')
return [tokenizer.decode(s) for s in samples], samples
def take_unique_graphs(data_iter, model_type):
"""Filter data such that it only returns batches with unique graphs."""
prev_graphs = None
for batch in data_iter:
graphs = batch.get('graphs', None)
# If there's no graph in batch, don't do any filtering
if graphs is None:
yield batch
else:
if prev_graphs is None:
prev_graphs = graphs
yield batch
else:
if model_type == 'graph2text':
not_same_graph = (prev_graphs.nodes.shape != graphs.nodes.shape or
not (prev_graphs.nodes == graphs.nodes).all())
else:
not_same_graph = (prev_graphs.shape != graphs.shape or
not (prev_graphs == graphs).all())
if not_same_graph:
prev_graphs = graphs
yield batch
def compute_map_sklearn(pred, gt):
"""Computes mAP using scikit-learn."""
assert len(gt.shape) == len(pred.shape) == 2, (
'gt should be a one-hot encoding with the same shape as pred')
ap = [
sklearn.metrics.average_precision_score(
gt[c, :], pred[c, :], average=None)
for c in range(gt.shape[0])
]
return sum(ap) / len(ap)
def compute_recall_at_k(pred, k=1):
"""Computes recall@1 score."""
num_articles = pred.shape[1]
return sklearn.metrics.top_k_accuracy_score(
np.arange(num_articles), pred, k=k)
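# Here `pred` is a square (num_articles, num_articles) score matrix whose row
# i scores article i against every candidate text, and the ground-truth label
# for row i is i itself (np.arange), so recall@k is the fraction of rows whose
# true article lands in the top-k scores; an identity-like score matrix would
# give recall@1 == 1.0.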
def compute_text_graph_relevance(
eval_set, initial_state, updater, eval_batch_size=1, preprocess_fn=None,
print_progress_every=None):
"""Compute the text and graph relevance a model on given dataset."""
assert eval_batch_size == 1
num_articles = eval_set.num_articles
tokens_count = np.zeros((num_articles, num_articles))
log_probs
NetMRI.
:type EndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EndTime: The date and time the record was last modified in NetMRI.
:type EndTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the local interface for this Vrrp Router Statistics table entry.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the local interface for this Vrrp Router Statistics table entry.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgMemberID: The internal NetMRI identifier of Iprg member in the vrrp router statistics.
:type IprgMemberID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgMemberID: The internal NetMRI identifier of Iprg member in the vrrp router statistics.
:type IprgMemberID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgNumber: The unique IprgNumber in the Vrrp router.
:type IprgNumber: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgNumber: The unique IprgNumber in the Vrrp router.
:type IprgNumber: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StartTime: The date and time the record was initially created in NetMRI.
:type StartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartTime: The date and time the record was initially created in NetMRI.
:type StartTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpAddressListErrors: The number of address list errors in the Vrrp router statistic
:type VrrpAddressListErrors: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpAddressListErrors: The number of address list errors in the Vrrp router statistic
:type VrrpAddressListErrors: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpAdvertiseIntervalErrors: The total number of interval errors in the Vrrp Router Statistics.
:type VrrpAdvertiseIntervalErrors: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpAdvertiseIntervalErrors: The total number of interval errors in the Vrrp Router Statistics.
:type VrrpAdvertiseIntervalErrors: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpAdvertiseRcvd: The received advertise of the Vrrp router statistics.
:type VrrpAdvertiseRcvd: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpAdvertiseRcvd: The received advertise of the Vrrp router statistics.
:type VrrpAdvertiseRcvd: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpAuthFailures: The total number of authentication failures occurred in the Vrrp router statistics.
:type VrrpAuthFailures: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpAuthFailures: The total number of authentication failures occurred in the Vrrp router statistics.
:type VrrpAuthFailures: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpAuthTypeMismatch: The mismatch authentication type.
:type VrrpAuthTypeMismatch: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpAuthTypeMismatch: The mismatch authentication type.
:type VrrpAuthTypeMismatch: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpBecomeMaster: The master of the Vrrp Router Statistics.
:type VrrpBecomeMaster: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpBecomeMaster: The master of the Vrrp Router Statistics.
:type VrrpBecomeMaster: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpInvalidAuthType: The Invalid Authentication type of Vrrp Router Statistics.
:type VrrpInvalidAuthType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpInvalidAuthType: The Invalid Authentication type of Vrrp Router Statistics.
:type VrrpInvalidAuthType: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpInvalidTypePktsRcvd: The packet received with Invalid Type.
:type VrrpInvalidTypePktsRcvd: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpInvalidTypePktsRcvd: The packet received with Invalid Type.
:type VrrpInvalidTypePktsRcvd: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpIpTtlErrors: The total number of IP address error occurred in the Vrrp Router Statistics.
:type VrrpIpTtlErrors: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpIpTtlErrors: The total number of IP address error occurred in the Vrrp Router Statistics.
:type VrrpIpTtlErrors: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpPacketLengthErrors: The number of packet length errors in the Vrrp Router Statistics.
:type VrrpPacketLengthErrors: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpPacketLengthErrors: The number of packet length errors in the Vrrp Router Statistics.
:type VrrpPacketLengthErrors: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpPriorityZeroPktsRcvd: The packet received with priority zero.
:type VrrpPriorityZeroPktsRcvd: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpPriorityZeroPktsRcvd: The packet received with priority zero.
:type VrrpPriorityZeroPktsRcvd: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpPriorityZeroPktsSent: The packet sent with priority zero.
:type VrrpPriorityZeroPktsSent: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpPriorityZeroPktsSent: The packet sent with priority zero.
:type VrrpPriorityZeroPktsSent: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics.
:type VrrpRouterStatsID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics.
:type VrrpRouterStatsID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifIndex: The SNMP index for the local interface for this Vrrp router statistics table entry.
:type ifIndex: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifIndex: The SNMP index for the local interface for this Vrrp router statistics table entry.
:type ifIndex: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the vrrp router stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the vrrp router stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
fail
status = fsf_write.CloseFile()
check_status("fsf_write.CloseFile", status)
assert (status == fsf.Status.SUCCESS)
def test_fsf_003_03(self):
"""
Exercise FSF 'Close File' API
Instantiate FSF using read utility, then CloseFile
"""
print("\n===================================================="
"\nTestFSFCloseFile (test_fsf_003_03) : "
"Instantiate FSF using read utility, then CloseFile\n ")
# Instantiate FSF Read utility
fsf_read = fsf.FSF_Common(fsf.Mode.READ)
# CloseFile
# TODO: This should fail
status = fsf_read.CloseFile()
check_status("fsf_read.CloseFile", status)
assert (status == fsf.Status.SUCCESS)
def test_fsf_003_04(self, cmdopt_write_dir):
"""
Exercise FSF 'Close File' API
Instantiate FSF using write utility and call CreateFsfFile with NULL as argument, then CloseFile
"""
print("\n===================================================="
"\nTestFSFCloseFile (test_fsf_003_04) : "
"Instantiate FSF using write utilityand call CreateFsfFile with NULL as argument then CloseFile\n ")
# Instantiate FSF Write utility
fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
# Set FSF filename
file_name = ""
write_file = cmdopt_write_dir + file_name
# Create FSF file for writing
status = fsf_write.CreateFsfFile(write_file)
check_status("fsf_write.CreateFsfFile", status)
assert (status == fsf.Status.FILE_NOT_CREATED)
# CloseFile
# TODO: This should fail
status = fsf_write.CloseFile()
check_status("fsf_write.CloseFile", status)
assert (status == fsf.Status.SUCCESS)
# Check if file exists
status = path.isfile(write_file)
check_status("path.isfile", status)
assert status is False
def test_fsf_003_05(self, cmdopt_read_dir):
"""
Exercise FSF 'Close File' API
Instantiate FSF using read utility and call OpenFile with NULL as argument then CloseFile
"""
print("\n===================================================="
"\nTestFSFCloseFile (test_fsf_003_05) : "
"Instantiate FSF using read utility and call OpenFile with NULL as argument then CloseFile\n ")
# Instantiate FSF Read utility
fsf_read = fsf.FSF_Common(fsf.Mode.READ)
# Set FSF filename
read_file = ""
# Open FSF file for reading
status = fsf_read.OpenFile(read_file)
check_status("fsf_read.OpenFile", status)
assert (status == fsf.Status.FILE_DOESNOT_EXIST)
# Close FSF file
# TODO: This should fail
status = fsf_read.CloseFile()
assert (status == fsf.Status.SUCCESS)
##############################
#
# FSF parser SetFileHeader API test
#
class TestFSFSetFileHeader:
@pytest.mark.smoke
def test_fsf_004_00(self, cmdopt_write_dir, cmdopt_raw_data):
"""
Exercise FSF 'SetFileHeader' API
Instantiate FSF using write utility and call CreateFsfFile with valid FSF file name.
SetFileHeader with a previously instantiated fileHeader with valid values.
"""
print("\n===================================================="
"\nTestFSFSetFileHeader (test_fsf_004_00) : "
"\nInstantiate FSF using write utility and call CreateFsfFile with valid FSF file name. "
"SetFileHeader with a previously instantiated fileHeader with valid values. ")
# Instantiate FSF Write utility
fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
# Set FSF filename
file_name = "\\test_fsf_004_00.fsf"
write_file = cmdopt_write_dir + file_name
# Create FSF file for writing
status = fsf_write.CreateFsfFile(write_file)
check_status("fsf_write.CreateFsfFile", status)
assert (status == fsf.Status.SUCCESS)
# Get data to use
fsf_data = cmdopt_raw_data
assert (fsf_data.fileHeader is not None)
status = fsf_write.SetFileHeader(fsf_data.fileHeader)
check_status("fsf_write.SetFileHeader", status)
assert (status == fsf.Status.SUCCESS)
# Check if file exists
status = path.isfile(write_file)
check_status("path.isfile", status)
assert status
def test_fsf_004_01(self, cmdopt_write_dir, cmdopt_raw_data):
"""
Exercise FSF 'SetFileHeader' API
Instantiate FSF using read utility and call CreateFsfFile with valid FSF file name.
SetFileHeader with a previously instantiated fileHeader with valid values.
"""
print("\n===================================================="
"\nTestFSFSetFileHeader (test_fsf_004_01) : "
"Instantiate FSF using read utility and call CreateFsfFile with valid FSF file name. "
"SetFileHeader with a previously instantiated fileHeader with valid values.\n ")
# Instantiate FSF Read utility
fsf_read = fsf.FSF_Common(fsf.Mode.READ)
# Set FSF filename
file_name = "\\test_fsf_004_01.fsf"
write_file = cmdopt_write_dir + file_name
# Create FSF file for writing
status = fsf_read.CreateFsfFile(write_file)
check_status("fsf_write.CreateFsfFile", status)
# assert (status == fsf.Status.SUCCESS)
# Get data to use
fsf_data = cmdopt_raw_data
assert (fsf_data.fileHeader is not None)
status = fsf_read.SetFileHeader(fsf_data.fileHeader)
check_status("fsf_write.SetFileHeader", status)
assert (status == fsf.Status.INVALID_OPERATION)
def test_fsf_004_02(self, cmdopt_read_dir, cmdopt_raw_data):
"""
Exercise FSF 'SetFileHeader' API
Instantiate FSF using read utility and call OpenFile with valid FSF file name.
SetFileHeader with a previously instantiated fileHeader with valid values.
"""
print("\n===================================================="
"\nTestFSFSetFileHeader (test_fsf_004_02) : "
"Instantiate FSF using read utility and call OpenFile with valid FSF file name. "
"SetFileHeader with a previously instantiated fileHeader with valid values.\n ")
# Instantiate FSF Read utility
fsf_read = fsf.FSF_Common(fsf.Mode.READ)
# Create dummy read file
file_name = "\\test_fsf_004_02.fsf"
status = create_dummy_read_file(cmdopt_read_dir, file_name)
assert status is True
# Set FSF filename
read_file = cmdopt_read_dir + file_name
# Open FSF file for reading
status = fsf_read.OpenFile(read_file)
check_status("fsf_read.OpenFile", status)
assert (status == fsf.Status.SUCCESS)
# Get data to use
fsf_data = cmdopt_raw_data
assert (fsf_data.fileHeader is not None)
status = fsf_read.SetFileHeader(fsf_data.fileHeader)
check_status("fsf_write.SetFileHeader", status)
assert (status == fsf.Status.INVALID_OPERATION)
def test_fsf_004_03(self, cmdopt_write_dir):
"""
Exercise FSF 'SetFileHeader' API
Instantiate using write utility FSF and call CreateFsfFile. SetFileHeader with a fileHeader with no values.
"""
print("\n===================================================="
"\nTestFSFSetFileHeader (test_fsf_004_03) : "
"\nInstantiate using write utility FSF and call CreateFsfFile. "
"SetFileHeader with a fileHeader with no values.")
# Instantiate FSF Write utility
fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
# Set FSF filename
file_name = "\\test_fsf_004_03.fsf"
write_file = cmdopt_write_dir + file_name
# Create FSF file for writing
status = fsf_write.CreateFsfFile(write_file)
check_status("fsf_write.CreateFsfFile", status)
assert (status == fsf.Status.SUCCESS)
# Get data to use
fsf_data = FsfData()
status = fsf_write.SetFileHeader(fsf_data.fileHeader)
check_status("fsf_write.SetFileHeader", status)
assert (status == fsf.Status.SUCCESS)
# Check if file exists
status = path.isfile(write_file)
check_status("path.isfile", status)
assert status
##############################
#
# FSF parser GetFileHeader API test
#
class TestFSFGetFileHeader:
@pytest.mark.smoke
def test_fsf_005_00(self, cmdopt_read_dir):
"""
Exercise FSF 'GetFileHeader' API
Instantiate FSF using read utility and call OpenFile with valid FSF file name.
GetFileHeader from an FSF file with complete data.
"""
print("\n===================================================="
"\nTestFSFGetFileHeader (test_fsf_005_00) : "
"Instantiate FSF using read utility and call OpenFile with valid FSF file name."
"GetFileHeader from an FSF file with complete data.\n ")
# Instantiate FSF Read utility
fsf_read = fsf.FSF_Common(fsf.Mode.READ)
# Set FSF filename
read_file = cmdopt_read_dir + readDataFileName
# Open FSF file for reading
status = fsf_read.OpenFile(read_file)
check_status("fsf_read.OpenFile", status)
assert (status == fsf.Status.SUCCESS)
# Instantiate fsf_data
fsf_data = FsfData()
# Read file header
status = fsf_read.GetFileHeader(fsf_data.fileHeader)
check_status("fsf_read.GetFileHeader", status)
assert (status == fsf.Status.SUCCESS)
assert (fsf_data.fileHeader is not None)
def test_fsf_005_01(self, cmdopt_read_dir):
"""
Exercise FSF 'GetFileHeader' API
Instantiate FSF using read utility and call OpenFile with NULL FSF file name.
GetFileHeader from an FSF file with complete data.
"""
print("\n===================================================="
"\nTestFSFGetFileHeader (test_fsf_005_01) : "
"Instantiate FSF using read utility and call OpenFile with NULL FSF file name. "
"GetFileHeader from an FSF file with complete data.\n ")
# Instantiate FSF Read utility
fsf_read = fsf.FSF_Common(fsf.Mode.READ)
# Set FSF filename
read_file = ""
# Open FSF file for reading
status = fsf_read.OpenFile(read_file)
check_status("fsf_read.OpenFile", status)
assert (status == fsf.Status.FILE_DOESNOT_EXIST)
# Instantiate fsf_data
fsf_data = FsfData()
# Read file header
status = fsf_read.GetFileHeader(fsf_data.fileHeader)
check_status("fsf_read.GetFileHeader", status)
assert (status == fsf.Status.FILE_NOT_OPEN)
def test_fsf_005_02(self, cmdopt_write_dir):
"""
Exercise FSF 'GetFileHeader' API
Instantiate FSF using write utility and call CreateFsfFile with valid FSF file name.
GetFileHeader from an FSF file that was created for writing.
"""
print("\n===================================================="
"\nTestFSFGetFileHeader (test_fsf_005_02) : "
"Instantiate FSF using write utility and call CreateFsfFile with valid FSF file name. "
"GetFileHeader from an FSF file .\n ")
# Instantiate FSF Write utility
fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
# Set FSF filename
file_name = "\\test_fsf_005_02.fsf"
write_file = cmdopt_write_dir + file_name
# Create FSF file for writing
status = fsf_write.CreateFsfFile(write_file)
check_status("fsf_write.CreateFsfFile", status)
assert (status == fsf.Status.SUCCESS)
# Instantiate fsf_data
fsf_data = FsfData()
# Read file header
status = fsf_write.GetFileHeader(fsf_data.fileHeader)
check_status("fsf_read.GetFileHeader", status)
assert (status == fsf.Status.INVALID_OPERATION)
##############################
#
# FSF parser SetStreamInfo API test
#
class TestFSFSetStreamInfo:
@pytest.mark.smoke
def test_fsf_006_00(self, cmdopt_write_dir, cmdopt_raw_data):
"""
Exercise FSF 'SetStreamInfo' API
Instantiate FSF using write utility and call CreateFsfFile with valid FSF file name
then SetStreamInfo previously instantiated streamInfo with valid values.
"""
print("\n===================================================="
"\nTestFSFSetStreamInfo (test_fsf_006_00) : "
"Instantiate FSF using write utility and call CreateFsfFile with valid FSF file name "
"then SetStreamInfo previously instantiated streamInfo with valid values.\n ")
# Instantiate FSF Write utility
fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
# Set FSF filename
file_name = "\\test_fsf_006_00.fsf"
write_file = cmdopt_write_dir + file_name
# Create FSF file for writing
status = fsf_write.CreateFsfFile(write_file)
check_status("fsf_write.CreateFsfFile", status)
assert (status == fsf.Status.SUCCESS)
# Get data to use
fsf_data = cmdopt_raw_data
assert (fsf_data.fileHeader is not None)
# Set file header
status = fsf_write.SetFileHeader(fsf_data.fileHeader)
check_status("fsf_write.SetFileHeader", status)
assert (status == fsf.Status.SUCCESS)
# Set stream info
for streamIdx in range(fsf_data.fileHeader.nStreams):
status = fsf_write.SetStreamInfo(streamIdx, fsf_data.streamInfo[streamIdx])
assert (status == fsf.Status.SUCCESS)
check_status("fsf_write.SetStreamInfo: " + str(fsf_data.fileHeader.nStreams) + " streams.", status)
# Check if file exists
status = path.isfile(write_file)
check_status("path.isfile", status)
assert status is True
def test_fsf_006_01(self, cmdopt_write_dir, cmdopt_raw_data):
"""
Exercise FSF 'SetStreamInfo' API
Instantiate FSF using write utility and call CreateFsfFile with valid FSF file name.
4): (0, 1),
(7, 3, -3, 5): (0, 1),
(7, 3, -2, -5): (-1, 1),
(7, 3, -2, -4): (-1, 1),
(7, 3, -2, -3): (-1, 0),
(7, 3, -2, -2): (0, 1),
(7, 3, -2, -1): (0, 1),
(7, 3, -2, 0): (1, 1),
(7, 3, -2, 1): (1, 1),
(7, 3, -2, 2): (1, 1),
(7, 3, -2, 3): (1, 1),
(7, 3, -2, 4): (1, 1),
(7, 3, -2, 5): (1, 0),
(7, 3, -1, -5): (0, 1),
(7, 3, -1, -4): (0, 1),
(7, 3, -1, -3): (-1, 1),
(7, 3, -1, -2): (-1, 1),
(7, 3, -1, -1): (1, 1),
(7, 3, -1, 0): (1, 1),
(7, 3, -1, 1): (1, 1),
(7, 3, -1, 2): (1, 1),
(7, 3, -1, 3): (1, 1),
(7, 3, -1, 4): (1, 1),
(7, 3, -1, 5): (1, 0),
(7, 3, 0, -5): (1, 0),
(7, 3, 0, -4): (1, 0),
(7, 3, 0, -3): (1, 0),
(7, 3, 0, -2): (1, -1),
(7, 3, 0, -1): (1, 1),
(7, 3, 0, 0): (1, 1),
(7, 3, 0, 1): (1, 1),
(7, 3, 0, 2): (1, 1),
(7, 3, 0, 3): (1, 1),
(7, 3, 0, 4): (1, 1),
(7, 3, 0, 5): (1, 0),
(7, 3, 1, -5): (1, 0),
(7, 3, 1, -4): (1, 0),
(7, 3, 1, -3): (1, 0),
(7, 3, 1, -2): (1, -1),
(7, 3, 1, -1): (0, 1),
(7, 3, 1, 0): (0, 1),
(7, 3, 1, 1): (0, 1),
(7, 3, 1, 2): (0, 1),
(7, 3, 1, 3): (0, 1),
(7, 3, 1, 4): (0, 1),
(7, 3, 1, 5): (0, 1),
(7, 3, 2, -5): (0, 1),
(7, 3, 2, -4): (0, 1),
(7, 3, 2, -3): (0, 0),
(7, 3, 2, -2): (1, 1),
(7, 3, 2, -1): (1, 1),
(7, 3, 2, 0): (-1, 1),
(7, 3, 2, 1): (-1, 1),
(7, 3, 2, 2): (-1, 1),
(7, 3, 2, 3): (-1, 1),
(7, 3, 2, 4): (-1, 1),
(7, 3, 2, 5): (-1, 1),
(7, 3, 3, -5): (0, 1),
(7, 3, 3, -4): (0, 1),
(7, 3, 3, -3): (0, 1),
(7, 3, 3, -2): (0, 1),
(7, 3, 3, -1): (0, 1),
(7, 3, 3, 0): (0, 1),
(7, 3, 3, 1): (0, 1),
(7, 3, 3, 2): (0, 1),
(7, 3, 3, 3): (-1, 1),
(7, 3, 3, 4): (-1, 1),
(7, 3, 3, 5): (-1, 1),
(7, 3, 4, -5): (0, 1),
(7, 3, 4, -4): (0, 1),
(7, 3, 4, -3): (0, 1),
(7, 3, 4, -2): (0, 1),
(7, 3, 4, -1): (0, 1),
(7, 3, 4, 0): (0, 1),
(7, 3, 4, 1): (0, 1),
(7, 3, 4, 2): (0, 1),
(7, 3, 4, 3): (0, 1),
(7, 3, 4, 4): (0, 1),
(7, 3, 4, 5): (0, 1),
(7, 3, 5, -5): (0, 1),
(7, 3, 5, -4): (0, 1),
(7, 3, 5, -3): (0, 1),
(7, 3, 5, -2): (0, 1),
(7, 3, 5, -1): (0, 1),
(7, 3, 5, 0): (0, 1),
(7, 3, 5, 1): (0, 1),
(7, 3, 5, 2): (0, 1),
(7, 3, 5, 3): (0, 1),
(7, 3, 5, 4): (0, 1),
(7, 3, 5, 5): (0, 1),
(7, 4, -5, -5): (0, 1),
(7, 4, -5, -4): (0, 0),
(7, 4, -5, -3): (-1, -1),
(7, 4, -5, -2): (0, 1),
(7, 4, -5, -1): (0, 1),
(7, 4, -5, 0): (0, 1),
(7, 4, -5, 1): (0, 1),
(7, 4, -5, 2): (0, 1),
(7, 4, -5, 3): (0, 1),
(7, 4, -5, 4): (0, 1),
(7, 4, -5, 5): (0, 1),
(7, 4, -4, -5): (-1, 1),
(7, 4, -4, -4): (-1, 0),
(7, 4, -4, -3): (-1, -1),
(7, 4, -4, -2): (0, 1),
(7, 4, -4, -1): (0, 1),
(7, 4, -4, 0): (0, 1),
(7, 4, -4, 1): (0, 1),
(7, 4, -4, 2): (0, 1),
(7, 4, -4, 3): (0, 1),
(7, 4, -4, 4): (0, 1),
(7, 4, -4, 5): (0, 1),
(7, 4, -3, -5): (-1, 1),
(7, 4, -3, -4): (-1, 0),
(7, 4, -3, -3): (-1, -1),
(7, 4, -3, -2): (0, 1),
(7, 4, -3, -1): (0, 1),
(7, 4, -3, 0): (0, 1),
(7, 4, -3, 1): (0, 1),
(7, 4, -3, 2): (0, 1),
(7, 4, -3, 3): (0, 1),
(7, 4, -3, 4): (0, 1),
(7, 4, -3, 5): (0, 1),
(7, 4, -2, -5): (-1, 1),
(7, 4, -2, -4): (-1, 0),
(7, 4, -2, -3): (0, 1),
(7, 4, -2, -2): (0, 1),
(7, 4, -2, -1): (0, 1),
(7, 4, -2, 0): (1, 1),
(7, 4, -2, 1): (1, 1),
(7, 4, -2, 2): (1, 1),
(7, 4, -2, 3): (1, 1),
(7, 4, -2, 4): (1, 1),
(7, 4, -2, 5): (1, 0),
(7, 4, -1, -5): (0, 1),
(7, 4, -1, -4): (-1, 1),
(7, 4, -1, -3): (-1, 1),
(7, 4, -1, -2): (-1, 1),
(7, 4, -1, -1): (1, 1),
(7, 4, -1, 0): (1, 1),
(7, 4, -1, 1): (1, 1),
(7, 4, -1, 2): (1, 1),
(7, 4, -1, 3): (1, 1),
(7, 4, -1, 4): (1, 1),
(7, 4, -1, 5): (1, 0),
(7, 4, 0, -5): (1, 0),
(7, 4, 0, -4): (1, 0),
(7, 4, 0, -3): (1, -1),
(7, 4, 0, -2): (-1, 1),
(7, 4, 0, -1): (1, 1),
(7, 4, 0, 0): (1, 1),
(7, 4, 0, 1): (1, 1),
(7, 4, 0, 2): (0, 1),
(7, 4, 0, 3): (1, 1),
(7, 4, 0, 4): (0, 1),
(7, 4, 0, 5): (0, 1),
(7, 4, 1, -5): (1, 0),
(7, 4, 1, -4): (1, 0),
(7, 4, 1, -3): (1, -1),
(7, 4, 1, -2): (1, 1),
(7, 4, 1, -1): (0, 1),
(7, 4, 1, 0): (0, 1),
(7, 4, 1, 1): (0, 1),
(7, 4, 1, 2): (-1, 1),
(7, 4, 1, 3): (0, 1),
(7, 4, 1, 4): (-1, 1),
(7, 4, 1, 5): (-1, 1),
(7, 4, 2, -5): (0, 1),
(7, 4, 2, -4): (0, 0),
(7, 4, 2, -3): (1, 1),
(7, 4, 2, -2): (1, 1),
(7, 4, 2, -1): (1, 1),
(7, 4, 2, 0): (-1, 1),
(7, 4, 2, 1): (-1, 1),
(7, 4, 2, 2): (-1, 1),
(7, 4, 2, 3): (-1, 1),
(7, 4, 2, 4): (-1, 1),
(7, 4, 2, 5): (-1, 1),
(7, 4, 3, -5): (0, 1),
(7, 4, 3, -4): (0, 1),
(7, 4, 3, -3): (0, 1),
(7, 4, 3, -2): (0, 1),
(7, 4, 3, -1): (0, 1),
(7, 4, 3, 0): (0, 1),
(7, 4, 3, 1): (0, 1),
(7, 4, 3, 2): (0, 1),
(7, 4, 3, 3): (-1, 1),
(7, 4, 3, 4): (-1, 1),
(7, 4, 3, 5): (-1, 1),
(7, 4, 4, -5): (0, 1),
(7, 4, 4, -4): (0, 1),
(7, 4, 4, -3): (0, 1),
(7, 4, 4, -2): (0, 1),
(7, 4, 4, -1): (0, 1),
(7, 4, 4, 0): (0, 1),
(7, 4, 4, 1): (0, 1),
(7, 4, 4, 2): (0, 1),
(7, 4, 4, 3): (0, 1),
(7, 4, 4, 4): (0, 1),
(7, 4, 4, 5): (0, 1),
(7, 4, 5, -5): (0, 1),
(7, 4, 5, -4): (0, 1),
(7, 4, 5, -3): (0, 1),
(7, 4, 5, -2): (0, 1),
(7, 4, 5, -1): (0, 1),
(7, 4, 5, 0): (0, 1),
(7, 4, 5, 1): (0, 1),
(7, 4, 5, 2): (0, 1),
(7, 4, 5, 3): (0, 1),
(7, 4, 5, 4): (0, 1),
(7, 4, 5, 5): (0, 1),
(7, 5, -5, -5): (0, 0),
(7, 5, -5, -4): (-1, -1),
(7, 5, -5, -3): (0,
import os
import json
import mock
import pytest
from chalice.config import Config
from chalice import package
from chalice.constants import LAMBDA_TRUST_POLICY
from chalice.deploy.appgraph import ApplicationGraphBuilder, DependencyBuilder
from chalice.awsclient import TypedAWSClient
from chalice.deploy.deployer import BuildStage
from chalice.deploy import models
from chalice.deploy.swagger import SwaggerGenerator
from chalice.package import PackageOptions
from chalice.utils import OSUtils
@pytest.fixture
def mock_swagger_generator():
return mock.Mock(spec=SwaggerGenerator)
def test_can_create_app_packager():
config = Config()
options = PackageOptions(mock.Mock(spec=TypedAWSClient))
packager = package.create_app_packager(config, options)
assert isinstance(packager, package.AppPackager)
def test_can_create_terraform_app_packager():
config = Config()
options = PackageOptions(mock.Mock(spec=TypedAWSClient))
packager = package.create_app_packager(config, options, 'terraform')
assert isinstance(packager, package.AppPackager)
def test_template_post_processor_moves_files_once():
mock_osutils = mock.Mock(spec=OSUtils)
p = package.SAMCodeLocationPostProcessor(mock_osutils)
template = {
'Resources': {
'foo': {
'Type': 'AWS::Serverless::Function',
'Properties': {
'CodeUri': 'old-dir.zip',
}
},
'bar': {
'Type': 'AWS::Serverless::Function',
'Properties': {
'CodeUri': 'old-dir.zip',
}
},
}
}
p.process(template, config=None,
outdir='outdir', chalice_stage_name='dev')
mock_osutils.copy.assert_called_with(
'old-dir.zip', os.path.join('outdir', 'deployment.zip'))
assert mock_osutils.copy.call_count == 1
assert template['Resources']['foo']['Properties']['CodeUri'] == (
'./deployment.zip'
)
assert template['Resources']['bar']['Properties']['CodeUri'] == (
'./deployment.zip'
)
def test_terraform_post_processor_moves_files_once():
mock_osutils = mock.Mock(spec=OSUtils)
p = package.TerraformCodeLocationPostProcessor(mock_osutils)
template = {
'resource': {
'aws_lambda_function': {
'foo': {'filename': 'old-dir.zip'},
'bar': {'filename': 'old-dir.zip'},
}
}
}
p.process(template, config=None,
outdir='outdir', chalice_stage_name='dev')
mock_osutils.copy.assert_called_with(
'old-dir.zip', os.path.join('outdir', 'deployment.zip'))
assert mock_osutils.copy.call_count == 1
assert template['resource']['aws_lambda_function'][
'foo']['filename'] == ('${path.module}/deployment.zip')
assert template['resource']['aws_lambda_function'][
'bar']['filename'] == ('${path.module}/deployment.zip')
def test_template_generator_default():
tgen = package.TemplateGenerator(Config(),
PackageOptions(
mock.Mock(spec=TypedAWSClient)
))
with pytest.raises(package.UnsupportedFeatureError):
tgen.dispatch(models.Model(), {})
class TestTemplateMergePostProcessor(object):
def _test_can_call_merge(self, file_template, template_name):
mock_osutils = mock.Mock(spec=OSUtils)
mock_osutils.get_file_contents.return_value = json.dumps(file_template)
mock_merger = mock.Mock(spec=package.TemplateMerger)
mock_merger.merge.return_value = {}
p = package.TemplateMergePostProcessor(
mock_osutils, mock_merger, package.JSONTemplateSerializer(),
merge_template=template_name)
template = {
'Resources': {
'foo': {
'Type': 'AWS::Serverless::Function',
'Properties': {
'CodeUri': 'old-dir.zip',
}
},
'bar': {
'Type': 'AWS::Serverless::Function',
'Properties': {
'CodeUri': 'old-dir.zip',
}
},
}
}
config = mock.MagicMock(spec=Config)
p.process(
template, config=config, outdir='outdir', chalice_stage_name='dev')
assert mock_osutils.file_exists.call_count == 1
assert mock_osutils.get_file_contents.call_count == 1
mock_merger.merge.assert_called_once_with(file_template, template)
def test_can_call_merge(self):
file_template = {
"Resources": {
"foo": {
"Properties": {
"Environment": {
"Variables": {"Name": "Foo"}
}
}
}
}
}
self._test_can_call_merge(file_template, 'extras.json')
def test_can_call_merge_with_yaml(self):
file_template = '''
Resources:
foo:
Properties:
Environment:
Variables:
Name: Foo
'''
self._test_can_call_merge(file_template, 'extras.yaml')
def test_raise_on_bad_json(self):
mock_osutils = mock.Mock(spec=OSUtils)
mock_osutils.get_file_contents.return_value = (
'{'
' "Resources": {'
' "foo": {'
' "Properties": {'
' "Environment": {'
' "Variables": {"Name": "Foo"}'
''
)
mock_merger = mock.Mock(spec=package.TemplateMerger)
p = package.TemplateMergePostProcessor(
mock_osutils, mock_merger, package.JSONTemplateSerializer(),
merge_template='extras.json')
template = {}
config = mock.MagicMock(spec=Config)
with pytest.raises(RuntimeError) as e:
p.process(
template,
config=config,
outdir='outdir',
chalice_stage_name='dev',
)
assert str(e.value).startswith('Expected')
assert 'to be valid JSON template' in str(e.value)
assert mock_merger.merge.call_count == 0
def test_raise_on_bad_yaml(self):
mock_osutils = mock.Mock(spec=OSUtils)
mock_osutils.get_file_contents.return_value = (
'---'
'Resources:'
' foo:'
' Properties:'
' Environment:'
' - 123'
''
)
mock_merger = mock.Mock(spec=package.TemplateMerger)
p = package.TemplateMergePostProcessor(
mock_osutils, mock_merger, package.YAMLTemplateSerializer(),
merge_template='extras.yaml')
template = {}
config = mock.MagicMock(spec=Config)
with pytest.raises(RuntimeError) as e:
p.process(
template,
config=config,
outdir='outdir',
chalice_stage_name='dev',
)
assert str(e.value).startswith('Expected')
assert 'to be valid YAML template' in str(e.value)
assert mock_merger.merge.call_count == 0
def test_raise_if_file_does_not_exist(self):
mock_osutils = mock.Mock(spec=OSUtils)
mock_osutils.file_exists.return_value = False
mock_merger = mock.Mock(spec=package.TemplateMerger)
p = package.TemplateMergePostProcessor(
mock_osutils, mock_merger, package.JSONTemplateSerializer(),
merge_template='extras.json')
template = {}
config = mock.MagicMock(spec=Config)
with pytest.raises(RuntimeError) as e:
p.process(
template,
config=config,
outdir='outdir',
chalice_stage_name='dev',
)
assert str(e.value).startswith('Cannot find template file:')
assert mock_merger.merge.call_count == 0
class TestCompositePostProcessor(object):
def test_can_call_no_processors(self):
processor = package.CompositePostProcessor([])
template = {}
config = mock.MagicMock(spec=Config)
processor.process(template, config, 'out', 'dev')
assert template == {}
def test_does_call_processors_once(self):
mock_processor_a = mock.Mock(spec=package.TemplatePostProcessor)
mock_processor_b = mock.Mock(spec=package.TemplatePostProcessor)
processor = package.CompositePostProcessor(
[mock_processor_a, mock_processor_b])
template = {}
config = mock.MagicMock(spec=Config)
processor.process(template, config, 'out', 'dev')
mock_processor_a.process.assert_called_once_with(
template, config, 'out', 'dev')
mock_processor_b.process.assert_called_once_with(
template, config, 'out', 'dev')
class TemplateTestBase(object):
template_gen_factory = None
def setup_method(self, stubbed_session):
self.resource_builder = package.ResourceBuilder(
application_builder=ApplicationGraphBuilder(),
deps_builder=DependencyBuilder(),
build_stage=mock.Mock(spec=BuildStage)
)
client = TypedAWSClient(None)
m_client = mock.Mock(wraps=client, spec=TypedAWSClient)
type(m_client).region_name = mock.PropertyMock(
return_value='us-west-2')
self.pkg_options = PackageOptions(m_client)
self.template_gen = self.template_gen_factory(
Config(), self.pkg_options)
def generate_template(self, config, chalice_stage_name='dev',
options=None):
resources = self.resource_builder.construct_resources(
config, chalice_stage_name)
if options is None:
options = self.pkg_options
return self.template_gen_factory(config, options).generate(resources)
def lambda_function(self):
return models.LambdaFunction(
resource_name='foo',
function_name='app-dev-foo',
environment_variables={},
runtime='python27',
handler='app.app',
tags={'foo': 'bar'},
timeout=120,
xray=None,
memory_size=128,
deployment_package=models.DeploymentPackage(filename='foo.zip'),
role=models.PreCreatedIAMRole(role_arn='role:arn'),
security_group_ids=[],
subnet_ids=[],
layers=[],
reserved_concurrency=None,
)
def managed_layer(self):
return models.LambdaLayer(
resource_name='layer',
layer_name='bar',
runtime='python2.7',
deployment_package=models.DeploymentPackage(filename='layer.zip')
)
class TestPackageOptions(object):
def test_service_principal(self):
awsclient = mock.Mock(spec=TypedAWSClient)
awsclient.region_name = 'us-east-1'
awsclient.endpoint_dns_suffix.return_value = 'amazonaws.com'
awsclient.service_principal.return_value = 'lambda.amazonaws.com'
options = package.PackageOptions(awsclient)
principal = options.service_principal('lambda')
assert principal == 'lambda.amazonaws.com'
awsclient.endpoint_dns_suffix.assert_called_once_with('lambda',
'us-east-1')
awsclient.service_principal.assert_called_once_with('lambda',
'us-east-1',
'amazonaws.com')
class TestTerraformTemplate(TemplateTestBase):
template_gen_factory = package.TerraformGenerator
EmptyPolicy = {
'Version': '2012-10-18',
'Statement': {
'Sid': '',
'Effect': 'Allow',
'Action': 'lambda:*'
}
}
def generate_template(self, config, chalice_stage_name='dev',
options=None):
resources = self.resource_builder.construct_resources(
config, chalice_stage_name)
# Patch up resources that have mocks (due to build stage)
# that we need to serialize to json.
for r in resources:
# For terraform rest api construction, we need a swagger
# doc on the api resource as we'll be serializing it to
# json.
if isinstance(r, models.RestAPI):
r.swagger_doc = {
'info': {'title': 'some-app'},
'x-amazon-apigateway-binary-media-types': []
}
if (isinstance(r, models.RestAPI) and
config.api_gateway_endpoint_type == 'PRIVATE'):
r.swagger_doc['x-amazon-apigateway-policy'] = (
r.policy.document)
# Same for iam policies on roles
elif isinstance(r, models.FileBasedIAMPolicy):
r.document = self.EmptyPolicy
if options is None:
options = self.pkg_options
return self.template_gen_factory(config, options).generate(resources)
def get_function(self, template):
functions = list(template['resource'][
'aws_lambda_function'].values())
assert len(functions) == 1
return functions[0]
def test_supports_precreated_role(self):
builder = DependencyBuilder()
resources = builder.build_dependencies(
models.Application(
stage='dev',
resources=[self.lambda_function()],
)
)
template = self.template_gen.generate(resources)
assert template['resource'][
'aws_lambda_function']['foo']['role'] == 'role:arn'
def test_adds_env_vars_when_provided(self, sample_app):
function = self.lambda_function()
function.environment_variables = {'foo': 'bar'}
template = self.template_gen.generate([function])
tf_resource = self.get_function(template)
assert tf_resource['environment'] == {
'variables': {
'foo': 'bar'
}
}
def test_adds_vpc_config_when_provided(self):
function = self.lambda_function()
function.security_group_ids = ['sg1', 'sg2']
function.subnet_ids = ['sn1', 'sn2']
template = self.template_gen.generate([function])
tf_resource = self.get_function(template)
assert tf_resource['vpc_config'] == {
'subnet_ids': ['sn1', 'sn2'],
'security_group_ids': ['sg1', 'sg2']}
def test_adds_layers_when_provided(self):
function = self.lambda_function()
function.layers = layers = ['arn://layer1', 'arn://layer2']
template = self.template_gen.generate([function])
tf_resource = self.get_function(template)
assert tf_resource['layers'] == layers
def test_adds_managed_layer_when_provided(self):
function = self.lambda_function()
function.layers = ['arn://layer1', 'arn://layer2']
function.managed_layer = self.managed_layer()
template = self.template_gen.generate(
[function.managed_layer, function])
tf_resource = self.get_function(template)
assert tf_resource['layers'] == [
'${aws_lambda_layer_version.layer.arn}',
'arn://layer1',
'arn://layer2',
]
assert template['resource']['aws_lambda_layer_version']['layer'] == {
'layer_name': 'bar',
'compatible_runtimes': ['python2.7'],
'filename': 'layer.zip',
}
def test_adds_reserved_concurrency_when_provided(self, sample_app):
function = self.lambda_function()
function.reserved_concurrency = 5
template = self.template_gen.generate([function])
tf_resource = self.get_function(template)
assert tf_resource['reserved_concurrent_executions'] == 5
def test_can_add_tracing_config(self, sample_app):
function = self.lambda_function()
function.xray = True
template = self.template_gen.generate([function])
tf_resource = self.get_function(template)
assert tf_resource['tracing_config']['mode'] == 'Active'
def test_can_generate_cloudwatch_event(self):
function = self.lambda_function()
event = models.CloudWatchEvent(
resource_name='foo-event',
rule_name='myrule',
event_pattern='{"source": ["aws.ec2"]}',
lambda_function=function,
)
template = self.template_gen.generate(
[function, event]
)
rule = template['resource'][
'aws_cloudwatch_event_rule'][event.resource_name]
assert rule == {
'name': event.resource_name,
'event_pattern': event.event_pattern}
target = template['resource'][
'aws_cloudwatch_event_target'][event.resource_name]
assert target == {
'target_id': 'foo-event',
'rule': '${aws_cloudwatch_event_rule.foo-event.name}',
'arn': '${aws_lambda_function.foo.arn}',
}
def test_can_generate_scheduled_event(self):
function = self.lambda_function()
event = models.ScheduledEvent(
resource_name='foo-event',
rule_name='myrule',
schedule_expression='rate(5 minutes)',
lambda_function=function,
rule_description='description',
)
template = self.template_gen.generate(
[function, event]
)
rule = template['resource'][
'aws_cloudwatch_event_rule'][event.resource_name]
assert rule == {
'name': event.resource_name,
'schedule_expression': 'rate(5 minutes)',
'description': 'description',
}
def test_can_generate_rest_api(self, sample_app_with_auth):
config = Config.create(chalice_app=sample_app_with_auth,
project_dir='.',
minimum_compression_size=8192,
api_gateway_endpoint_type='PRIVATE',
api_gateway_endpoint_vpce='vpce-abc123',
app_name='sample_app',
api_gateway_stage='api')
template = self.generate_template(config)
resources = template['resource']
# Lambda function should be created.
assert resources['aws_lambda_function']
# Along with permission to invoke from API Gateway.
assert list(resources['aws_lambda_permission'].values())[0] == {
'function_name': '${aws_lambda_function.api_handler.arn}',
'action': 'lambda:InvokeFunction',
'principal': 'apigateway.amazonaws.com',
'source_arn': (
'${aws_api_gateway_rest_api.rest_api.execution_arn}/*')
}
assert 'aws_api_gateway_rest_api' in resources
assert 'rest_api' in resources['aws_api_gateway_rest_api']
resource_policy = resources[
'aws_api_gateway_rest_api']['rest_api']['policy']
assert json.loads(resource_policy) == {
'Version': '2012-10-17',
'Statement': [
{
'Action': 'execute-api:Invoke',
'Resource': 'arn:*:execute-api:*:*:*',
'Effect': 'Allow',
'Condition': {
'StringEquals': {
'aws:SourceVpce': 'vpce-abc123'
}
},
'Principal': '*'
}
]
}
assert resources['aws_api_gateway_rest_api'][
'rest_api']['minimum_compression_size'] == 8192
assert resources['aws_api_gateway_rest_api'][
'rest_api']['endpoint_configuration'] == {'types': ['PRIVATE']}
assert 'aws_api_gateway_stage' not in resources
assert resources['aws_api_gateway_deployment']['rest_api'] == {
'rest_api_id': '${aws_api_gateway_rest_api.rest_api.id}',
'stage_description': (
'${md5(data.template_file.chalice_api_swagger.rendered)}'),
'stage_name': 'api',
'lifecycle': {'create_before_destroy': True}
}
# We should also create the auth lambda function.
assert 'myauth' in resources['aws_lambda_function']
# Along with permission to invoke from API Gateway.
assert resources['aws_lambda_permission']['myauth_invoke'] == {
'action': 'lambda:InvokeFunction',
'function_name': '${aws_lambda_function.myauth.arn}',
'principal': 'apigateway.amazonaws.com',
'source_arn': (
'${aws_api_gateway_rest_api.rest_api.execution_arn}/*')
}
# Also verify we add the expected outputs when using
# a Rest API.
assert template['output'] == {
'EndpointURL': {
'value': '${aws_api_gateway_deployment.rest_api.invoke_url}'},
'RestAPIId': {
'value': '${aws_api_gateway_rest_api.rest_api.id}'}
}
def test_can_package_s3_event_handler_with_tf_ref(self, sample_app):
@sample_app.on_s3_event(
bucket='${aws_s3_bucket.my_data_bucket.id}')
def handler(event):
pass
config = Config.create(chalice_app=sample_app,
project_dir='.',
api_gateway_stage='api')
template = self.generate_template(config)
assert template['resource']['aws_s3_bucket_notification'][
'my_data_bucket_notify'] == {
'bucket': '${aws_s3_bucket.my_data_bucket.id}',
'lambda_function': [{
'events': ['s3:ObjectCreated:*'],
'lambda_function_arn': (
'${aws_lambda_function.handler.arn}')
}]
}
def test_can_generate_chalice_terraform_static_data(self, sample_app):
config = Config.create(chalice_app=sample_app,
project_dir='.',
app_name='myfoo',
api_gateway_stage='dev')
template = self.generate_template(config)
assert template['data']['null_data_source']['chalice']['inputs'] == {
'app': 'myfoo',
'stage': 'dev'
}
def test_can_package_s3_event_handler_sans_filters(self, sample_app):
@sample_app.on_s3_event(bucket='foo')
def handler(event):
pass
config = Config.create(chalice_app=sample_app,
project_dir='.',
api_gateway_stage='api')
template = self.generate_template(config)
assert template['resource']['aws_s3_bucket_notification'][
'foo_notify'] == {
'bucket': 'foo',
'lambda_function': [{
                'events': ['s3:ObjectCreated:*'],
                'lambda_function_arn': (
                    '${aws_lambda_function.handler.arn}')
            }]
        }
<filename>PyLESA/heatpump.py
"""Heat pump module
Modelling a heat pump with modelling approaches of
simple, lorentz, generic regression, and standard test regression
"""
import os
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import tools as t
import weather
import inputs
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 22})
def perf(name, subname):
myInputs = inputs.Inputs(name, subname)
input_weather = myInputs.weather()
inputs_basics = myInputs.heatpump_basics()
modelling_approach = inputs_basics['modelling_approach']
if modelling_approach == 'Simple':
inputs_simple = myInputs.heatpump_simple()
inputs_demands = myInputs.demands()
myHeatPump = HeatPump(
inputs_basics['heat_pump_type'],
inputs_basics['modelling_approach'],
inputs_basics['capacity'],
inputs_basics['ambient_delta_t'],
inputs_basics['minimum_runtime'],
inputs_basics['minimum_output'],
inputs_basics['data_input'],
inputs_demands['source_temp'],
inputs_demands['return_temp_DH'],
input_weather,
simple_cop=inputs_simple)
return myHeatPump.performance()
elif modelling_approach == 'Lorentz':
inputs_lorentz = myInputs.heatpump_lorentz()
inputs_demands = myInputs.demands()
myHeatPump = HeatPump(
inputs_basics['heat_pump_type'],
inputs_basics['modelling_approach'],
inputs_basics['capacity'],
inputs_basics['ambient_delta_t'],
inputs_basics['minimum_runtime'],
inputs_basics['minimum_output'],
inputs_basics['data_input'],
inputs_demands['source_temp'],
inputs_demands['return_temp_DH'],
input_weather,
lorentz_inputs=inputs_lorentz)
return myHeatPump.performance()
elif modelling_approach == 'Generic regression':
inputs_demands = myInputs.demands()
myHeatPump = HeatPump(
inputs_basics['heat_pump_type'],
inputs_basics['modelling_approach'],
inputs_basics['capacity'],
inputs_basics['ambient_delta_t'],
inputs_basics['minimum_runtime'],
inputs_basics['minimum_output'],
inputs_basics['data_input'],
inputs_demands['source_temp'],
inputs_demands['return_temp_DH'],
input_weather)
return myHeatPump.performance()
elif modelling_approach == 'Standard test regression':
inputs_standard_regression = myInputs.heatpump_standard_regression()
inputs_demands = myInputs.demands()
myHeatPump = HeatPump(
inputs_basics['heat_pump_type'],
inputs_basics['modelling_approach'],
inputs_basics['capacity'],
inputs_basics['ambient_delta_t'],
inputs_basics['minimum_runtime'],
inputs_basics['minimum_output'],
inputs_basics['data_input'],
inputs_demands['source_temp'],
inputs_demands['return_temp_DH'],
input_weather,
standard_test_regression_inputs=inputs_standard_regression)
return myHeatPump.performance()
class HeatPump(object):
def __init__(self, hp_type, modelling_approach,
capacity, ambient_delta_t,
minimum_runtime, minimum_output, data_input,
flow_temp_source, return_temp,
hp_ambient_temp,
simple_cop=None,
lorentz_inputs=None,
generic_regression_inputs=None,
standard_test_regression_inputs=None
):
"""heat pump class object
Arguments:
hp_type {string} -- type of heatpump, ASHP, WSHP, GSHP
modelling_approach {str} -- simple, lorentz,
generic, standard regression
capacity {float} -- thermal capacity of heat pump
ambient_delta_t {int} -- drop in ambient source temperature
from inlet to outlet
            minimum_runtime {int} -- minimum runtime of the heat pump
            minimum_output {float} -- minimum thermal output of the heat pump
data_input {str} -- type of data input, peak or integrated
flow_temp {dataframe} -- required temperatures out of HP
return_temp {dataframe} -- inlet temp to HP
weather {dic} -- ambient conditions of heat source
Keyword Arguments: all these are for inputs, bar simple,
for different modelling approaches
simple_cop {float} -- only COP for simple (default: {None})
lorentz_inputs {dic} -- (default: {None})
generic_regression_inputs {dic} -- (default: {None})
standard_test_regression_inputs {dic} -- (default: {None})
"""
self.hp_type = hp_type
self.modelling_approach = modelling_approach
self.capacity = capacity
self.ambient_delta_t = ambient_delta_t
self.minimum_runtime = minimum_runtime
self.minimum_output = minimum_output
self.data_input = data_input
self.flow_temp_source = flow_temp_source
self.return_temp = return_temp
self.hp_ambient_temp = hp_ambient_temp
self.simple_cop = simple_cop
self.lorentz_inputs = lorentz_inputs
self.generic_regression_inputs = generic_regression_inputs
self.standard_test_regression_inputs = standard_test_regression_inputs
def heat_resource(self):
"""accessing the heat resource
takes the hp resource from the weather class
Returns:
dataframe -- ambient temperature for heat source of heat pump
"""
HP_resource = weather.Weather(
air_temperature=self.hp_ambient_temp['air_temperature'],
water_temperature=self.hp_ambient_temp['water_temperature']).heatpump()
if self.hp_type == 'ASHP':
HP_resource = HP_resource.rename(
columns={'air_temperature': 'ambient_temp'})
return HP_resource[['ambient_temp']]
elif self.hp_type == 'WSHP':
HP_resource = HP_resource.rename(
columns={'water_temperature': 'ambient_temp'})
return HP_resource[['ambient_temp']]
else:
print('ERROR invalid heat pump type')
def performance(self):
"""performance over year of heat pump
        a method for calculating the heat pump performance (cop and duty)
        for each hourly timestep over the year
        Returns:
            list -- list of dicts with cop and duty for each hourly timestep in the year
"""
if self.capacity == 0:
performance = []
for timesteps in range(8760):
# cop needs to be low to not break the mpc solver
# duty being zero means it won't choose it anyway
p = {'cop': 0.5, 'duty': 0}
performance.append(p)
return performance
ambient_temp = self.heat_resource()['ambient_temp']
if self.modelling_approach == 'Simple':
cop_x = self.simple_cop
duty_x = self.capacity
elif self.modelling_approach == 'Lorentz':
myLorentz = Lorentz(self.lorentz_inputs['cop'],
self.lorentz_inputs['flow_temp_spec'],
self.lorentz_inputs['return_temp_spec'],
self.lorentz_inputs['temp_ambient_in_spec'],
self.lorentz_inputs['temp_ambient_out_spec'],
self.lorentz_inputs['elec_capacity'])
hp_eff = myLorentz.hp_eff()
elif self.modelling_approach == 'Generic regression':
myGenericRegression = GenericRegression()
duty_x = self.capacity
elif self.modelling_approach == 'Standard test regression':
myStandardRegression = StandardTestRegression(
self.standard_test_regression_inputs['data_x'],
self.standard_test_regression_inputs['data_COSP'],
self.standard_test_regression_inputs['data_duty'])
models = myStandardRegression.train()
COP_model = models['COP_model']
duty_model = models['duty_model']
performance = []
for timestep in range(8760):
if self.modelling_approach == 'Simple':
cop = cop_x
hp_duty = duty_x
elif self.modelling_approach == 'Lorentz':
ambient_return = ambient_temp[timestep] - self.ambient_delta_t
cop = myLorentz.calc_cop(hp_eff,
self.flow_temp_source[timestep],
self.return_temp[timestep],
ambient_temp[timestep],
ambient_return)
hp_duty = myLorentz.calc_duty(self.capacity)
elif self.modelling_approach == 'Generic regression':
if self.hp_type == 'ASHP':
cop = myGenericRegression.ASHP_cop(
self.flow_temp_source[timestep],
ambient_temp[timestep])
elif self.hp_type == 'GSHP' or self.hp_type == 'WSHP':
cop = myGenericRegression.GSHP_cop(
self.flow_temp_source[timestep],
ambient_temp[timestep])
                # account for defrosting below 5 degrees C
if ambient_temp[timestep] <= 5:
cop = 0.9 * cop
hp_duty = duty_x
elif self.modelling_approach == 'Standard test regression':
hp_duty = myStandardRegression.predict_duty(
duty_model,
ambient_temp[timestep],
self.flow_temp_source[timestep])
                # 10% reduction in performance below 5 degrees C
                # if performance data not collected to standard (integrated) conditions
if self.data_input == 'Integrated performance' or ambient_temp[timestep] > 5:
cop = myStandardRegression.predict_COP(
COP_model,
ambient_temp[timestep],
self.flow_temp_source[timestep])
elif self.data_input == 'Peak performance':
if self.hp_type == 'ASHP':
if ambient_temp[timestep] <= 5:
cop = 0.9 * myStandardRegression.predict_COP(
COP_model,
ambient_temp[timestep],
self.flow_temp_source[timestep])
d = {'cop': cop, 'duty': hp_duty}
performance.append(d)
return performance
def elec_usage(self, demand, hp_performance):
"""electricity usage of hp for timestep given a thermal demand
calculates the electrical usage of the heat pump given a heat demand
outputs a dataframe of heat demand, heat pump heat demand,
heat pump elec demand, cop, duty, and leftover
(only non-zero for fixed speed HP)
Arguments:
timestep {int} -- timestep to be calculated
demand {float} -- thermal demand to be met by heat pump
hp_performance {dic} -- dic containing the cop and duty
for timesteps over year
Returns:
dic -- heat demand to be met, cop, duty,
heat demand met by hp, electricity usage of heat pump
"""
if self.capacity == 0:
return {'hp_demand': 0.0, 'hp_elec': 0.0}
cop = hp_performance['cop']
duty = hp_performance['duty']
max_elec_usage = demand / cop
max_elec_cap = duty / cop
hp_elec = min(max_elec_usage, max_elec_cap)
hp_demand = hp_elec * cop
d = {'hp_demand': hp_demand,
'hp_elec': hp_elec}
return d
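    # Illustrative worked example of elec_usage (not from the original source, values assumed):
    # with cop = 3.0, duty = 600 and a demand of 900 in the same energy units,
    # max_elec_usage = 900 / 3 = 300 and max_elec_cap = 600 / 3 = 200,
    # so hp_elec = min(300, 200) = 200 and hp_demand = 200 * 3 = 600,
    # i.e. the heat pump is capacity-limited and 300 units of demand are left over.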
def thermal_output(self, elec_supply,
hp_performance, heat_demand):
"""thermal output from a given electricity supply
Arguments:
timestep {int} -- timestep to be modelled
elec_supply {float} -- electricity supply used by heat pump
hp_performance {dic} -- dic containing the cop and duty
for timesteps over year
heat_demand {float} -- heat demand to be met of timestep
Returns:
dic -- max_thermal_output, heat demand met by hp,
electricity usage of heat pump
"""
if self.capacity == 0:
return {'hp_demand': 0.0, 'hp_elec': 0.0}
cop = hp_performance['cop']
duty = hp_performance['duty']
# maximum thermal output given elec supply
max_thermal_output = elec_supply * cop
# demand met by hp is min of three arguments
hp_demand = min(max_thermal_output, heat_demand, duty)
hp_elec = hp_demand / cop
d = {'hp_demand': hp_demand,
'hp_elec': hp_elec}
return d
class Lorentz(object):
def __init__(self, cop, flow_temp_spec, return_temp_spec,
ambient_temp_in_spec, ambient_temp_out_spec,
elec_capacity):
"""lorentz calculations and attributes
based on EnergyPRO method
Arguments:
cop {float} -- cop at specified conditions
flow_temp_spec {float} -- temperature from HP spec
            return_temp_spec {float} -- temperature to HP spec
            ambient_temp_in_spec {float} -- specified ambient inlet temperature
            ambient_temp_out_spec {float} -- specified ambient outlet temperature
            elec_capacity {float} -- absolute electrical capacity
"""
self.cop = cop
self.flow_temp_spec = flow_temp_spec
self.return_temp_spec = return_temp_spec
self.ambient_temp_in_spec = ambient_temp_in_spec
self.ambient_temp_out_spec = ambient_temp_out_spec
self.elec_capacity = elec_capacity
def hp_eff(self):
"""heat pump efficiency which is static
        # calculations of the lorentz model, starting with the mean temps
        # for the flow and return of the heat pump, t high mean,
        # and the ambient in and out temps, t low mean
Returns:
float -- efficiency of the heat pump
"""
t_high_mean = ((self.flow_temp_spec - self.return_temp_spec) /
(math.log((self.flow_temp_spec + 273.15) /
(self.return_temp_spec + 273.15))))
t_low_mean = (
(self.ambient_temp_in_spec - self.ambient_temp_out_spec) /
(math.log((self.ambient_temp_in_spec + 273.15) /
(self.ambient_temp_out_spec + 273.15))))
# lorentz cop is the highest theoretical cop
cop_lorentz = t_high_mean / (t_high_mean - t_low_mean)
# this gives the heat pump efficiency using the stated cop
        # the lorentz cop is calculated for each timestep
# then this is multiplied by the heat pump
# efficiency to give actual cop
hp_eff = self.cop / cop_lorentz
return hp_eff
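        # Worked example of the calculation above (illustrative only, figures assumed):
        # cop = 3.5, flow/return spec = 70/40 C, ambient in/out spec = 12/6 C
        #   t_high_mean = (70 - 40) / ln(343.15 / 313.15) ~= 327.9 K
        #   t_low_mean  = (12 - 6) / ln(285.15 / 279.15)  ~= 282.2 K
        #   cop_lorentz = 327.9 / (327.9 - 282.2)         ~= 7.2
        #   hp_eff      = 3.5 / 7.2                       ~= 0.49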
def calc_cop(self, hp_eff, flow_temp, return_temp,
ambient_temp_in, ambient_temp_out):
"""cop for timestep
        calculates the cop based upon actual flow/return and ambient temperatures
uses heat pump efficiency from before
Arguments:
hp_eff {float} -- heat pump efficiency
flow_temp {float} -- flow temperature from heat pump
return_temp {float} -- temperature returning to heat pump
ambient_temp_in {float} -- real-time
ambient_temp_out {float} -- real-time
Returns:
float -- cop for timestep
"""
t_high_mean = ((flow_temp - return_temp) /
(math.log((flow_temp + 273.15) /
(return_temp + 273.15))))
t_low_mean = ((ambient_temp_in - ambient_temp_out) /
(math.log((ambient_temp_in + 273.15) /
(ambient_temp_out + 273.15))))
cop_lorentz = t_high_mean / (t_high_mean - t_low_mean)
cop = hp_eff * cop_lorentz
return cop
    def calc_duty(self, capacity):
<reponame>cedricg92/event-manager
import datetime
import fnmatch
import gzip
import shutil
import tarfile
import time
import croniter
import hdfs
from watchdog.events import PatternMatchingEventHandler, FileCreatedEvent, FileModifiedEvent, FileMovedEvent, os, \
FileSystemEvent
def add_log_str(log_pattern, strlog):
"""Add log in logfile
:param log_pattern: Pattern of log
:type log_pattern: str
:param strlog: Log message
:type strlog: str
"""
logfile = log_pattern
logfile = logfile.replace("%Y", time.strftime("%Y"))
logfile = logfile.replace("%m", time.strftime("%m"))
logfile = logfile.replace("%d", time.strftime("%d"))
logfile = logfile.replace("%H", time.strftime("%H"))
logfile = logfile.replace("%M", time.strftime("%M"))
logfile = logfile.replace("%S", time.strftime("%S"))
with open(logfile, "ab+") as myfile:
myfile.write(strlog)
myfile.close()
def add_log(log_pattern, event, filename, destination, event_type, event_subtype, exec_program, args, return_val):
"""Add log in filelog
:param log_pattern: Pattern of log
:type log_pattern: str
:param event: Event Name
:param filename: File name
:param destination: Destination
:param event_type: Event type
:param event_subtype: Event sub type
:param exec_program: executable
:param args: Arguments
:param return_val: Return Value
"""
log = time.strftime("%Y-%m-%d %H:%M:%S")
log += "|" + str(event)
log += "|" + str(filename)
log += "|" + str(destination)
log += "|" + str(event_type)
log += "|" + str(event_subtype)
log += "|" + str(exec_program)
log += "|" + str(args)
if return_val:
log += "|" + str(0)
else:
log += "|" + str(1)
log += os.linesep
add_log_str(log_pattern, log)
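# Example of a resulting log line (purely illustrative, field values assumed):
# 2020-07-25 19:44:35|my-move-event|/landing/data.csv|/archive|fs|move|||0
# i.e. timestamp|event|filename|destination|type|subtype|exec|args|return code,
# where the trailing 0 indicates success and 1 indicates failure.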
class ExecHandler(PatternMatchingEventHandler):
"""Class ExecHandler
"""
FILE_LOG = ""
def __init__(self, event_conf):
"""
:param event_conf: EventConf
:type event_conf: em.event.EventConf
:return:
"""
super(self.__class__, self).__init__(["*"], ["*.err"], True, False)
self._patterns = event_conf.patterns
self.event_conf = event_conf
def is_scheduled(self):
"""Check if the event handler is scheduled
:return: True if the event handler is scheduled
:rtype: bool
"""
return self.event_conf.is_scheduled()
def on_created(self, event):
"""Function called when file is created
:param event: File created event
:type event: FileCreatedEvent
"""
if isinstance(event, FileCreatedEvent):
self.process(event)
def on_modified(self, event):
"""Function called when file is modified
:param event: File modified event
:type event: FileModifiedEvent
"""
if isinstance(event, FileModifiedEvent):
self.process(event)
def on_moved(self, event):
"""Function called when file is moved
:param event: File moved event
:type event: FileMovedEvent
"""
if isinstance(event, FileMovedEvent):
self.process(event)
def process(self, event):
"""Function process
:param event: Event file
:type event: FileSystemEvent
:return: Return value (True success, False error)
:rtype: bool
"""
if self.event_conf.enabled == 0:
return 0
args = str(self.event_conf.get_context_value("execArgs"))
args = args.replace("%filenale", event.src_path)
args = args.replace("%destination", self.event_conf.destination)
exec_dir = os.path.dirname(self.event_conf.get_context_value("execProgram"))
        exec_app = os.path.basename(self.event_conf.get_context_value("execProgram"))
ret = os.system("cd "+exec_dir+";./"+exec_app+" "+args)
add_log(self.FILE_LOG, self.event_conf.name, event.src_path, self.event_conf.destination, self.event_conf.type,
self.event_conf.subtype, self.event_conf.get_context_value("execProgram"), args, ret)
        if ret != 0:
os.rename(event.src_path, event.src_path + ".err")
else:
os.remove(event.src_path)
return ret
def check_schedule(self, now):
"""Check if the event should be launched
:param now: Actual date and time
:type now: datetime.datetime
:return: True if the event should be launched
:rtype: bool
"""
cron = croniter.croniter(self.event_conf.get_cron(), now)
current_exec_datetime = cron.get_current(datetime.datetime)
return (current_exec_datetime.year == now.year and current_exec_datetime.month == now.month and
current_exec_datetime.day == now.day and current_exec_datetime.hour == now.hour and
current_exec_datetime.minute == now.minute)
class FsHandler(PatternMatchingEventHandler):
"""File System Handler
"""
FILE_LOG = ""
TYPE_MOVE = 1
TYPE_ARCHIVE = 2
TYPE_COMPRESS = 3
TYPE_UNARCHIVE = 4
TYPE_UNCOMPRESS = 5
STR_TYPE_MOVE = "move"
STR_TYPE_ARCHIVE = "archive"
STR_TYPE_COMPRESS = "compress"
STR_TYPE_UNARCHIVE = "unarchive"
STR_TYPE_UNCOMPRESS = "uncompress"
def __init__(self, event_conf, fs_type):
"""
:param event_conf: ExecConf
:type event_conf: em.event.EventConf
:param fs_type: Process type of Fs handler
:type fs_type: str
"""
super(self.__class__, self).__init__(["*"], ["*.tmp", "*.err", "*.run"], True, False)
self.event_conf = event_conf
self.fs_type = fs_type
self.delimiter = os.path.sep
def is_scheduled(self):
"""Check if the event handler is scheduled
:return: True if the event handler is scheduled
:rtype: bool
"""
return self.event_conf.is_scheduled()
def on_any_event(self, event):
if not event.src_path.endswith(".tmp"):
for pattern in self.event_conf.patterns:
if fnmatch.fnmatch(os.path.basename(event.src_path), pattern):
print(event)
def on_created(self, event):
"""Handler listener on creation of file
:param event: File created event
:type event: FileCreatedEvent
:return: True if the process run correctly
:rtype: bool
"""
if isinstance(event, FileCreatedEvent):
return self.process(event.src_path)
return False
def on_modified(self, event):
"""Handler listener on modification of file
:param event: File modified event
:type event: FileModifiedEvent
:return: True if the process run correctly
:rtype: bool
"""
if isinstance(event, FileModifiedEvent):
return self.process(event.src_path)
return False
def on_moved(self, event):
"""Handler listener on move of file
:param event: File moved event
:type event: FileMovedEvent
:return: True if the process run correctly
:rtype: bool
"""
if isinstance(event, FileMovedEvent):
return self.process(event.dest_path)
return False
def process(self, full_filename):
"""Function process
:param full_filename: Full path of filename
:type full_filename: str
:return: True if the process run correctly
:rtype: bool
"""
if self.event_conf.enabled == 0:
return True
if not os.path.exists(full_filename):
return False
if os.path.dirname(full_filename) != self.event_conf.directory:
return False
res = False
filename = os.path.basename(full_filename)
matched = False
for pattern in self.event_conf.patterns:
if fnmatch.fnmatch(filename, pattern):
matched = True
break
if not matched:
return False
os.rename(full_filename, full_filename + ".run")
if self.fs_type == FsHandler.TYPE_MOVE or self.fs_type == FsHandler.STR_TYPE_MOVE:
res = self.process_move(filename, "run")
elif self.fs_type == FsHandler.TYPE_ARCHIVE or self.fs_type == FsHandler.STR_TYPE_ARCHIVE:
res = self.process_archive(filename, "run", "tmp")
elif self.fs_type == FsHandler.TYPE_COMPRESS or self.fs_type == FsHandler.STR_TYPE_COMPRESS:
res = self.process_compress(filename, "run", "tmp")
elif self.fs_type == FsHandler.TYPE_UNCOMPRESS or self.fs_type == FsHandler.STR_TYPE_UNCOMPRESS:
res = self.process_uncompress(filename, "run", "tmp")
elif self.fs_type == FsHandler.TYPE_UNARCHIVE or self.fs_type == FsHandler.STR_TYPE_UNARCHIVE:
res = self.process_unarchive(filename, "run")
add_log(self.FILE_LOG, self.event_conf.name, full_filename, self.event_conf.destination, self.event_conf.type,
self.event_conf.subtype, "", "", res)
if not res:
os.rename(full_filename + ".run", full_filename + ".err")
return res
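    # Rough flow of process() above (descriptive note, illustrative file names assumed):
    # a matching file /in/data.csv is first renamed to /in/data.csv.run, the
    # move/archive/compress step writes the result under the configured destination,
    # and on failure the .run file is renamed to /in/data.csv.err so it is excluded
    # from further matching (see the ignore patterns passed in __init__).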
def process_move(self, filename, extension):
"""Move file to destination
directory -> File -> destination
:param filename: Filename
:type filename: str
:param extension: Extention of file (run)
:type extension: str
:return: True if the process run correctly
:rtype: bool
"""
if os.path.exists(self.event_conf.destination + self.delimiter + filename):
os.remove(self.event_conf.destination + self.delimiter + filename)
os.rename(self.event_conf.directory + self.delimiter + filename + "." + extension,
self.event_conf.destination + self.delimiter + filename)
return os.path.exists(self.event_conf.destination + self.delimiter + filename)
def process_archive(self, filename, extension, tmp_extension):
"""Create archive file (tar)
:param filename: Filename
:type filename: str
:param extension: Extension of file (run)
:type extension: str
:param tmp_extension: Tmp extension
:type tmp_extension: str
:return: True if the process run correctly
:rtype: bool
"""
if os.path.exists(self.event_conf.destination + self.delimiter + filename + ".tar" + "." + tmp_extension):
os.remove(self.event_conf.destination + self.delimiter + filename + ".tar" + "." + tmp_extension)
if os.path.exists(self.event_conf.destination + self.delimiter + filename + ".tar"):
os.remove(self.event_conf.destination + self.delimiter + filename + ".tar")
tar = tarfile.open(self.event_conf.destination + self.delimiter + filename + ".tar" + "." + tmp_extension, "w")
tar.add(self.event_conf.directory + self.delimiter + filename + "." + extension, filename)
tar.close()
os.remove(self.event_conf.directory + self.delimiter + filename + "." + extension)
os.rename(self.event_conf.destination + self.delimiter + filename + ".tar" + "." + tmp_extension,
self.event_conf.destination + self.delimiter + filename + ".tar")
return os.path.exists(self.event_conf.destination + self.delimiter + filename + ".tar")
def process_compress(self, filename, extension, tmp_extension):
"""Compress file (gzip)
:param filename: Filename
:type filename: str
:param extension: Extension of file (run)
:type extension: str
:param tmp_extension: Tmp extension
:type tmp_extension: str
:return: True if the process run correctly
:rtype: bool
"""
if os.path.exists(self.event_conf.destination + self.delimiter + filename + ".gz"):
os.remove(self.event_conf.destination + self.delimiter + filename + ".gz")
if os.path.exists(self.event_conf.destination + self.delimiter + filename + ".gz" + "." + tmp_extension):
os.remove(self.event_conf.destination + self.delimiter + filename + ".gz" + "." + tmp_extension)
with open(self.event_conf.directory + self.delimiter + filename + "." + extension, 'rb') as f_in, \
gzip.open(self.event_conf.destination + self.delimiter + filename + ".gz" + "." + tmp_extension,
'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
f_in.close()
f_out.close()
os.remove(self.event_conf.directory + self.delimiter + filename + "." + extension)
os.rename(self.event_conf.destination + self.delimiter + filename + ".gz" + "." + tmp_extension,
self.event_conf.destination + self.delimiter + filename + ".gz")
return os.path.exists(self.event_conf.destination + self.delimiter + filename + ".gz")
def process_uncompress(self, filename, extension, tmp_extension):
"""Uncompress file (gunzip)
:param filename: Filename
:type filename: str
:param extension: Extension of file (run)
:type extension: str
:param tmp_extension: Tmp extension
:type tmp_extension: str
:return: True if the process run correctly
:rtype: bool
"""
if os.path.exists(self.event_conf.destination + self.delimiter + filename.replace(".gz", "") + "." +
tmp_extension):
os.remove(self.event_conf.destination + self.delimiter + filename.replace(".gz", "") + "." + tmp_extension)
if os.path.exists(self.event_conf.destination + self.delimiter + filename.replace(".gz", "")):
os.remove(self.event_conf.destination + self.delimiter + filename.replace(".gz", ""))
with gzip.open(self.event_conf.directory + self.delimiter + filename + "." + extension) as f_in, \
open(self.event_conf.destination + self.delimiter + filename.replace(".gz", "") + "." + tmp_extension,
"w") as f_out:
f_out.write(f_in.read())
os.rename(self.event_conf.destination + self.delimiter + filename.replace(".gz", "") + "." + tmp_extension,
self.event_conf.destination + self.delimiter + filename.replace(".gz", ""))
os.remove(self.event_conf.directory + self.delimiter + filename + "." + extension)
        return os.path.exists(self.event_conf.destination + self.delimiter + filename.replace(".gz", ""))
import hashlib
import json
import logging
import uuid
from functools import wraps
from flask import redirect, Blueprint, abort, send_file, make_response, request
from prometheus_client import Counter
import features
from app import app, signer, storage, config_provider, ip_resolver, instance_keys
from auth.auth_context import get_authenticated_user
from auth.decorators import process_auth
from auth.permissions import ReadRepositoryPermission
from data import database
from data import model
from data.registry_model import registry_model
from endpoints.decorators import (
anon_protect,
anon_allowed,
route_show_if,
parse_repository_name,
check_region_blacklisted,
)
from endpoints.metrics import image_pulls, image_pulled_bytes
from endpoints.v2.blob import BLOB_DIGEST_ROUTE
from image.appc import AppCImageFormatter
from image.docker import ManifestException
from image.docker.squashed import SquashedDockerImageFormatter
from storage import Storage
from util.audit import track_and_log, wrap_repository
from util.http import exact_abort
from util.metrics.prometheus import timed_blueprint
from util.registry.filelike import wrap_with_handler
from util.registry.queuefile import QueueFile
from util.registry.queueprocess import QueueProcess
from util.registry.tarlayerformat import TarLayerFormatterReporter
from util.registry.torrent import (
make_torrent,
per_user_torrent_filename,
public_torrent_filename,
PieceHasher,
TorrentConfiguration,
)
logger = logging.getLogger(__name__)
verbs = timed_blueprint(Blueprint("verbs", __name__))
verb_stream_passes = Counter(
"quay_verb_stream_passes_total",
"number of passes over a tar stream used by verb requests",
labelnames=["kind"],
)
LAYER_MIMETYPE = "binary/octet-stream"
QUEUE_FILE_TIMEOUT = 15 # seconds
class VerbReporter(TarLayerFormatterReporter):
def __init__(self, kind):
self.kind = kind
def report_pass(self, pass_count):
verb_stream_passes.labels(self.kind).inc(pass_count)
def _open_stream(formatter, tag, schema1_manifest, derived_image_id, handlers, reporter):
"""
This method generates a stream of data which will be replicated and read from the queue files.
This method runs in a separate process.
"""
# For performance reasons, we load the full image list here, cache it, then disconnect from
# the database.
with database.UseThenDisconnect(app.config):
layers = registry_model.list_parsed_manifest_layers(
tag.repository, schema1_manifest, storage, include_placements=True
)
def image_stream_getter(store, blob):
def get_stream_for_storage():
current_image_stream = store.stream_read_file(blob.placements, blob.storage_path)
logger.debug("Returning blob %s: %s", blob.digest, blob.storage_path)
return current_image_stream
return get_stream_for_storage
def tar_stream_getter_iterator():
# Re-Initialize the storage engine because some may not respond well to forking (e.g. S3)
store = Storage(app, config_provider=config_provider, ip_resolver=ip_resolver)
# Note: We reverse because we have to start at the leaf layer and move upward,
# as per the spec for the formatters.
for layer in reversed(layers):
yield image_stream_getter(store, layer.blob)
stream = formatter.build_stream(
tag,
schema1_manifest,
derived_image_id,
layers,
tar_stream_getter_iterator,
reporter=reporter,
)
for handler_fn in handlers:
stream = wrap_with_handler(stream, handler_fn)
return stream.read
def _sign_derived_image(verb, derived_image, queue_file):
"""
Read from the queue file and sign the contents which are generated.
This method runs in a separate process.
"""
signature = None
try:
signature = signer.detached_sign(queue_file)
except:
logger.exception("Exception when signing %s deriving image %s", verb, derived_image)
return
# Setup the database (since this is a new process) and then disconnect immediately
# once the operation completes.
if not queue_file.raised_exception:
with database.UseThenDisconnect(app.config):
registry_model.set_derived_image_signature(derived_image, signer.name, signature)
def _write_derived_image_to_storage(
verb, derived_image, queue_file, namespace, repository, tag_name
):
"""
Read from the generated stream and write it back to the storage engine.
This method runs in a separate process.
"""
def handle_exception(ex):
logger.debug(
"Exception when building %s derived image %s (%s/%s:%s): %s",
verb,
derived_image,
namespace,
repository,
tag_name,
ex,
)
with database.UseThenDisconnect(app.config):
registry_model.delete_derived_image(derived_image)
queue_file.add_exception_handler(handle_exception)
# Re-Initialize the storage engine because some may not respond well to forking (e.g. S3)
store = Storage(app, config_provider=config_provider, ip_resolver=ip_resolver)
try:
store.stream_write(
derived_image.blob.placements, derived_image.blob.storage_path, queue_file
)
except IOError as ex:
logger.error(
"Exception when writing %s derived image %s (%s/%s:%s): %s",
verb,
derived_image,
namespace,
repository,
tag_name,
ex,
)
with database.UseThenDisconnect(app.config):
registry_model.delete_derived_image(derived_image)
queue_file.close()
def _torrent_for_blob(blob, is_public):
"""
Returns a response containing the torrent file contents for the given blob.
May abort with an error if the state is not valid (e.g. non-public, non-user request).
"""
# Make sure the storage has a size.
if not blob.compressed_size:
abort(404)
# Lookup the torrent information for the storage.
torrent_info = registry_model.get_torrent_info(blob)
if torrent_info is None:
abort(404)
# Lookup the webseed path for the storage.
webseed = storage.get_direct_download_url(
blob.placements, blob.storage_path, expires_in=app.config["BITTORRENT_WEBSEED_LIFETIME"]
)
if webseed is None:
# We cannot support webseeds for storages that cannot provide direct downloads.
exact_abort(501, "Storage engine does not support seeding.")
# Load the config for building torrents.
torrent_config = TorrentConfiguration.from_app_config(instance_keys, app.config)
# Build the filename for the torrent.
if is_public:
name = public_torrent_filename(blob.uuid)
else:
user = get_authenticated_user()
if not user:
abort(403)
name = per_user_torrent_filename(torrent_config, user.uuid, blob.uuid)
# Return the torrent file.
torrent_file = make_torrent(
torrent_config,
name,
webseed,
blob.compressed_size,
torrent_info.piece_length,
torrent_info.pieces,
)
headers = {
"Content-Type": "application/x-bittorrent",
"Content-Disposition": "attachment; filename={0}.torrent".format(name),
}
return make_response(torrent_file, 200, headers)
def _torrent_repo_verb(repository, tag, manifest, verb, **kwargs):
"""
Handles returning a torrent for the given verb on the given image and tag.
"""
if not features.BITTORRENT:
# Torrent feature is not enabled.
abort(406)
# Lookup an *existing* derived storage for the verb. If the verb's image storage doesn't exist,
# we cannot create it here, so we 406.
derived_image = registry_model.lookup_derived_image(
manifest, verb, storage, varying_metadata={"tag": tag.name}, include_placements=True
)
if derived_image is None:
abort(406)
# Return the torrent.
torrent = _torrent_for_blob(
derived_image.blob, model.repository.is_repository_public(repository)
)
# Log the action.
track_and_log(
"repo_verb", wrap_repository(repository), tag=tag.name, verb=verb, torrent=True, **kwargs
)
return torrent
def _verify_repo_verb(_, namespace, repo_name, tag_name, verb, checker=None):
permission = ReadRepositoryPermission(namespace, repo_name)
repo = model.repository.get_repository(namespace, repo_name)
repo_is_public = repo is not None and model.repository.is_repository_public(repo)
if not permission.can() and not repo_is_public:
logger.debug(
"No permission to read repository %s/%s for user %s with verb %s",
namespace,
repo_name,
get_authenticated_user(),
verb,
)
abort(403)
if repo is not None and repo.kind.name != "image":
logger.debug(
"Repository %s/%s for user %s is not an image repo",
namespace,
repo_name,
get_authenticated_user(),
)
abort(405)
# Make sure the repo's namespace isn't disabled.
if not registry_model.is_namespace_enabled(namespace):
abort(400)
# Lookup the requested tag.
repo_ref = registry_model.lookup_repository(namespace, repo_name)
if repo_ref is None:
abort(404)
tag = registry_model.get_repo_tag(repo_ref, tag_name)
if tag is None:
logger.debug(
"Tag %s does not exist in repository %s/%s for user %s",
tag,
namespace,
repo_name,
get_authenticated_user(),
)
abort(404)
# Get its associated manifest.
manifest = registry_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
if manifest is None:
logger.debug("Could not get manifest on %s/%s:%s::%s", namespace, repo_name, tag.name, verb)
abort(404)
# Retrieve the schema1-compatible version of the manifest.
try:
schema1_manifest = registry_model.get_schema1_parsed_manifest(
manifest, namespace, repo_name, tag.name, storage
)
except ManifestException:
logger.exception(
"Could not get manifest on %s/%s:%s::%s", namespace, repo_name, tag.name, verb
)
abort(400)
if schema1_manifest is None:
abort(404)
# If there is a data checker, call it first.
if checker is not None:
if not checker(tag, schema1_manifest):
logger.debug(
"Check mismatch on %s/%s:%s, verb %s", namespace, repo_name, tag.name, verb
)
abort(404)
# Preload the tag's repository information, so it gets cached.
assert tag.repository.namespace_name
assert tag.repository.name
return tag, manifest, schema1_manifest
def _repo_verb_signature(namespace, repository, tag_name, verb, checker=None, **kwargs):
# Verify that the tag exists and that we have access to it.
tag, manifest, _ = _verify_repo_verb(storage, namespace, repository, tag_name, verb, checker)
# Find the derived image storage for the verb.
derived_image = registry_model.lookup_derived_image(
manifest, verb, storage, varying_metadata={"tag": tag.name}
)
if derived_image is None or derived_image.blob.uploading:
return make_response("", 202)
# Check if we have a valid signer configured.
if not signer.name:
abort(404)
# Lookup the signature for the verb.
signature_value = registry_model.get_derived_image_signature(derived_image, signer.name)
if signature_value is None:
abort(404)
# Return the signature.
return make_response(signature_value)
@check_region_blacklisted()
def _repo_verb(
namespace, repository, tag_name, verb, formatter, sign=False, checker=None, **kwargs
):
# Verify that the image exists and that we have access to it.
logger.debug(
"Verifying repo verb %s for repository %s/%s with user %s with mimetype %s",
verb,
namespace,
repository,
get_authenticated_user(),
request.accept_mimetypes.best,
)
tag, manifest, schema1_manifest = _verify_repo_verb(
storage, namespace, repository, tag_name, verb, checker
)
# Load the repository for later.
repo = model.repository.get_repository(namespace, repository)
if repo is None:
abort(404)
# Check for torrent. If found, we return a torrent for the repo verb image (if the derived
# image already exists).
if request.accept_mimetypes.best == "application/x-bittorrent":
return _torrent_repo_verb(repo, tag, manifest, verb, **kwargs)
# Log the action.
track_and_log("repo_verb", wrap_repository(repo), tag=tag.name, verb=verb, **kwargs)
is_readonly = app.config.get("REGISTRY_STATE", "normal") == "readonly"
# Lookup/create the derived image for the verb and repo image.
if is_readonly:
derived_image = registry_model.lookup_derived_image(
manifest, verb, storage, varying_metadata={"tag": tag.name}, include_placements=True
)
else:
derived_image = registry_model.lookup_or_create_derived_image(
manifest,
verb,
storage.preferred_locations[0],
storage,
varying_metadata={"tag": tag.name},
include_placements=True,
)
if derived_image is None:
logger.error("Could not create or lookup a derived image for manifest %s", manifest)
abort(400)
if derived_image is not None and not derived_image.blob.uploading:
logger.debug("Derived %s image %s exists in storage", verb, derived_image)
is_head_request = request.method == "HEAD"
image_pulled_bytes.labels("bittorrent").inc(derived_image.blob.compressed_size)
download_url = storage.get_direct_download_url(
derived_image.blob.placements, derived_image.blob.storage_path, head=is_head_request
)
if download_url:
logger.debug("Redirecting to download URL for derived %s image %s", verb, derived_image)
return redirect(download_url)
# Close the database handle here for this process before we send the long download.
database.close_db_filter(None)
logger.debug("Sending cached derived | |
<filename>quantile_ml/utils_scoring.py
from collections import OrderedDict
import math
from quantile_ml import utils
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.metrics import mean_squared_error, make_scorer, brier_score_loss, accuracy_score, explained_variance_score, mean_absolute_error, median_absolute_error, r2_score, log_loss, roc_auc_score
import numpy as np
bad_vals_as_strings = set([str(float('nan')), str(float('inf')), str(float('-inf')), 'None', 'none', 'NaN', 'NAN', 'nan', 'NULL', 'null', '', 'inf', '-inf', 'np.nan', 'numpy.nan'])
def advanced_scoring_classifiers(probas, actuals, name=None):
# pandas Series don't play nice here. Make sure our actuals list is indeed a list
actuals = list(actuals)
predictions = list(probas)
print('Here is our brier-score-loss, which is the default value we optimized for while training, and is the value returned from .score() unless you requested a custom scoring metric')
print('It is a measure of how close the PROBABILITY predictions are.')
if name != None:
print(name)
# Sometimes we will be given "flattened" probabilities (only the probability of our positive label), while other times we might be given "nested" probabilities (probabilities of both positive and negative, in a list, for each item).
try:
probas = [proba[1] for proba in probas]
except:
pass
print(format(brier_score_loss(actuals, probas), '.4f'))
print('\nHere is the trained estimator\'s overall accuracy (when it predicts a label, how frequently is that the correct label?)')
predicted_labels = []
for pred in probas:
if pred >= 0.5:
predicted_labels.append(1)
else:
predicted_labels.append(0)
print(format(accuracy_score(y_true=actuals, y_pred=predicted_labels) * 100, '.1f') + '%')
print('\nHere is a confusion matrix showing predictions and actuals by label')
#it would make sense to use sklearn's confusion_matrix here but it apparently has no labels
#took this idea instead from: http://stats.stackexchange.com/a/109015
conf = pd.crosstab(pd.Series(actuals), pd.Series(predicted_labels), rownames=['v Actual v'], colnames=['Predicted >'], margins=True)
print(conf)
print('Here is the accuracy of our trained estimator at each level of predicted probabilities')
# create summary dict
summary_dict = OrderedDict()
for num in range(0, 110, 10):
summary_dict[num] = []
for idx, proba in enumerate(probas):
proba = math.floor(int(proba * 100) / 10) * 10
summary_dict[proba].append(actuals[idx])
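    # e.g. a predicted probability of 0.67 maps to floor(67 / 10) * 10 = 60,
    # so it is counted in the 60% bucket together with everything in [0.6, 0.7)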
for k, v in summary_dict.items():
if len(v) > 0:
print('Predicted probability: ' + str(k) + '%')
actual = sum(v) * 1.0 / len(v)
# Format into a prettier number
actual = round(actual * 100, 0)
print('Actual: ' + str(actual) + '%')
print('# preds: ' + str(len(v)) + '\n')
print('\n\n')
def calculate_and_print_differences(predictions, actuals, name=None):
pos_differences = []
neg_differences = []
# Technically, we're ignoring cases where we are spot on
for idx, pred in enumerate(predictions):
difference = pred - actuals[idx]
if difference > 0:
pos_differences.append(difference)
elif difference < 0:
neg_differences.append(difference)
if name != None:
print(name)
print('Count of positive differences (prediction > actual):')
print(len(pos_differences))
print('Count of negative differences:')
print(len(neg_differences))
if len(pos_differences) > 0:
print('Average positive difference:')
print(sum(pos_differences) * 1.0 / len(pos_differences))
if len(neg_differences) > 0:
print('Average negative difference:')
print(sum(neg_differences) * 1.0 / len(neg_differences))
def advanced_scoring_regressors(predictions, actuals, verbose=2, name=None):
# pandas Series don't play nice here. Make sure our actuals list is indeed a list
actuals = list(actuals)
predictions = list(predictions)
print('\n\n***********************************************')
if name != None:
print(name)
print('Advanced scoring metrics for the trained regression model on this particular dataset:\n')
# 1. overall RMSE
print('Here is the overall RMSE for these predictions:')
print(mean_squared_error(actuals, predictions)**0.5)
# 2. overall avg predictions
print('\nHere is the average of the predictions:')
print(sum(predictions) * 1.0 / len(predictions))
# 3. overall avg actuals
print('\nHere is the average actual value on this validation set:')
print(sum(actuals) * 1.0 / len(actuals))
# 2(a). median predictions
print('\nHere is the median prediction:')
print(np.median(predictions))
# 3(a). median actuals
print('\nHere is the median actual value:')
print(np.median(actuals))
# 4. avg differences (not RMSE)
print('\nHere is the mean absolute error:')
print(mean_absolute_error(actuals, predictions))
print('\nHere is the median absolute error (robust to outliers):')
print(median_absolute_error(actuals, predictions))
print('\nHere is the explained variance:')
print(explained_variance_score(actuals, predictions))
print('\nHere is the R-squared value:')
print(r2_score(actuals, predictions))
# 5. pos and neg differences
calculate_and_print_differences(predictions=predictions, actuals=actuals, name=name)
actuals_preds = list(zip(actuals, predictions))
# Sort by PREDICTED value, since this is what what we will know at the time we make a prediction
actuals_preds.sort(key=lambda pair: pair[1])
actuals_sorted = [act for act, pred in actuals_preds]
predictions_sorted = [pred for act, pred in actuals_preds]
if verbose > 2:
print('Here\'s how the trained predictor did on each successive decile (ten percent chunk) of the predictions:')
for i in range(1,10):
print('\n**************')
print('Bucket number:')
print(i)
# There's probably some fenceposting error here
min_idx = int((i - 1) / 10.0 * len(actuals_sorted))
max_idx = int(i / 10.0 * len(actuals_sorted))
actuals_for_this_decile = actuals_sorted[min_idx:max_idx]
predictions_for_this_decile = predictions_sorted[min_idx:max_idx]
print('Avg predicted val in this bucket')
print(sum(predictions_for_this_decile) * 1.0 / len(predictions_for_this_decile))
print('Avg actual val in this bucket')
print(sum(actuals_for_this_decile) * 1.0 / len(actuals_for_this_decile))
print('RMSE for this bucket')
print(mean_squared_error(actuals_for_this_decile, predictions_for_this_decile)**0.5)
calculate_and_print_differences(predictions_for_this_decile, actuals_for_this_decile)
print('')
print('\n***********************************************\n\n')
def rmse_func(y, predictions):
return mean_squared_error(y, predictions)**0.5
scoring_name_function_map = {
'rmse': rmse_func
, 'median_absolute_error': median_absolute_error
, 'r2': r2_score
, 'r-squared': r2_score
, 'mean_absolute_error': mean_absolute_error
, 'accuracy': accuracy_score
, 'accuracy_score': accuracy_score
, 'log_loss': log_loss
, 'roc_auc': roc_auc_score
, 'brier_score_loss': brier_score_loss
}
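# Note (descriptive comment, example names assumed): scoring_method passed to the
# scorer classes below may be either one of the keys above or any callable taking
# (y, predictions), e.g.
#
#   scorer = RegressionScorer(scoring_method=lambda y, pred: mean_absolute_error(y, pred))
#
# in which case the callable is used directly as self.scoring_func.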
class RegressionScorer(object):
def __init__(self, scoring_method=None):
if scoring_method is None:
scoring_method = 'rmse'
self.scoring_method = scoring_method
if callable(scoring_method):
self.scoring_func = scoring_method
else:
self.scoring_func = scoring_name_function_map[scoring_method]
self.scoring_method = scoring_method
def get(self, prop_name, default=None):
try:
return getattr(self, prop_name)
except AttributeError:
return default
def score(self, estimator, X, y, took_log_of_y=False, advanced_scoring=False, verbose=2, name=None):
X, y = utils.drop_missing_y_vals(X, y, output_column=None)
if isinstance(estimator, GradientBoostingRegressor):
X = X.toarray()
predictions = estimator.predict(X)
if took_log_of_y:
for idx, val in enumerate(predictions):
predictions[idx] = math.exp(val)
try:
score = self.scoring_func(y, predictions)
except ValueError:
bad_val_indices = []
for idx, val in enumerate(y):
if str(val) in bad_vals_as_strings:
bad_val_indices.append(idx)
predictions = [val for idx, val in enumerate(predictions) if idx not in bad_val_indices]
y = [val for idx, val in enumerate(y) if idx not in bad_val_indices]
print('Found ' + str(len(bad_val_indices)) + ' null or infinity values in the y values. We will ignore these, and report the score on the rest of the dataset')
score = self.scoring_func(y, predictions)
if advanced_scoring == True:
if hasattr(estimator, 'name'):
print(estimator.name)
advanced_scoring_regressors(predictions, y, verbose=verbose, name=name)
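        # the score is negated so that callers that maximise (e.g. a hyperparameter
        # search) effectively minimise the error metric being used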
return - 1 * score
class ClassificationScorer(object):
def __init__(self, scoring_method=None):
if scoring_method is None:
scoring_method = 'brier_score_loss'
self.scoring_method = scoring_method
if callable(scoring_method):
self.scoring_func = scoring_method
else:
self.scoring_func = scoring_name_function_map[scoring_method]
def get(self, prop_name, default=None):
try:
return getattr(self, prop_name)
except AttributeError:
return default
def clean_probas(self, probas):
print('Warning: We have found some values in the predicted probabilities that fall outside the range {0, 1}')
print('This is likely the result of a model being trained on too little data, or with a bad set of hyperparameters. If you get this warning while doing a hyperparameter search, for instance, you can probably safely ignore it')
print('We will cap those values at 0 or 1 for the purposes of scoring, but you should be careful to have similar safeguards in place in prod if you use this model')
if not isinstance(probas[0], list):
probas = [min(max(pred, 0), 1) for pred in probas]
return probas
else:
cleaned_probas = []
for proba_tuple in probas:
cleaned_tuple = []
for item in proba_tuple:
cleaned_tuple.append(max(min(item, 1), 0))
cleaned_probas.append(cleaned_tuple)
return cleaned_probas
def score(self, estimator, X, y, advanced_scoring=False):
X, y = utils.drop_missing_y_vals(X, y, output_column=None)
if isinstance(estimator, GradientBoostingClassifier):
X = X.toarray()
predictions = estimator.predict_proba(X)
if self.scoring_method == 'brier_score_loss':
# At the moment, Microsoft's LightGBM returns probabilities > 1 and < 0, which can break some scoring functions. So we have to take the max of 1 and the pred, and the min of 0 and the pred.
probas = [max(min(row[1], 1), 0) for row in predictions]
predictions = probas
try:
score = self.scoring_func(y, predictions)
except ValueError as e:
bad_val_indices = []
for idx, val in enumerate(y):
if str(val) in bad_vals_as_strings:
bad_val_indices.append(idx)
predictions = [val for idx, val in enumerate(predictions) if idx not in bad_val_indices]
y = [val for idx, val in enumerate(y) if idx not in bad_val_indices]
print('Found ' + str(len(bad_val_indices)) + ' null or infinity values in the y values. We will ignore these, and report the score on the rest of the dataset')
try:
score = self.scoring_func(y, predictions)
except ValueError:
                # Sometimes, particularly for a badly fit model using either too little data, or a really bad set of hyperparameters during a grid search, we can predict probas that are > 1 or < 0. We'll cap those values at 0 or 1 and score on the capped probabilities
                predictions = self.clean_probas(predictions)
                score = self.scoring_func(y, predictions)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-07-25 19:44:35
# @Author : <NAME> (<EMAIL>)
# @Link : http://iridescent.ink
# @Version : $1.0$
# from __future__ import print_function
import time
import torchlib as tl
import torchsar as ts
import torch as th
from torch.nn.parameter import Parameter
from collections import OrderedDict
from dataset import saveimage
import matplotlib.pyplot as plt
def focus(X, xas, ca=None, pa=None, isfft=True, ftshift=True):
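    # Descriptive note (inferred from the code below, not original documentation):
    # X is a complex SAR image held as a real tensor with a trailing dim of 2
    # (real, imag); xas is a Qa x Na polynomial basis over azimuth frequency and
    # ca an N x Qa coefficient matrix, so pa = ca @ xas is the per-sample azimuth
    # phase error that is removed in the azimuth-frequency domain.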
d = X.dim()
if pa is None:
if ca is not None:
pa = th.matmul(ca, xas.to(ca.device)) # N-Qa x Qa-Na
else:
raise ValueError('---You should specify ca or pa!')
sizea = [1] * d
sizea[0], sizea[-3], sizea[-2], sizea[-1] = pa.size(0), pa.size(1), 1, 2
epa = th.stack((th.cos(pa), -th.sin(pa)), dim=-1)
epa = epa.reshape(sizea)
if isfft:
X = ts.fft(X, n=None, axis=-3, norm=None, shift=ftshift)
X = ts.ebemulcc(X, epa)
X = ts.ifft(X, n=None, axis=-3, norm=None, shift=ftshift)
return X
class CELM(th.nn.Module):
def __init__(self, Na, Nr, Qas, Conv=[[16, 17, 1]], ftshift=True):
r"""CELM
CELM
Parameters
----------
Na : {number}
[description]
Nr : {number}
1 or the number of range cells
Qas : {list}
[description]
ftshift : {bool}, optional
[description] (the default is True, which [default_description])
"""
super(CELM, self).__init__()
self.Na = Na
self.Nr = Nr
self.Qas = Qas
self.ftshift = ftshift
self.NQas = len(Qas) if Qas is not None else 0
self.nconvs = len(Conv) if Conv is not None else 0
self.negslope = 0.01
self.L = Conv[0][0] * (256 - Conv[0][1] + 1)
self.BETA = Parameter(th.zeros([self.L, self.NQas]), requires_grad=False)
FD = OrderedDict()
FD['conv1'] = th.nn.Conv2d(2, Conv[0][0], Conv[0][1:3], stride=Conv[0][3:5], padding=Conv[0][5:7], dilation=Conv[0][7:9], groups=Conv[0][9])
FD['in1'] = th.nn.InstanceNorm2d(Conv[0][0])
FD['relu1'] = th.nn.LeakyReLU(self.negslope)
for n in range(1, self.nconvs):
FD['conv' + str(n + 1)] = th.nn.Conv2d(Conv[n - 1][0], Conv[n][0], Conv[n][1:3], stride=Conv[n][3:5],
padding=Conv[n][5:7], dilation=Conv[n][7:9], groups=Conv[n][9])
FD['in' + str(n + 1)] = th.nn.InstanceNorm2d(Conv[n][0])
FD['relu' + str(n + 1)] = th.nn.LeakyReLU(self.negslope)
FD['gapool'] = th.nn.AdaptiveAvgPool2d((None, 1)) # N-1-Na-1
self.features = th.nn.Sequential(FD)
def forward_feature(self, X):
H = th.stack((X[..., 0], X[..., 1]), dim=1)
H = self.features(H)
H = H.view(H.size(0), -1) # N-Na
return H
def forward_predict(self, H, BETA=None):
if BETA is None:
return th.matmul(H, self.BETA) # ca/pa
else:
return th.matmul(H, BETA) # ca/pa
def optimize(self, H, T, C, device=None, assign=True):
if device is not None:
H = H.to(device)
device = H.device
T = T.to(device)
N, L = H.shape
if N <= L:
IC = th.eye(N, device=device) / C
BETA = (H.t()).mm(th.inverse(IC + H.mm(H.t()))).mm(T)
else:
IC = th.eye(L, device=device) / C
BETA = (th.inverse(IC + (H.t()).mm(H))).mm(H.t()).mm(T)
BETA = Parameter(BETA, requires_grad=False)
if assign:
self.BETA = BETA
return BETA
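    # optimize() above is the regularised least-squares (ridge) solution for the
    # ELM output weights: BETA = H^T (I/C + H H^T)^{-1} T when N <= L, and
    # BETA = (I/C + H^T H)^{-1} H^T T otherwise, where C is the regularisation
    # constant selected during training.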
def weights_init(m):
if isinstance(m, th.nn.Conv2d):
# th.nn.init.orthogonal_(m.weight.data, th.nn.init.calculate_gain('leaky_relu', 0.5))
th.nn.init.normal_(m.weight.data)
No, Ni, Hk, Wk = m.weight.data.shape
if Hk * Wk < No * Ni:
m.weight.data = tl.orth(m.weight.data.reshape(No * Ni, Hk * Wk)).reshape(No, Ni, Hk, Wk)
else:
m.weight.data = tl.orth(m.weight.data.reshape(No * Ni, Hk * Wk).t()).t().reshape(No, Ni, Hk, Wk)
m.bias.data.zero_()
if isinstance(m, th.nn.Linear):
th.nn.init.orthogonal_(m.weight.data, th.nn.init.calculate_gain('leaky_relu', 0.5))
m.bias.data.zero_()
class BaggingECELMs(th.nn.Module):
def __init__(self, Na, Nr, Qas=[[2, 3]], Qrs=None, Convs=[[[32, 11, 1]]], xa=None, xr=None, ftshift=True, cstrategy='Entropy', seed=None):
super(BaggingECELMs, self).__init__()
self.Na = Na
self.Nr = Nr
self.Qas = Qas
self.Qrs = Qrs
self.Convs = Convs
self.xa = xa
self.xr = xr
self.ftshift = ftshift
self.cstrategy = cstrategy.lower()
self.seed = seed
self.NQas = len(Qas) if Qas is not None else 0
self.NQrs = len(Qrs) if Qrs is not None else 0
self.Ncelms = len(Convs) if Convs is not None else 0
self.celms = []
if self.seed is not None:
th.manual_seed(seed)
th.backends.cudnn.deterministic = True
th.backends.cudnn.benchmark = True
if Qas is not None:
if xa is None:
xa = ts.ppeaxis(Na, norm=True, shift=ftshift, mode='fftfreq')
xa = xa.reshape(1, Na)
xas = th.tensor([])
for q in Qas:
xas = th.cat((xas, xa ** q), axis=0)
self.xa = Parameter(xa, requires_grad=False) # 1-Na
self.xas = Parameter(xas, requires_grad=False) # NQas-Na
if Qrs is not None:
if xr is None:
xr = ts.ppeaxis(Nr, norm=True, shift=ftshift, mode='fftfreq')
xr = xr.reshape(1, Nr)
xrs = th.tensor([])
for q in Qrs:
xrs = th.cat((xrs, xr ** q), axis=0)
self.xr = Parameter(xr, requires_grad=False) # 1-Nr
self.xrs = Parameter(xrs, requires_grad=False) # NQrs-Nr
for Conv in Convs:
celmn = CELM(Na, Nr, Qas, Conv, ftshift)
celmn.apply(weights_init) # init weight
self.celms.append(celmn)
self.celms = th.nn.ModuleList(self.celms)
self.combine_metric = ts.Entropy('natural', reduction=None)
self.loss_mse_fn = th.nn.MSELoss(reduction='mean')
def forwardn(self, n, X, isfft=True):
H = self.celms[n].forward_feature(X)
pca = self.celms[n].forward_predict(H)
return focus(X, self.xas, ca=pca, pa=None, isfft=isfft, ftshift=self.celms[n].ftshift), pca
def ensemble_forward(self, X, isfft=True, cstrategy=None):
if cstrategy is not None:
self.cstrategy = cstrategy
if self.cstrategy == 'entropy':
Smin = th.ones((X.shape[0],), device=X.device) * 1e32
Y = th.zeros_like(X)
ca = th.zeros((X.shape[0], self.celms[0].NQas), device=X.device)
xa = self.xas.to(X.device)
for n in range(self.Ncelms):
H = self.celms[n].forward_feature(X)
pca = self.celms[n].forward_predict(H)
Z = focus(X, xa, pca, isfft=isfft, ftshift=self.celms[n].ftshift)
S = self.combine_metric(Z)
idx = S < Smin
Y[idx] = Z[idx]
ca[idx] = pca[idx]
Smin[idx] = S[idx]
return Y, ca
if self.cstrategy == 'averagecoef':
ca = 0.
xa = self.xas.to(X.device)
for n in range(self.Ncelms):
H = self.celms[n].forward_feature(X)
pca = self.celms[n].forward_predict(H)
ca += pca
ca /= self.Ncelms
Y = focus(X, xa, ca=ca, pa=None, isfft=isfft, ftshift=self.celms[n].ftshift)
return Y, ca
if self.cstrategy == 'averagephase':
pa = 0.
xa = self.xas.to(X.device)
for n in range(self.Ncelms):
H = self.celms[n].forward_feature(X)
pca = self.celms[n].forward_predict(H)
pa += th.matmul(pca, xa)
pa /= self.Ncelms
Y = focus(X, xa, ca=None, pa=pa, isfft=isfft, ftshift=self.celms[n].ftshift)
return Y, pa
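    # Summary of the combination strategies above (descriptive comment only):
    #   'entropy'      - run every CELM and keep, per sample, the focused image
    #                    with the lowest image entropy (and its coefficients)
    #   'averagecoef'  - average the predicted polynomial coefficients, then focus once
    #   'averagephase' - average the reconstructed phase curves, then focus once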
def train_valid(self, Xtrain, catrain, crtrain, Xvalid, cavalid, crvalid, sizeBatch, nsamples1, loss_ent_fn, loss_cts_fn, loss_fro_fn, Cs, device):
tstart = time.time()
bestC = [0.] * self.Ncelms
with th.no_grad():
for n in range(self.Ncelms):
idxt = list(th.randint(Xtrain.shape[0], [nsamples1]))
Xt = Xtrain[idxt]
cat = catrain[idxt]
Ns = Xt.shape[0]
numBatch = int(Ns / sizeBatch) if Ns % sizeBatch == 0 else int(Ns / sizeBatch) + 1
idx = ts.randperm(0, Ns, Ns)
X, ca = Xt[idx], cat[idx]
# print(X.shape, ca.shape, cr.shape, numBatch, sizeBatch, Ns)
bestMetric, bestBETA = 1e32, 0
for C in Cs:
H = th.tensor([], device=device)
T = th.tensor([], device=device)
self.train()
self.celms[n].train()
lossENTv, lossCTSv, lossFROv, lossvtrain = 0., 0., 0., 0.
for b in range(numBatch):
i = range(b * sizeBatch, (b + 1) * sizeBatch)
xi, cani = X[i], ca[i]
xi, cani = xi.to(device), cani.to(device)
hi = self.celms[n].forward_feature(xi)
H = th.cat((H, hi), axis=0)
T = th.cat((T, cani), axis=0)
BETAc = self.celms[n].optimize(H, T, C, device=device, assign=True)
pcan = self.celms[n].forward_predict(H)
Y = focus(X.to('cpu'), self.xas, pcan.to('cpu'))
lossENT = loss_ent_fn(Y)
lossCTS = loss_cts_fn(Y)
lossFRO = loss_fro_fn(Y)
loss = self.loss_mse_fn(pcan, T)
lossvtrain = loss.item()
lossCTSv = lossCTS.item()
lossENTv = lossENT.item()
lossFROv = lossFRO.item()
tend = time.time()
print("--->Train focuser: %d, C: %12.6f, loss: %.4f, entropy: %.4f, l1norm: %.4f, contrast: %.4f, time: %ss" %
(n, C, lossvtrain, lossENTv, lossFROv, lossCTSv, tend - tstart))
metricnc = self.validnc(n, C, Xvalid, cavalid, crvalid, sizeBatch, loss_ent_fn, loss_cts_fn, loss_fro_fn, device, name='Valid focuser')
# print('===', n, C, metricnc)
if metricnc < bestMetric:
bestC[n] = C
bestBETA = BETAc.clone()
bestMetric = metricnc
self.celms[n].BETA.data = bestBETA.clone()
return bestC
def validnc(self, n, C, X, ca, cr, sizeBatch, loss_ent_fn, loss_cts_fn, loss_fro_fn, device, name='Valid focuser'):
self.eval()
self.celms[n].eval()
N, Na, Nr, _ = X.shape
tstart = time.time()
numSamples = X.shape[0]
numBatch = int(numSamples / sizeBatch) if numSamples % sizeBatch == 0 else int(numSamples / sizeBatch) + 1
# idx = ts.randperm(0, numSamples, numSamples)
idx = list(range(0, numSamples))
X, ca = X[idx], ca[idx]
with th.no_grad():
lossENTv, lossCTSv, lossFROv, lossvvalid = 0., 0., 0., 0.
for b in range(numBatch):
i = range(b * sizeBatch, (b + 1) * sizeBatch)
xi, cai = X[i], ca[i]
xi, cai = xi.to(device), cai.to(device)
yi, pcai = self.forwardn(n, xi, isfft=True)
lossENT = loss_ent_fn(yi)
lossCTS = loss_cts_fn(yi)
lossFRO = loss_fro_fn(yi)
loss = self.loss_mse_fn(pcai, cai)
lossvvalid += loss.item()
lossCTSv += lossCTS.item()
lossENTv += lossENT.item()
lossFROv += lossFRO.item()
lossvvalid /= numBatch
lossCTSv /= numBatch
lossENTv /= numBatch
lossFROv /= numBatch
tend = time.time()
print("--->" + name + ": %d, C: %12.6f, loss: %.4f, entropy: %.4f, l1norm: %.4f, contrast: %.4f, time: %ss" %
(n, C, lossvvalid, lossENTv, lossFROv, lossCTSv, tend - tstart))
return lossENTv
def ensemble_test(self, X, ca, cr, sizeBatch, loss_ent_fn, loss_cts_fn, loss_fro_fn, device, name='Test'):
self.eval()
if self.cstrategy == 'averagephase':
ca = th.matmul(ca, self.xas.to(ca.device)) if ca is not None else None
# cr = th.matmul(cr, self.xrs.to(cr.device)) if cr is not None else None
N, Na, Nr, _ = X.shape
tstart = time.time()
numSamples = X.shape[0]
numBatch | |
Wine": 0xB59F62,
"Apple-A-Day": 0x903F45,
"Appleblossom": 0xDAB5B4,
"Applegate": 0x8AC479,
"Applegate Park": 0xAEAD93,
"Applemint": 0xCDEACD,
"Applemint Soda": 0xF3F5E9,
"Applesauce": 0xF6D699,
"Applesauce Cake": 0xC2A377,
"Appletini": 0x929637,
"Appleton": 0x6EB478,
"Approaching Dusk": 0x8B97A5,
"Approval Green": 0x039487,
"Apricot": 0xFFB16D,
"Apricot Appeal": 0xFEC382,
"Apricot Blush": 0xFEAEA5,
"Apricot Brandy": 0xC26A5A,
"Apricot Brown": 0xCC7E5B,
"Apricot Buff": 0xCD7E4D,
"Apricot Chicken": 0xDA8923,
"Apricot Cream": 0xF1BD89,
"Apricot Flower": 0xFFBB80,
"Apricot Foam": 0xEEDED8,
"Apricot Fool": 0xFFD2A0,
"Apricot Freeze": 0xF3CFB7,
"Apricot Gelato": 0xF5D7AF,
"Apricot Glazed Chicken": 0xEEAA22,
"Apricot Glow": 0xFFCE79,
"Apricot Ice": 0xFFF6E9,
"Apricot Ice Cream": 0xF8CC9C,
"Apricot Iced Tea": 0xFBBE99,
"Apricot Illusion": 0xE2C4A6,
"Apricot Jam": 0xEEA771,
"Apricot Light": 0xFFCA95,
"Apricot Lily": 0xFECFB5,
"Apricot Mix": 0xB47756,
"Apricot Mousse": 0xFCDFAF,
"Apricot Nectar": 0xECAA79,
"Apricot Obsession": 0xF8C4B4,
"Apricot Orange": 0xC86B3C,
"Apricot Preserves": 0xEEB192,
"Apricot Red": 0xE8917D,
"Apricot Sherbet": 0xFACD9E,
"Apricot Sorbet": 0xE8A760,
"Apricot Spring": 0xF1B393,
"Apricot Tan": 0xDD9760,
"Apricot Wash": 0xFBAC82,
"Apricot White": 0xF7F0DB,
"Apricot Yellow": 0xF7BD81,
"Apricotta": 0xD8A48F,
"April Blush": 0xF6D0D8,
"April Fool's Red": 0x1FB57A,
"April Green": 0xA9B062,
"April Love": 0x8B3D2F,
"April Mist": 0xCCD9C9,
"April Showers": 0xDADEB5,
"April Sunshine": 0xFBE198,
"April Tears": 0xB4CBD4,
"April Wedding": 0xC5CFB1,
"April Winds": 0xD5E2E5,
"Aqua": 0x00FFFF,
"Aqua Bay": 0xB5DFC9,
"Aqua Belt": 0x7ACAD0,
"Aqua Bloom": 0x96D3D8,
"Aqua Blue": 0x79B6BC,
"Aqua Breeze": 0xD8E8E4,
"Aqua Clear": 0x8BD0DD,
"Aqua Cyan": 0x01F1F1,
"Aqua Deep": 0x014B43,
"Aqua Eden": 0x85C7A6,
"Aqua Experience": 0x038E85,
"Aqua Fiesta": 0x8CC3C3,
"Aqua Foam": 0xADC3B4,
"Aqua Forest": 0x5FA777,
"Aqua Fresco": 0x4A9FA3,
"Aqua Frost": 0xA9D1D7,
"Aqua Glass": 0xD2E8E0,
"Aqua Green": 0x12E193,
"Aqua Grey": 0x889FA5,
"Aqua Haze": 0xD9DDD5,
"Aqua Island": 0xA1DAD7,
"Aqua Lake": 0x30949D,
"Aqua Mist": 0xA0C9CB,
"Aqua Nation": 0x08787F,
"Aqua Oasis": 0xBCE8DD,
"Aqua Obscura": 0x05696B,
"Aqua Pura": 0xDDF2EE,
"Aqua Rapids": 0x63A39C,
"Aqua Revival": 0x539F91,
"Aqua Sea": 0x6BAAAE,
"Aqua Sky": 0x7BC4C4,
"Aqua Smoke": 0x8C9FA0,
"Aqua Sparkle": 0xD3E4E6,
"Aqua Splash": 0x85CED1,
"Aqua Spray": 0xA5DDDB,
"Aqua Spring": 0xE8F3E8,
"Aqua Squeeze": 0xDBE4DC,
"Aqua Tint": 0xE5F1EE,
"Aqua Velvet": 0x00A29E,
"Aqua Verde": 0x56B3C3,
"Aqua Vitale": 0x7BBDC7,
"Aqua Waters": 0x00937D,
"Aqua Whisper": 0xBFDFDF,
"Aqua Wish": 0xA0E3D1,
"Aqua Zing": 0x7CD8D6,
"Aqua-Sphere": 0x9CB0B3,
"Aquacade": 0xE1F0EA,
"Aquadazzle": 0x006F49,
"Aquadulce": 0x7B9F82,
"Aqualogic": 0x57B7C5,
"Aquamarine": 0x2EE8BB,
"Aquamarine Blue": 0x71D9E2,
"Aquamarine Dream": 0xB3C4BA,
"Aquamarine Ocean": 0x82CDAD,
"Aquamentus Green": 0x00A800,
"Aquarelle": 0x61AAB1,
"Aquarelle Beige": 0xE8E0D5,
"Aquarelle Blue": 0xBFE0E4,
"Aquarelle Green": 0xE2F4E4,
"Aquarelle Lilac": 0xEDC8FF,
"Aquarelle Mint": 0xDBF4D8,
"Aquarelle Orange": 0xFBE8E0,
"Aquarelle Pink": 0xFBE9DE,
"Aquarelle Purple": 0xD8E1F1,
"Aquarelle Red": 0xFEDDDD,
"Aquarelle Sky": 0xBCE4EB,
"Aquarelle Yellow": 0xF4EEDA,
"Aquarium": 0x356B6F,
"Aquarium Blue": 0x66CDAA,
"Aquarium Diver": 0x0A98AC,
"Aquarius": 0x3CADD4,
"Aquarius Reef Base": 0x559999,
"Aquastone": 0x89C6B7,
"Aquatic": 0x99C1CC,
"Aquatic Cool": 0x41A0B4,
"Aquatic Green": 0x49999A,
"Aquatint": 0xB8E7DE,
"Aquatone": 0xA6B5A9,
"Aquaverde": 0xA3C0BD,
"Aqueduct": 0x60B3BC,
"Aquella": 0x59B6D9,
"Aqueous": 0x388D95,
"Aquifer": 0xE2ECED,
"Aquitaine": 0x88ABB4,
"Arabella": 0x82ACC4,
"Arabesque": 0xD16F52,
"Arabian Bake": 0xCD9945,
"Arabian Red": 0xA14C3F,
"Arabian Sands": 0xDDC6B1,
"Arabian Silk": 0x786E97,
"Arabian Spice": 0x884332,
"Arabian Veil": 0xC9FFFA,
"Arabic Coffee": 0x6F4D3F,
"Arabica Mint": 0xC0FFEE,
"Arable Brown": 0x7A552E,
"Aragon": 0xB06455,
"Aragon Green": 0x47BA87,
"Aragonite": 0xE4E0D4,
"Aragonite Blue": 0x6A95B1,
"Aragonite Grey": 0x948E96,
"Aragonite White": 0xF3F1F3,
"Araigaki Orange": 0xEC8254,
"Arame Seaweed Green": 0x3F4635,
"Arapawa": 0x274A5D,
"Arathi Highlands": 0x93A344,
"Araucana Egg": 0xADD8E1,
"Arava": 0xA18D71,
"Arbol De Tamarindo": 0xCDA182,
"Arbor Hollow": 0xC1C2B4,
"Arbor Vitae": 0xBBC3AD,
"Arboretum": 0x70BA9F,
"Arc Light": 0xCCDDFF,
"Arcade Fire": 0xEE3311,
"Arcade Glow": 0x0022CC,
"Arcade White": 0xEDEBE2,
"Arcadia": 0x00A28A,
"Arcadian Green": 0xA3C893,
"Arcala Green": 0x3B6C3F,
"Arcane": 0x98687E,
"Arcavia Red": 0x6A0002,
"Archaeological Site": 0x8E785C,
"Archeology": 0x6E6A5E,
"Architecture Blue": 0x7195A6,
"Architecture Grey": 0x6B6A69,
"Archivist": 0x9F8C73,
"Arctic": 0x648589,
"Arctic Blue": 0x95D6DC,
"Arctic Cotton": 0xE6E3DF,
"Arctic Daisy": 0xEBE4BE,
"Arctic Dawn": 0xE3E5E8,
"Arctic Dusk": 0x735B6A,
"Arctic Feelings": 0xAFBEC1,
"Arctic Flow": 0xDAEAE4,
"Arctic Fox": 0xE7E7E2,
"Arctic Glow": 0xC9D1E9,
"Arctic Green": 0x45BCB3,
"Arctic Grey": 0xBBCCDD,
"Arctic Ice": 0xBFC7D6,
"Arctic Lichen Green": 0x6F7872,
"Arctic Lime": 0xD0FF14,
"Arctic Nights": 0x345C61,
"Arctic Ocean": 0x66C3D0,
"Arctic Paradise": 0xB8DFF8,
"Arctic Rose": 0xB7ABB0,
"Arctic Shadow": 0xD9E5EB,
"Arctic Water": 0x00FCFC,
"Arctic White": 0xE9EAE7,
"Ardcoat": 0xE2DEDF,
"Ardent Coral": 0xE5756A,
"Ardósia": 0x232F2C,
"Ares Red": 0xDD2200,
"Ares Shadow": 0x62584C,
"Argan Oil": 0x8B593E,
"Argent": 0x888888,
"Argos": 0xBDBDB7,
"Argyle": 0x348A5D,
"Argyle Purple": 0x895C79,
"Argyle Rose": 0xC48677,
"Aria": 0xE3E4E2,
"Aria Ivory": 0xF9E8D8,
"Arid Landscape": 0xDCD6C6,
"Arid Plains": 0xB6B4A9,
"Ariel": 0xAED7EA,
"Ariel's Delight": 0xB2A5D3,
"Aristocrat Ivory": 0xFAF0DF,
"Aristocrat Peach": 0xECCEB9,
"Aristocratic Pink": 0xDDAACC,
"Arizona": 0xEEB377,
"Arizona Clay": 0xAD735A,
"Arizona Stone": 0x00655A,
"Arizona Sunrise": 0xEBBCB9,
"Arizona Tan": 0xE5BC82,
"Arizona Tree Frog": 0x669264,
"Armada": 0x536762,
"Armadillo": 0x484A46,
"Armadillo Egg": 0x7D4638,
"Armageddon Dunes": 0x926A25,
"Armageddon Dust": 0xD3A907,
"Armagnac": 0xAD916C,
"Armor": 0x74857F,
"Armor Wash": 0x030303,
"Armored Steel": 0x747769,
"Armory": 0x6A6B65,
"Army Canvas": 0x5B6F61,
"Army Green": 0x4B5320,
"Army Issue": 0x8A806B,
"Army Issue Green": 0x838254,
"Arnica": 0xBF8F37,
"Arnica Yellow": 0xE59B00,
"Aroma": 0xD3C1C5,
"Aroma Blue": 0x96D2D6,
"Aroma Garden": 0xA1C4A8,
"Aromatic": 0x706986,
"Aromatic Breeze": 0xFFCECB,
"Arona": 0x879BA3,
"Arousing Alligator": 0x776600,
"Arraign": 0x5C546E,
"Arresting Auburn": 0x5A3532,
"Arrow Creek": 0x927257,
"Arrow Quiver": 0xC7A998,
"Arrow Rock": 0xA28440,
"Arrow Shaft": 0x5C503A,
"Arrowhead": 0x514B40,
"Arrowhead Lake": 0x58728A,
"Arrowhead White": 0xF9EAEB,
"Arrowroot": 0xF8DECF,
"Arrowroote": 0xE4DECF,
"Arrowtown": 0x827A67,
"Arrowwood": 0xBC8D1F,
"Arsenic": 0x3B444B,
"Art and Craft": 0x896956,
"Art Deco Pink": 0xCDACA0,
"Art Deco Red": 0x623745,
"Art District": 0x94897C,
"Art House Pink": 0xC06F70,
"Art Nouveau Glass": 0xA29AA0,
"Art Nouveau Green": 0x9C932F,
"Art Nouveau Violet": 0xA08994,
"Artemis": 0xD2A96E,
"Artemis Silver": 0xDDDDEE,
"Artemisia": 0xE3EBEA,
"Arterial Blood Red": 0x711518,
"Artesian Pool": 0xA6BEE1,
"Artesian Water": 0x007DB6,
"Artesian Well": 0x5EB2AA,
"Artful Aqua": 0x91B4B3,
"Artful Magenta": 0x80505D,
"Artichoke": 0x8F9779,
"Artichoke Dip": 0xA19676,
"Artichoke Green": 0x4B6D41,
"Artichoke Mauve": 0xC19AA5,
"Artifact": 0xCA9D8D,
"Artificial Strawberry": 0xFF43A4,
"Artificial Turf": 0x41B45C,
"Artillery": 0x746F67,
"Artisan": 0x8F5C45,
"Artisan Crafts": 0xB99779,
"Artisan Tan": 0xB09879,
"Artisan Tea": 0xDAC2AF,
"Artisan Tile": 0x845E40,
"Artisans Gold": 0xF2AB46,
"Artist Blue": 0x01343A,
"Artist's Canvas": 0xEEE4D2,
"Artist's Shadow": 0xA1969B,
"Artiste": 0x987387,
"Artistic License": 0x434053,
"Artistic Stone": 0x5C6B65,
"Artistic Taupe": 0xC3B1AC,
"Artistic Violet": 0xD0D2E9,
"Arts & Crafts Gold": 0xF5C68B,
"Arts and Crafts": 0x7D6549,
"Aruba Aqua": 0xD1DED3,
"Aruba Blue": 0x81D7D3,
"Aruba Green": 0x54B490,
"Arugula": 0x75AD5B,
"Arylide Yellow": 0xE9D66B,
"Asagi Blue": 0x48929B,
"Asagi Koi": 0x455559,
"Asagi Yellow": 0xF7BB7D,
"Asfar Yellow": 0xFCEF01,
"Ash": 0xBEBAA7,
"Ash Blonde": 0xD7BEA5,
"Ash Blue": 0xC0C6C9,
"Ash Brown": 0x98623C,
"Ash Cherry Blossom": 0xE8D3D1,
"Ash Gold": 0x8C6F54,
"Ash Grey": 0xC1B5A9,
"Ash Grove": 0xB9B3BF,
"Ash Hollow": 0xA88E8B,
"Ash in the Air": 0xD9DDE5,
"Ash Mauve": 0x737486,
"Ash Pink": 0x998E91,
"Ash Plum": 0xE8D3C7,
"Ash Rose": 0xB5817D,
"Ash to Ash": 0x4E4E4C,
"Ash Tree": 0xAABB99,
"Ash Tree Bark": 0xCECFD6,
"Ash Violet": 0x9695A4,
"Ash White": 0xE9E4D4,
"Ash Yellow": 0xF0BD7E,
"Ashberry": 0xB495A4,
"Ashen": 0xC9BFB2,
"Ashen Brown": 0x994444,
"Ashen Plum": 0x9B9092,
"Ashen Tan": 0xD3CABF,
"Ashen Wind": 0x94A9B7,
"Ashenvale Nights": 0x104071,
"<NAME>": 0x45575E,
"Ashes": 0xB8B5AD,
"Ashes to Ashes": 0xBBB3A2,
"Ashley Blue": 0x8699AB,
"Ashlite": 0xA7A49F,
"Ashton Blue": 0x4A79BA,
"Ashton Skies": 0x7B8EB0,
"Ashwood": 0xBCC4BD,
"Asian Fusion": 0xECE0CD,
"Asian Ivory": 0xE8E0CD,
"Asian Jute": 0xD4B78F,
"Asian Pear": 0xAE9156,
"Asian Violet": 0x8B818C,
"Āsmānī Sky": 0x88DDBB,
"Aspara": 0x70B2CC,
"Asparagus": 0x77AB56,
"Asparagus Cream": 0x96AF54,
"Asparagus Fern": 0xB9CB5A,
"Asparagus Green": 0xD2CDB4,
"Asparagus Sprig": 0x576F44,
"Asparagus Yellow": 0xDAC98E,
"Aspen Aura": 0x83A494,
"Aspen Branch": 0xC6BCAD,
"Aspen Gold": 0xFFD662,
"Aspen Green": 0x7E9B76,
"Aspen Hush": 0x6A8D88,
"Aspen Mist": 0xCFD7CB,
"Aspen Snow": 0xF0F0E7,
"Aspen Valley": 0x687F7A,
"Aspen Whisper": 0xEDF1E3,
"Aspen Yellow": 0xF6DF9F,
"Asphalt": 0x130A06,
"Asphalt Blue": 0x474C55,
"Asphalt Grey": 0x5E5E5D,
"Aspiring Blue": 0xA2C1C0,
"Assassin": 0x2D4F83,
"Assassin's Red": 0xF60206,
"Assateague Sand": 0xE1D0B2,
"Assault": 0x1C4374,
"Aster": 0x867BA9,
"Aster Flower Blue": 0x9BACD8,
"Aster Petal": 0xD4DAE2,
"Aster Purple": 0x7D74A8,
"Aster Violetta": 0x8F629A,
"Astilbe": 0xF091A9,
"Astorath Red": 0xDD482B,
"Astra": 0xEDD5A6,
"Astral": 0x376F89,
"Astral Aura": 0x363151,
"Astral Spirit": 0x8EC2E7,
"Astro Arcade Green": 0x77FF77,
"Astro Bound": 0x899FB9,
"Astro Nautico": 0x5383C3,
"Astro Purple": 0x6D5ACF,
"Astro Sunset": 0x937874,
"Astro Zinger": 0x797EB5,
"Astrogranite": 0x757679,
"Astrogranite Debris": 0x3B424C,
"Astrolabe Reef": 0x2D96CE,
"Astronaut": 0x445172,
"Astronaut Blue": 0x214559,
"Astronomer": 0xE8F2EB,
"Astronomical": 0x474B4A,
"Astronomicon Grey": 0x6B7C85,
"Astroscopus Grey": 0xAFB4B6,
"Astroturf": 0x67A159,
"Asurmen Blue Wash": 0x273E51,
"Aswad Black": 0x17181C,
"At Ease": 0xE7EEE1,
"At Ease Soldier": 0x9E9985,
"At The Beach": 0xE7D9B9,
"Atelier": 0xA3ABB8,
"Ateneo Blue": 0x003A6C,
"Athena Blue": 0x66DDFF,
"Athenian Green": 0x92A18A,
"Athens Grey": 0xDCDDDD,
"Athonian Camoshade": 0x6D8E44,
"Aths Special": 0xD5CBB2,
"Atlantic Blue": 0x008997,
"Atlantic Breeze": 0xCBE1EE,
"Atlantic Charter": 0x2B2F41,
"Atlantic Deep": 0x274E55,
"Atlantic Depths": 0x001166,
"Atlantic Fig Snail": 0xD7CEB9,
"Atlantic Gull": 0x4B8EB0,
"Atlantic Mystique": 0x00629A,
"Atlantic Ocean": 0xA7D8E4,
"Atlantic Sand": 0xDCD5D2,
"Atlantic Shoreline": 0x708189,
"Atlantic Tide": 0x3E586E,
"Atlantic Tulip": 0xB598C3,
"Atlantic Wave": 0x3D797C,
"Atlantic Waves": 0x264243,
"Atlantis": 0x336172,
"Atlantis Myth": 0x006477,
"Atlas Cedar Green": 0x667A6E,
"Atlas Red": 0x82193A,
"Atlas White": 0xEDE5CF,
"Atmosphere": 0x0099DD,
"Atmospheric": 0x899697,
"Atmospheric Pressure": 0xC2D0E1,
"Atmospheric | |
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step("Kill BGP on R2")
kill_router_daemons(tgen, "r2", ["bgpd"])
step(
"Verify that R2 keeps the stale entries in FIB & R1 keeps stale entries in RIB & FIB"
)
for addr_type in ADDR_TYPES:
protocol = "bgp"
next_hop = next_hop_per_address_family(
tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
)
input_topo = {"r1": topo["routers"]["r1"]}
result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
next_hop = next_hop_per_address_family(
tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
)
input_topo = {"r2": topo["routers"]["r2"]}
result = verify_bgp_rib(tgen, addr_type, "r1", input_topo, next_hop)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step("Bring up BGP on R2 and remove Peer-level GR config from R1 ")
start_router_daemons(tgen, "r2", ["bgpd"])
input_dict = {
"r1": {
"bgp": {
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {"graceful-restart-helper": False}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {"graceful-restart-helper": False}
}
}
}
}
},
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
neighbor = topo["routers"]["r2"]["links"]["r1-link1"][addr_type].split("/")[0]
clear_bgp(tgen, addr_type, "r1", neighbor=neighbor)
result = verify_bgp_convergence_from_running_config(tgen)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify on R2 that R1 still advertises GR capabilities as a helper node")
input_dict = {
"r1": {"bgp": {"graceful-restart": {"graceful-restart-helper": True}}},
"r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
}
for addr_type in ADDR_TYPES:
result = verify_graceful_restart(
tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
protocol = "bgp"
next_hop = next_hop_per_address_family(
tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
)
input_topo = {"r1": topo["routers"]["r1"]}
result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
assert (
result is True
), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
next_hop = next_hop_per_address_family(
tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
)
input_topo = {"r2": topo["routers"]["r2"]}
result = verify_bgp_rib(tgen, addr_type, "r1", input_topo, next_hop)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
assert (
result is True
), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
tc_name, result
)
step("Kill BGP on R2")
kill_router_daemons(tgen, "r2", ["bgpd"])
step(
"Verify that R2 keeps the stale entries in FIB & R1 keeps stale entries in RIB & FIB"
)
for addr_type in ADDR_TYPES:
protocol = "bgp"
next_hop = next_hop_per_address_family(
tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
)
input_topo = {"r1": topo["routers"]["r1"]}
result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
assert (
result is True
), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
next_hop = next_hop_per_address_family(
tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
)
input_topo = {"r2": topo["routers"]["r2"]}
result = verify_bgp_rib(tgen, addr_type, "r1", input_topo, next_hop)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
assert (
result is True
), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
tc_name, result
)
step("Start BGP on R2")
start_router_daemons(tgen, "r2", ["bgpd"])
write_test_footer(tc_name)
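# Hedged helper sketch, not part of the original test suite: the stale-route
# check that the testcases above and below repeat per address family could be
# factored out as shown. It reuses verify_rib() and next_hop_per_address_family()
# exactly as the inline blocks do; the helper itself is only an illustration.
def _verify_routes_in_fib(tgen, dut, peer, next_hop_ips, tc_name):
    """Check that `dut` holds routes originated by `peer` in FIB for all AFs."""
    protocol = "bgp"
    for addr_type in ADDR_TYPES:
        next_hop = next_hop_per_address_family(tgen, dut, peer, addr_type, next_hop_ips)
        input_topo = {peer: topo["routers"][peer]}
        result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
        assert result is True, "Testcase {} : Failed \n Error {}".format(
            tc_name, result
        )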
def test_BGP_GR_TC_51_p1(request):
"""
Test Objective : Transition from Peer-level restarting to Global inherit helper
Global Mode : None
PerPeer Mode : GR Restart
GR Mode effective : GR Restart
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
# Check router status
check_router_status(tgen)
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
# Creating configuration from JSON
reset_config_on_routers(tgen)
step("Configure R1 as GR restarting node at per Peer-level for R2")
input_dict = {
"r1": {
"bgp": {
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {"graceful-restart": True}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {"graceful-restart": True}
}
}
}
}
},
}
}
},
"r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
}
configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2")
step("Verify on R2 that R1 advertises GR capabilities as a restarting node")
for addr_type in ADDR_TYPES:
result = verify_graceful_restart(
tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
protocol = "bgp"
next_hop = next_hop_per_address_family(
tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
)
input_topo = {"r2": topo["routers"]["r2"]}
result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
next_hop = next_hop_per_address_family(
tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
)
input_topo = {"r1": topo["routers"]["r1"]}
result = verify_bgp_rib(tgen, addr_type, "r2", input_topo, next_hop)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step("Kill BGP on R1")
kill_router_daemons(tgen, "r1", ["bgpd"])
step(
"Verify that R1 keeps the stale entries in FIB & R2 keeps stale entries in RIB & FIB"
)
for addr_type in ADDR_TYPES:
protocol = "bgp"
next_hop = next_hop_per_address_family(
tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
)
input_topo = {"r2": topo["routers"]["r2"]}
result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
next_hop = next_hop_per_address_family(
tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
)
input_topo = {"r1": topo["routers"]["r1"]}
result = verify_bgp_rib(tgen, addr_type, "r2", input_topo, next_hop)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step("Bring up BGP on R1 and remove Peer-level GR config")
start_router_daemons(tgen, "r1", ["bgpd"])
input_dict = {
"r1": {
"bgp": {
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {"graceful-restart": False}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {"graceful-restart": False}
}
}
}
}
},
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
neighbor = topo["routers"]["r2"]["links"]["r1-link1"][addr_type].split("/")[0]
clear_bgp(tgen, addr_type, "r1", neighbor=neighbor)
result = verify_bgp_convergence_from_running_config(tgen)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify on R2 that R1 advertises GR capabilities as a helper node")
input_dict = {
"r1": {"bgp": {"graceful-restart": {"graceful-restart-helper": True}}},
"r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}},
}
for addr_type in ADDR_TYPES:
result = verify_graceful_restart(
tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
protocol = "bgp"
next_hop = next_hop_per_address_family(
tgen, "r2", "r1", addr_type, NEXT_HOP_IP_1
)
input_topo = {"r1": topo["routers"]["r1"]}
result = verify_rib(tgen, addr_type, "r2", input_topo, next_hop, protocol)
assert (
result is True
), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
next_hop = next_hop_per_address_family(
tgen, "r1", "r2", addr_type, NEXT_HOP_IP_2
)
input_topo = {"r2": topo["routers"]["r2"]}
result = verify_bgp_rib(tgen, addr_type, "r1", input_topo, next_hop)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r1", input_topo, next_hop, protocol)
assert (
result is True
), "Testcase {} : Failed \n Routes are still present \n Error {}".format(
tc_name, result
)
step("Kill BGPd on R2")
kill_router_daemons(tgen, "r2", ["bgpd"])
step(
"Verify that R2 keeps the stale entries in FIB & R1 keeps stale entries in RIB & FIB"
)
for addr_type in | |
= int(data.get("precision_id", None))
optimization_data.optimization_type_id = int(data.get("optimization_type_id", None))
optimization_data.dataset_id = int(data.get("dataset_id", None))
optimization_data.tuning_details = TuningDetailsInterface(data)
except ValueError:
raise ClientErrorException("Could not parse value")
except TypeError:
raise ClientErrorException("Could not find required parameter.")
optimization_data.batch_size = int(data.get("batch_size", 100))
optimization_data.sampling_size = int(data.get("sampling_size", 100))
return optimization_data
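# Hedged example, an assumption rather than part of the original module: the
# shape of the request payload that parse_optimization_data() above consumes.
# Only the keys are taken from the parsing code; the values are placeholders,
# and the same dict is also handed to TuningDetailsInterface().
#
#   data = {
#       "precision_id": 2,
#       "optimization_type_id": 1,
#       "dataset_id": 1,
#       "batch_size": 100,      # optional, defaults to 100
#       "sampling_size": 100,   # optional, defaults to 100
#       # ... plus the tuning-details fields expected by TuningDetailsInterface
#   }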
@staticmethod
def clean_status(status_to_clean: ExecutionStatus) -> dict:
"""Clean specified optimization status."""
with Session.begin() as db_session:
response = Optimization.clean_status(
db_session=db_session,
status_to_clean=status_to_clean,
)
return response
class BenchmarkAPIInterface:
"""Interface for queries connected with benchmark."""
@staticmethod
def delete_benchmark(data: dict) -> dict:
"""Delete benchmark from database and clean workspace."""
try:
benchmark_id: int = int(data.get("id", None))
benchmark_name: str = str(data.get("name", None))
except ValueError:
raise ClientErrorException("Could not parse value.")
except TypeError:
raise ClientErrorException("Missing project id or project name.")
with Session.begin() as db_session:
benchmark_details = Benchmark.details(db_session, benchmark_id)
project_id = benchmark_details["project_id"]
project_details = ProjectAPIInterface.get_project_details({"id": project_id})
removed_benchmark_id = Benchmark.delete_benchmark(
db_session=db_session,
benchmark_id=benchmark_id,
benchmark_name=benchmark_name,
)
if removed_benchmark_id is not None:
try:
model_id = benchmark_details["model"]["id"]
normalized_project_name = normalize_string(project_details["name"])
normalized_benchmark_name = normalize_string(benchmark_name)
normalized_model_name = normalize_string(benchmark_details["model"]["name"])
benchmark_location = os.path.join(
WORKSPACE_LOCATION,
f"{normalized_project_name}_{project_id}",
"models",
f"{normalized_model_name}_{model_id}",
"benchmarks",
f"{normalized_benchmark_name}_{benchmark_id}",
)
shutil.rmtree(benchmark_location, ignore_errors=True)
except Exception:
log.debug("Could not find benchmark directory.")
return {"id": removed_benchmark_id}
@staticmethod
def get_benchmark_details(data: dict) -> dict:
"""Parse input data and get benchmark details."""
try:
benchmark_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect benchmark id.")
except TypeError:
raise ClientErrorException("Could not find benchmark id.")
with Session.begin() as db_session:
benchmark_details = Benchmark.details(
db_session,
benchmark_id,
)
return benchmark_details
@staticmethod
def list_benchmarks(data: dict) -> dict:
"""List benchmarks assigned to project."""
try:
project_id: int = int(data.get("project_id", None))
except ValueError:
raise ClientErrorException("Incorrect project id.")
except TypeError:
raise ClientErrorException("Could not find project id.")
with Session.begin() as db_session:
benchmarks_list = Benchmark.list(
db_session,
project_id,
)
return benchmarks_list
@staticmethod
def update_benchmark_accuracy(data: dict) -> dict:
"""Update benchmark accuracy."""
try:
benchmark_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect benchmark id.")
except TypeError:
raise ClientErrorException("Could not find benchmark id.")
try:
accuracy: float = float(data.get("status", None))
except ValueError as err:
raise ClientErrorException(err)
with Session.begin() as db_session:
response_data = BenchmarkResult.update_accuracy(
db_session,
benchmark_id,
accuracy,
)
return response_data
@staticmethod
def update_benchmark_performance(data: dict) -> dict:
"""Update benchmark performance."""
try:
benchmark_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect benchmark id.")
except TypeError:
raise ClientErrorException("Could not find benchmark id.")
try:
performance: float = float(data.get("status", None))
except ValueError as err:
raise ClientErrorException(err)
with Session.begin() as db_session:
response_data = BenchmarkResult.update_performance(
db_session,
benchmark_id,
performance,
)
return response_data
@staticmethod
def update_benchmark_status(data: dict) -> dict:
"""Update benchmark status."""
try:
benchmark_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect benchmark id.")
except TypeError:
raise ClientErrorException("Could not find benchmark id.")
try:
status: ExecutionStatus = ExecutionStatus(data.get("status", None))
except ValueError as err:
raise ClientErrorException(err)
with Session.begin() as db_session:
response_data = Benchmark.update_status(
db_session,
benchmark_id,
status,
)
return response_data
@staticmethod
def update_benchmark_duration(data: dict) -> dict:
"""Update duration of benchmark."""
try:
benchmark_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect benchmark id.")
except TypeError:
raise ClientErrorException("Could not find benchmark id.")
try:
duration = int(data.get("duration", None))
except ValueError as err:
raise ClientErrorException(err)
with Session.begin() as db_session:
response_data = Benchmark.update_duration(
db_session,
benchmark_id,
duration,
)
return response_data
@staticmethod
def update_paths(data: dict) -> dict:
"""Update config path and output log path."""
response = {}
try:
benchmark_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect benchmark id.")
except TypeError:
raise ClientErrorException("Could not find benchmark id.")
config_path: Optional[str] = data.get("config_path")
log_path: Optional[str] = data.get("log_path")
with Session.begin() as db_session:
config_path_response = Benchmark.update_config_path(
db_session=db_session,
benchmark_id=benchmark_id,
path=config_path,
)
response.update(config_path_response)
log_path_response = Benchmark.update_log_path(
db_session=db_session,
benchmark_id=benchmark_id,
path=log_path,
)
response.update(log_path_response)
return response
@staticmethod
def update_execution_command(data: dict) -> dict:
"""Update benchmark execution command."""
try:
benchmark_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect benchmark id.")
except TypeError:
raise ClientErrorException("Could not find benchmark id.")
execution_command: Optional[Union[str, List[str]]] = data.get("execution_command")
if isinstance(execution_command, list):
execution_command = " ".join(map(str, execution_command))
with Session.begin() as db_session:
response_data = Benchmark.update_execution_command(
db_session=db_session,
benchmark_id=benchmark_id,
execution_command=execution_command,
)
return response_data
@staticmethod
def add_benchmark(data: dict) -> dict:
"""Add benchmark to database."""
parser = ConfigurationParser()
parsed_data = parser.parse(data)
benchmark_params: BenchmarkAddParamsInterface = BenchmarkAPIInterface.parse_benchmark_data(
parsed_data,
)
with Session.begin() as db_session:
benchmark_id = Benchmark.add(
db_session=db_session,
project_id=benchmark_params.project_id,
name=benchmark_params.name,
model_id=benchmark_params.model_id,
dataset_id=benchmark_params.dataset_id,
mode=benchmark_params.mode,
batch_size=benchmark_params.batch_size,
iterations=benchmark_params.iterations,
number_of_instance=benchmark_params.number_of_instance,
cores_per_instance=benchmark_params.cores_per_instance,
warmup_iterations=benchmark_params.warmup_iterations,
)
return {
"benchmark_id": benchmark_id,
}
@staticmethod
def add_result(data: dict) -> None:
"""Add benchmark result to database."""
try:
benchmark_id: int = int(data.get("benchmark_id", None))
except ValueError:
raise ClientErrorException("Incorrect benchmark id.")
except TypeError:
raise ClientErrorException("Could not find benchmark id.")
accuracy: Optional[float] = None
performance: Optional[float] = None
try:
accuracy = float(data["accuracy"])
except Exception:
pass
try:
performance = float(data["performance"])
except Exception:
pass
with Session.begin() as db_session:
BenchmarkResult.add(
db_session=db_session,
benchmark_id=benchmark_id,
accuracy=accuracy,
performance=performance,
)
@staticmethod
def parse_benchmark_data(data: dict) -> BenchmarkAddParamsInterface:
"""Parse input data for benchmark."""
benchmark_data = BenchmarkAddParamsInterface()
try:
benchmark_data.project_id = int(data.get("project_id", None))
benchmark_data.name = str(data.get("name", None))
benchmark_data.model_id = int(data.get("model_id", None))
benchmark_data.dataset_id = int(data.get("dataset_id", None))
benchmark_data.mode = str(data.get("mode", Benchmarks.PERF))
benchmark_data.batch_size = int(data.get("batch_size", 100))
benchmark_data.iterations = int(data.get("iterations", -1))
benchmark_data.cores_per_instance = int(data.get("cores_per_instance", 4))
benchmark_data.warmup_iterations = int(data.get("warmup_iterations", 10))
except ValueError:
raise ClientErrorException("Could not parse value")
except TypeError:
raise ClientErrorException("Could not find required parameter.")
try:
benchmark_data.number_of_instance = int(data.get("number_of_instance", None))
except TypeError:
from neural_compressor.ux.utils.hw_info import HWInfo
hw_info = HWInfo()
benchmark_data.number_of_instance = (
hw_info.cores_per_socket // benchmark_data.cores_per_instance
)
return benchmark_data
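# Hedged example, not part of the original module: a minimal request payload
# for BenchmarkAPIInterface.add_benchmark(), with keys taken from
# parse_benchmark_data() above. Values are placeholders; when
# "number_of_instance" is omitted it falls back to
# cores_per_socket // cores_per_instance, as implemented above.
#
#   data = {
#       "project_id": 1,
#       "name": "resnet50_perf",
#       "model_id": 1,
#       "dataset_id": 1,
#       "mode": Benchmarks.PERF,
#       "batch_size": 100,
#       "iterations": -1,
#       "cores_per_instance": 4,
#       "warmup_iterations": 10,
#   }
#   response = BenchmarkAPIInterface.add_benchmark(data)   # -> {"benchmark_id": ...}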
@staticmethod
def clean_status(status_to_clean: ExecutionStatus) -> dict:
"""Clean specified optimization status."""
with Session.begin() as db_session:
response = Benchmark.clean_status(
db_session=db_session,
status_to_clean=status_to_clean,
)
return response
class ProfilingAPIInterface:
"""Interface for queries connected with profiling."""
@staticmethod
def get_profiling_details(data: dict) -> dict:
"""Parse input data and get profiling details."""
try:
profiling_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect profiling id.")
except TypeError:
raise ClientErrorException("Could not find profiling id.")
with Session.begin() as db_session:
profiling_details = Profiling.details(
db_session,
profiling_id,
)
return profiling_details
@staticmethod
def delete_profiling(data: dict) -> dict:
"""Delete profiling from database and clean workspace."""
try:
profiling_id: int = int(data.get("id", None))
profiling_name: str = str(data.get("name", None))
except ValueError:
raise ClientErrorException("Could not parse value.")
except TypeError:
raise ClientErrorException("Missing project id or project name.")
with Session.begin() as db_session:
profiling_details = Profiling.details(db_session, profiling_id)
project_id = profiling_details["project_id"]
project_details = ProjectAPIInterface.get_project_details({"id": project_id})
removed_profiling_id = Profiling.delete_profiling(
db_session=db_session,
profiling_id=profiling_id,
profiling_name=profiling_name,
)
if removed_profiling_id is not None:
try:
model_id = profiling_details["model"]["id"]
normalized_project_name = normalize_string(project_details["name"])
normalized_profiling_name = normalize_string(profiling_name)
normalized_model_name = normalize_string(profiling_details["model"]["name"])
profiling_location = os.path.join(
WORKSPACE_LOCATION,
f"{normalized_project_name}_{project_id}",
"models",
f"{normalized_model_name}_{model_id}",
"profilings",
f"{normalized_profiling_name}_{profiling_id}",
)
shutil.rmtree(profiling_location, ignore_errors=True)
except Exception:
log.debug("Could not find profiling directory.")
return {"id": removed_profiling_id}
@staticmethod
def list_profilings(data: dict) -> dict:
"""List profilings assigned to project."""
try:
project_id: int = int(data.get("project_id", None))
except ValueError:
raise ClientErrorException("Incorrect project id.")
except TypeError:
raise ClientErrorException("Could not find project id.")
with Session.begin() as db_session:
profilings_list = Profiling.list(
db_session,
project_id,
)
return profilings_list
@staticmethod
def update_profiling_status(data: dict) -> dict:
"""Update profiling status."""
try:
profiling_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect profiling id.")
except TypeError:
raise ClientErrorException("Could not find profiling id.")
try:
status: ExecutionStatus = ExecutionStatus(data.get("status", None))
except ValueError as err:
raise ClientErrorException(err)
with Session.begin() as db_session:
response_data = Profiling.update_status(
db_session,
profiling_id,
status,
)
return response_data
@staticmethod
def update_profiling_duration(data: dict) -> dict:
"""Update duration of profiling."""
try:
profiling_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect profiling id.")
except TypeError:
raise ClientErrorException("Could not find profiling id.")
try:
duration = int(data.get("duration", None))
except ValueError as err:
raise ClientErrorException(err)
with Session.begin() as db_session:
response_data = Profiling.update_duration(
db_session,
profiling_id,
duration,
)
return response_data
@staticmethod
def update_log_path(data: dict) -> dict:
"""Update config path and output log path."""
try:
profiling_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect profiling id.")
except TypeError:
raise ClientErrorException("Could not find profiling id.")
log_path: Optional[str] = data.get("log_path")
with Session.begin() as db_session:
response = Profiling.update_log_path(
db_session=db_session,
profiling_id=profiling_id,
path=log_path,
)
return response
@staticmethod
def update_execution_command(data: dict) -> dict:
"""Update profiling execution command."""
try:
profiling_id: int = int(data.get("id", None))
except ValueError:
raise ClientErrorException("Incorrect profiling id.")
except TypeError:
raise ClientErrorException("Could not find profiling id.")
execution_command: Optional[Union[str, List[str]]] = data.get("execution_command")
if isinstance(execution_command, list):
execution_command = " ".join(map(str, execution_command))
with Session.begin() as db_session:
response_data = Profiling.update_execution_command(
db_session=db_session,
profiling_id=profiling_id,
execution_command=execution_command,
)
return response_data
@staticmethod
def add_profiling(data: dict) -> dict:
"""Add profiling to database."""
parser = ConfigurationParser()
parsed_data = parser.parse(data)
profiling_params: ProfilingAddParamsInterface = ProfilingAPIInterface.parse_profiling_data(
parsed_data,
)
with Session.begin() as db_session:
profiling_id = Profiling.add(
db_session=db_session,
project_id=profiling_params.project_id,
name=profiling_params.name,
model_id=profiling_params.model_id,
dataset_id=profiling_params.dataset_id,
num_threads=profiling_params.num_threads,
)
return {
"profiling_id": profiling_id,
}
@staticmethod
def add_result(profiling_id: int, data: dict) -> None:
"""Add profiling result to | |
conference
Example: create simple conference::
conference_id = api.create_conference('+12018994444')
print(conference_id)
## conf-ixaagbn5wcyskisiy
Example: create conference with extra parameters::
conference_id = api.create_conference(from_ = "+12018994444", callback_url = "http://google.com",
callback_timeout= 2000, fallback_url = "http://yahoo.com")
print(conference_id)
## conf-ixaagbn5wcyskisiy
my_conf = api.get_conference(conference_id)
print(my_conf)
## { 'activeMembers' : 0,
## 'callbackHttpMethod': 'post',
## 'callbackTimeout' : 2000,
## 'callbackUrl' : 'http://google.com',
## 'createdTime' : '2017-01-26T01:58:59Z',
## 'fallbackUrl' : 'http://yahoo.com',
## 'from' : '+12018994444',
## 'hold' : False,
## 'id' : 'conf-ixaagbn5wcyskisiy',
## 'mute' : False,
## 'state' : 'created'}
"""
kwargs["from"] = from_
kwargs["callbackUrl"] = callback_url
kwargs["callbackTimeout"] = callback_timeout
kwargs["callbackHttpMethod"] = callback_http_method
kwargs["fallbackUrl"] = fallback_url
kwargs["tag"] = tag
return self._make_request('post', '/users/%s/conferences' % self.user_id, json=kwargs)[2]
def get_conference(self, conference_id):
"""
Get information about a conference
:type conference_id: str
:param conference_id: id of a conference
:rtype: dict
:returns: conference information
Example: Create then fetch conference::
conference_id = api.create_conference(from_ = "+12018994444", callback_url = "http://google.com",
callback_timeout= 2000, fallback_url = "http://yahoo.com")
print(conference_id)
## conf-ixaagbn5wcyskisiy
my_conf = api.get_conference(conference_id)
print(my_conf)
## { 'activeMembers' : 0,
## 'callbackHttpMethod': 'post',
## 'callbackTimeout' : 2000,
## 'callbackUrl' : 'http://google.com',
## 'createdTime' : '2017-01-26T01:58:59Z',
## 'fallbackUrl' : 'http://yahoo.com',
## 'from' : '+12018994444',
## 'hold' : False,
## 'id' : 'conf-ixaagbn5wcyskisiy',
## 'mute' : False,
## 'state' : 'created'}
"""
return self._make_request('get', '/users/%s/conferences/%s' % (self.user_id, conference_id))[0]
def update_conference(self,
conference_id,
state=None,
mute=None,
hold=None,
callback_url=None,
callback_timeout=None,
callback_http_method=None,
fallback_url=None,
tag=None,
**kwargs):
"""
Update a conference
:param str conference_id: id of a conference
:param str state: Conference state. Possible state values are: "completed" to terminate the conference.
:param str mute: If "true", all member can't speak in the conference.\
If "false", all members can speak in the conference
:param str hold: If "true", all member can't hear or speak in the conference. \
If "false", all members can hear and speak in the conference
:param str callback_url: The full server URL where the conference events related to the conference will be sent
:param str callback_timeout: Determines how long the platform should wait for callbackUrl's response before
timing out, in milliseconds.
:param str callback_http_method: Determine if the callback event should be sent via HTTP GET or HTTP POST. \
Values are "GET" or "POST" (if not set the default is POST).
:param str fallback_url: The full server URL used to send the callback event
if the request to callbackUrl fails.
:param str tag: A string that will be included in the callback events of the conference.
Example: End conference::
api.update_conference('conferenceId', state='completed')
"""
kwargs["state"] = state
kwargs["mute"] = mute
kwargs["hold"] = hold
kwargs["callbackUrl"] = callback_url
kwargs["callbackTimeout"] = callback_timeout
kwargs["callbackHttpMethod"] = callback_http_method
kwargs["fallbackUrl"] = fallback_url
kwargs["tag"] = tag
self._make_request('post', '/users/%s/conferences/%s' %
(self.user_id, conference_id), json=kwargs)
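# Hedged usage sketch, an assumption based only on the parameters documented
# above: further update_conference() calls beyond the "End conference" example.
#
#   api.update_conference('conferenceId', mute='true')    # nobody can speak
#   api.update_conference('conferenceId', hold='true')    # nobody can hear or speak
#   api.update_conference('conferenceId',
#                         callback_url='http://host/conf-events',
#                         callback_http_method='GET')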
def play_audio_to_conference(self,
conference_id,
file_url=None,
sentence=None,
gender=None,
locale=None,
voice=None,
loop_enabled=None,
**kwargs):
"""
Play audio to a conference
:type conference_id: str
:param conference_id: id of a conference
:param str file_url: The location of an audio file to play (WAV and MP3 supported).
:param str sentence: The sentence to speak.
:param str gender: The gender of the voice used to synthesize the sentence.
:param str locale: The locale used to get the accent of the voice used to synthesize the sentence.
:param str voice: The voice to speak the sentence.
:param str loop_enabled: When value is true, the audio will keep playing in a loop.
Example: Play audio file to conference::
api.play_audio_to_conference('conferenceId', fileUrl = 'http://host/path/file.mp3')
Example: Speak Sentence to conference::
api.play_audio_to_conference('conferenceId', sentence='Press 0 to complete call', gender='female')
Example: Use extension methods::
# or with extension methods
api.play_audio_file_to_conference('conferenceId', 'http://host/path/file.mp3')
api.speak_sentence_to_conference('conferenceId', 'Hello')
"""
kwargs['fileUrl'] = file_url
kwargs['sentence'] = sentence
kwargs['gender'] = gender
kwargs['locale'] = locale
kwargs['voice'] = voice
kwargs['loopEnabled'] = loop_enabled
self._make_request('post', '/users/%s/conferences/%s/audio' %
(self.user_id, conference_id), json=kwargs)
def list_conference_members(self, conference_id):
"""
Get a list of members of a conference
:type conference_id: str
:param conference_id: id of a conference
:rtype: types.GeneratorType
:returns: list of recordings
Example: Fetch and list conference members::
my_conf_id = api.create_conference(from_='+19192223333')
print(my_conf_id)
# conf-confId
my_call_id = api.create_call(from_='+19192223333', to='+19192223334', conference_id= 'conf-confId')
print(my_call_id)
# c-callId
my_conf_member_id = api.create_conference_member(my_conf_id, call_id=my_call_id)
print(my_conf_member_id)
# member-memberId
my_conference_members = api.list_conference_members(my_conf_id)
print(list(my_conference_members))
## [
## {
## 'addedTime' :'2017-01-30T22:01:11Z',
## 'call' :'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' :False,
## 'id' :'member-memberId',
## 'joinTone' :False,
## 'leavingTone':False,
## 'mute' :False,
## 'removedTime':'2017-01-30T22:01:21Z',
## 'state' :'completed'
## }
## ]
"""
path = '/users/%s/conferences/%s/members' % (
self.user_id, conference_id)
return get_lazy_enumerator(self, lambda: self._make_request('get', path))
def create_conference_member(self,
conference_id,
call_id=None,
join_tone=None,
leaving_tone=None,
mute=None,
hold=None,
**kwargs):
"""
Create a conference member for a conference
:type conference_id: str
:param conference_id: id of a conference
:param str call_id: The callId must refer to an active call that was created using this conferenceId (required)
:param bool join_tone: If "true", will play a tone when the member joins the conference. \
If "false", no tone is played when the member joins the conference.
:param bool leaving_tone: If "true", will play a tone when the member leaves the conference.\
If "false", no tone is played when the member leaves the conference.
:param bool mute: If "true", member can't speak in the conference.\
If "false", this members can speak in the conference (unless set at the conference level).
:param bool hold: If "true", member can't hear or speak in the conference.\
If "false", member can hear and speak in the conference (unless set at the conference level).
:rtype: str
:returns: id of the created conference member
Example: Create Conference and add member::
my_conf_id = api.create_conference(from_='+19192223333')
print(my_conf_id)
# conf-confId
my_call_id = api.create_call(from_='+19192223333', to='+19192223334', conference_id= 'conf-confId')
print(my_call_id)
# c-callId
my_conf_member_id = api.create_conference_member(my_conf_id, call_id=my_call_id, join_tone=True)
print(my_conf_member_id)
# member-memberId
my_conf_member = api.get_conference_member(my_conf_id, my_conf_member_id)
print(my_conf_member)
## {
## 'addedTime': '2017-01-30T22:01:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : False,
## 'id' : 'member-memberId',
## 'joinTone' : True,
## 'leavingTone' : False,
## 'mute' : False,
## 'removedTime' : '2017-01-30T22:01:21Z',
## 'state' : 'completed'
## }
"""
kwargs['callId'] = call_id
kwargs['joinTone'] = join_tone
kwargs['leavingTone'] = leaving_tone
kwargs['mute'] = mute
kwargs['hold'] = hold
path = '/users/%s/conferences/%s/members' % (
self.user_id, conference_id)
return self._make_request('post', path, json=kwargs)[2]
def get_conference_member(self, conference_id, member_id):
"""
Get a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a member
:rtype: dict
:returns: data of conference member
Example: Create Conference and add member::
my_conf_id = api.create_conference(from_='+19192223333')
print(my_conf_id)
# conf-confId
my_call_id = api.create_call(from_='+19192223333', to='+19192223334', conference_id= 'conf-confId')
print(my_call_id)
# c-callId
my_conf_member_id = api.create_conference_member(my_conf_id, call_id=my_call_id, join_tone=True)
print(my_conf_member_id)
# member-memberId
my_conf_member = api.get_conference_member(my_conf_id, my_conf_member_id)
print(my_conf_member)
## {
## 'addedTime': '2017-01-30T22:01:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : False,
## 'id' : 'member-memberId',
## 'joinTone' : True,
## 'leavingTone' : False,
## 'mute' : False,
## 'removedTime' : '2017-01-30T22:01:21Z',
## 'state' : 'active'
## }
"""
path = '/users/%s/conferences/%s/members/%s' % (
self.user_id, conference_id, member_id)
return self._make_request('get', path)[0]
def update_conference_member(self,
conference_id,
member_id,
join_tone=None,
leaving_tone=None,
mute=None,
hold=None,
**kwargs):
"""
Update a conference member
:param str conference_id: id of a conference
:param str member_id: id of a conference member
:param bool join_tone: If "true", will play a tone when the member joins the conference. \
If "false", no tone is played when the member joins the conference.
:param bool leaving_tone: If "true", will play a tone when the member leaves the conference. \
If "false", no tone is played when the member leaves the conference.
:param bool mute: If "true", member can't speak in the conference. \
If "false", this members can speak in the conference (unless set at the conference level).
:param bool hold: If "true", member can't hear or speak in the conference. \
If "false", member can hear and speak in the conference (unless set at the conference level).
Example: update conference member::
my_conf_id = api.create_conference(from_='+19192223333')
print(my_conf_id)
# conf-confId
my_call_id = api.create_call(from_='+19192223333', to='+19192223334', conference_id= 'conf-confId')
print(my_call_id)
# c-callId
my_conf_member_id = api.create_conference_member(my_conf_id, call_id=my_call_id, join_tone=True)
print(my_conf_member_id)
# member-memberId
my_conf_member = api.get_conference_member(my_conf_id, my_conf_member_id)
print(my_conf_member)
## {
## 'addedTime': '2017-01-30T22:01:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : False,
## 'id' : 'member-memberId',
## 'joinTone' : True,
## 'leavingTone' : False,
## 'mute' : False,
## 'removedTime' |