ext | sha | content
---|---|---
py | 1a5ca89c8bc34585fafd24cfcea93582f553b564 |
# coding: utf-8
"""
Metal API
This is the API for Equinix Metal. The API allows you to programmatically interact with all of your Equinix Metal resources, including devices, networks, addresses, organizations, projects, and your user account. The official API docs are hosted at <https://metal.equinix.com/developers/api>. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from metal.configuration import Configuration
class VirtualNetworkList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'virtual_networks': 'list[VirtualNetwork]'
}
attribute_map = {
'virtual_networks': 'virtual_networks'
}
def __init__(self, virtual_networks=None, local_vars_configuration=None): # noqa: E501
"""VirtualNetworkList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._virtual_networks = None
self.discriminator = None
if virtual_networks is not None:
self.virtual_networks = virtual_networks
@property
def virtual_networks(self):
"""Gets the virtual_networks of this VirtualNetworkList. # noqa: E501
:return: The virtual_networks of this VirtualNetworkList. # noqa: E501
:rtype: list[VirtualNetwork]
"""
return self._virtual_networks
@virtual_networks.setter
def virtual_networks(self, virtual_networks):
"""Sets the virtual_networks of this VirtualNetworkList.
:param virtual_networks: The virtual_networks of this VirtualNetworkList. # noqa: E501
:type virtual_networks: list[VirtualNetwork]
"""
self._virtual_networks = virtual_networks
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VirtualNetworkList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, VirtualNetworkList):
return True
return self.to_dict() != other.to_dict()
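# Usage sketch (illustrative, not part of the generated module): build the model,
# serialize it, and compare two instances; equality is based on to_dict().
#   vnets = VirtualNetworkList(virtual_networks=[])
#   vnets.to_dict()                                   # {'virtual_networks': []}
#   vnets == VirtualNetworkList(virtual_networks=[])  # True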
py | 1a5ca9b73f372e693a6a92cb9d9146a1fe9ca506 |
#!/usr/bin/env python
import pcbnew
import csv
import re
import sys
import os
import gerber_drill as gd
import wx
import io
import loadnet
import traceback
reload(sys)
sys.setdefaultencoding("utf8")
import re
patten = re.compile(r'\d+')
def ref_comp(x):
if type(x) == unicode:
x = x.encode('gbk')
if type(x) == str:
t = patten.findall(x)
if len(t)>0:
hh = x.replace(t[0],'')
vv = '0'*(6-len(hh)) + hh + '0'*(6-len(t[0])) + t[0]
return vv
else:
print(t)
else:
print(type(x))
return x
def ref_sorted(iterable, key = None):
return sorted(iterable, key = ref_comp)
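# Example (sketch): ref_sorted orders reference designators naturally, so the
# numeric part compares as a number rather than as text:
#   ref_sorted(['R10', 'R2', 'R1', 'C3'])   # -> ['C3', 'R1', 'R2', 'R10']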
def GetExcludeRefs():
f = pcbnew.GetBoard().GetFileName()
delimer = '/'
pos = f.rfind('/')
if pos < 0:
delimer = '\\'
pos = f.rfind('\\')
f = f[0:pos] + delimer + "exclude.txt"
if os.path.exists(f):
file = io.open(f, "r")
return file.read()
return ""
class ExcludeRefClass:
def __init__(self, refs):
self.refNames = {}
self.refPrefix = {}
xx = re.findall(r'([A-Za-z]+[0-9]+)', refs.upper())
for v in xx:
self.refNames[v] = True
xx = re.findall(r'([A-Za-z]+)\*', refs.upper())
for v in xx:
self.refPrefix[v] = True
def contains(self, ref):
if self.refNames.get(ref.upper()):
return True
xx = re.findall(r'[A-Za-z_]+', ref)
if len(xx) > 0:
return self.refPrefix.get(xx[0].upper())
return False
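# Example (sketch): the exclude string may mix exact designators and wildcard
# prefixes, e.g. "R1 C2 TP*" excludes R1, C2 and every TP* reference.
#   ex = ExcludeRefClass("R1 C2 TP*")
#   ex.contains("R1")    # True (exact match, case-insensitive)
#   ex.contains("tp3")   # True (prefix match via "TP*")
#   ex.contains("R2")    # falsy (not excluded)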
unusedRef = None
class RefBuilder:
''' RefBuilder is used to rebuild module reference designators
Step 1: use rb = RefBuilder() to create a RefBuilder object
Step 2: use rb.collect(ref) to collect the existing references
Step 3: use newRef = rb.build(oldRef) to build a new ref; if oldRef was already
built, the previously assigned new ref is returned
'''
def __init__(self, init_ref = None):
self.patten = re.compile(r'([a-zA-Z]+)\s*(\d+)')
self.refMap = {}
self.builtMap = {}
if init_ref:
self.refMap = init_ref
def collect(self, ref):
m = self.patten.match(ref)
if m:
if not self.refMap.has_key(m.group(1)):
self.refMap[m.group(1)] = m.group(2)
else:
max = self.refMap[m.group(1)]
if int(m.group(2)) > int(max):
self.refMap[m.group(1)] = m.group(2)
def collects(self, refs):
for ref in refs:
self.collect(ref)
def build(self, oldRef):
m = re.match(r'([a-zA-Z]+)\s*(\d+)',oldRef)
if not m:
print 'Ref is invalid %s'%oldRef
return None
if self.builtMap.has_key(oldRef):
return self.builtMap[oldRef]
newRef = ''
if not self.refMap.has_key(m.group(1)):
self.refMap[m.group(1)] = m.group(2)
newRef = oldRef
else:
max = int(self.refMap[m.group(1)])
max = max + 1
self.refMap[m.group(1)] = str(max)
newRef = m.group(1) + str(max)
self.builtMap[oldRef] = newRef
return newRef
def Show(self):
print self.refMap
def testRefBuilder():
rb = RefBuilder()
rb.collects(['R1','R2','R14', 'R10', 'D1', 'D2', 'U3', 'U2', 'U1'])
rb.Show()
print 'R1 -> %s'%rb.build('R1')
print 'R2 -> %s'%rb.build('R2')
print 'R3 -> %s'%rb.build('R3')
print 'U1 -> %s'%rb.build('U1')
print 'U2 -> %s'%rb.build('U2')
print 'X2 -> %s'%rb.build('X2')
print 'X1 -> %s'%rb.build('X1')
print 'R? -> %s'%rb.build('R?')
print 'R1 -> %s'%rb.build('R1')
print 'R2 -> %s'%rb.build('R2')
print 'X2 -> %s'%rb.build('X2')
rb.Show()
# Get Board Bounding rect by the margin layer element
#def GetBoardArea(brd = None, marginLayer = pcbnew.Margin):
# if not brd:
# brd = pcbnew.GetBoard()
# rect = None
# for dwg in brd.GetDrawings():
# if dwg.GetLayer() == marginLayer:
# box = dwg.GetBoundingBox()
# if rect:
# rect.Merge(box)
# else:
# rect = box
# rect.SetX(rect.GetX() + 100001)
# rect.SetY(rect.GetY() + 100001)
# rect.SetWidth(rect.GetWidth() - 200002)
# rect.SetHeight(rect.GetHeight() - 200002)
# #print rect.GetX(), rect.GetY(), rect.GetWidth(), rect.GetHeight()
# return rect
def GetBoardBound(brd = None, marginLayer = pcbnew.Edge_Cuts):
''' Calculate the board edge from the margin layer (default: Edge_Cuts).
Enumerate all drawing segments on the specified layer and merge their bounding rects.
'''
if not brd:
brd = pcbnew.GetBoard()
rect = None
l = None
r = None
t = None
b = None
for dwg in brd.GetDrawings():
if dwg.GetLayer() == marginLayer:
if hasattr(dwg, 'Cast_to_DRAWSEGMENT'):
d = dwg.Cast_to_DRAWSEGMENT()
else:
d = pcbnew.Cast_to_DRAWSEGMENT(dwg)
w = d.GetWidth()
box = d.GetBoundingBox()
box.SetX(box.GetX() + w/2)
box.SetY(box.GetY() + w/2)
box.SetWidth(box.GetWidth() - w)
box.SetHeight(box.GetHeight() - w)
if rect:
rect.Merge(box)
else:
rect = box
w = 2
rect.SetX(rect.GetX() + w/2)
rect.SetY(rect.GetY() + w/2)
rect.SetWidth(rect.GetWidth() - w)
rect.SetHeight(rect.GetHeight() - w)
return rect
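# Example (sketch): report the board outline size in millimetres from the pcbnew
# scripting console (assumes a board with an Edge.Cuts outline is open; KiCad
# internal units are nanometres, hence the division by 1e6):
#   r = GetBoardBound()
#   print 'outline %.2f x %.2f mm' % (r.GetWidth() / 1e6, r.GetHeight() / 1e6)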
def GetOtherBoard(brd):
r = brd
curbrd = pcbnew.GetBoard()
s = curbrd.GetFileName()
if not brd:
brd = curbrd
elif type(brd) == str:
if os.path.exists(brd):
brd = pcbnew.LoadBoard(brd)
elif os.path.exists(s[0:s.rfind('/')] + '/' + brd):
brd = pcbnew.LoadBoard(s[0:s.rfind('/')] + '/' + brd)
else:
return None
else:
return brd
return brd
class BoardItems:
''' Class to hold all board items of interest.
Use the Collect method to gather the board items.
'''
def __init__(self):
self.rb = RefBuilder()
self.orgItems = []
self.mods = []
self.rect = None
def ItemValid(self, item):
''' Check whether the item is inside the rect'''
return item.HitTest(self.rect, False)
def Collect(self, brd = None, rect = None):
''' Collect board items within the specified rect'''
brd = GetOtherBoard(brd)
#if not brd:
# brd = pcbnew.GetBoard()
if not rect:
rect = GetBoardBound(brd)
self.rect = rect
for mod in brd.GetModules():
if self.ItemValid(mod):
self.orgItems.append(mod)
self.mods.append(mod)
self.rb.collect(mod.GetReference())
for track in brd.GetTracks():
if self.ItemValid(track):
self.orgItems.append(track)
for dwg in brd.GetDrawings():
if self.ItemValid(dwg):
self.orgItems.append(dwg)
#print dwg.GetLayer()
area_cnt = brd.GetAreaCount()
for i in range(area_cnt):
area = brd.GetArea(i)
if self.ItemValid(area):
self.orgItems.append(area)
self.brd = brd
#self.rb.Show()
def Mirror(self):
rotPt = pcbnew.wxPoint(self.rect.GetX() + self.rect.GetWidth()/2, self.rect.GetY() + self.rect.GetHeight()/2)
for item in self.orgItems:
item.Flip(rotPt)
item.Rotate(rotPt, 1800)
def Rotate(self, angle = 90):
rotPt = pcbnew.wxPoint(self.rect.GetX() + self.rect.GetWidth()/2, self.rect.GetY() + self.rect.GetHeight()/2)
for item in self.orgItems:
item.Rotate(rotPt, angle * 10)
def MoveToMM(self, x, y):
self.MoveTo(pcbnew.wxPointMM(x,y))
def ShowRect(self):
r = '('
r += str(self.rect.GetX()/1000000) + ','
r += str(self.rect.GetY()/1000000) + ','
r += str(self.rect.GetWidth()/1000000) + ','
r += str(self.rect.GetHeight()/1000000) + ')'
return r
def MoveTo(self, pos):
off = pcbnew.wxPoint( pos.x - self.rect.GetX(), pos.y - self.rect.GetY() )
#print 'org is:', self.x, ',', self.y
#print 'off is:', off
for item in self.orgItems:
item.Move(off)
print 'Move item in ', self.ShowRect(), 'off = (', off.x/1000000, ',' ,off.y/1000000,')'
self.rect.Move(off)
print 'Result is ', self.ShowRect()
def Clone(self, brd = None):
if not brd:
brd = self.brd
newBI = BoardItems()
newBI.rect = self.rect
for item in self.orgItems:
newItem = item.Duplicate()
newBI.orgItems.append(newItem)
brd.Add(newItem)
newBI.brd = brd
return newBI
def Remove(self):
for item in self.orgItems:
self.brd.Remove(item)
def UpdateRef(self, rb):
''' Update item references using the specified ref builder'''
for item in self.orgItems:
if isinstance(item,pcbnew.MODULE):
newRef = rb.build(item.GetReference())
if newRef:
item.SetReference(newRef)
def ChangeBrd(self, brd = None):
if not brd:
brd = pcbnew.GetBoard()
if brd == self.brd:
print 'Same board, do nothing'
return
for item in self.orgItems:
self.brd.Remove(item)
brd.Add(item)
self.brd = brd
def HideValue(self, hide = True):
for m in self.mods:
if hide:
m.Value().SetVisible(False)
else:
m.Value().SetVisible(True)
def test2():
# load board to be panelized
#b1 = pcbnew.LoadBoard(r'test1.kicad_pcb')
b2 = pcbnew.LoadBoard(r'test2.kicad_pcb')
# Get the current working board; it must be an empty board
brd = pcbnew.GetBoard()
# Collect items
bi1 = BoardItems()
bi2 = BoardItems()
bi1.Collect(brd)
bi2.Collect(b2)
#bi1 = bi1.Clone(brd)
#bi2 = bi2.Clone(brd)
# Clone items in board 1
bb1 = bi1.Clone()
# Change the module reference
bi2.UpdateRef(bi1.rb)
# Clone items in board 2
bb2 = bi2.Clone()
# Copy board items to current board
#bi1.ChangeBrd(brd)
#bb1.ChangeBrd(brd)
bi2.ChangeBrd(brd)
bb2.ChangeBrd(brd)
# Move them
bi2.MoveToMM(0,0)
bi2.Rotate(180)
bb1.Mirror()
bb2.Rotate(180)
bb2.Mirror()
bb1.MoveToMM(54, -59)
bb2.MoveToMM(54, -59)
def GetPad1(mod):
'''Get pad "1" of a module, falling back to its first pad if no pad is named "1"'''
padx = None
for pad in mod.Pads():
if not padx:
padx = pad
if pad.GetPadName() == '1':
return pad
#print 'Pad 1 not found, use the first pad instead'
return padx
def IsSMD(mod):
for pad in mod.Pads():
attr_smd = pcbnew.PAD_SMD if hasattr(pcbnew,'PAD_SMD') else pcbnew.PAD_ATTRIB_SMD
if pad.GetAttribute() != attr_smd:
return False
return True
def footPrintName(mod):
fid = mod.GetFPID()
f = fid.GetFootprintName().Cast_to_CChar() if hasattr(fid, 'GetFootprintName') else fid.GetLibItemName().Cast_to_CChar()
return f
class BOMItem:
def __init__(self, ref, footprint, value, pincount, netList = None):
self.refs = [ref]
self.fp = footprint
self.value = value
self.pincount = pincount
kv = value
#if kv.rfind('[') != -1:
# kv = kv[0:kv.rfind('[')]
self.netKey = kv + "&" + footprint
if not isinstance(self.netKey, unicode):
self.netKey = unicode(self.netKey)
self.partNumber = ""
self.desc = "desc"
self.url = ""
self.libRef = "libref"
if netList:
if netList.has_key(self.netKey):
comp = netList[self.netKey]
if comp.has_key('partNumber'):
self.partNumber = comp['partNumber']
if comp.has_key('description'):
self.desc = comp['description']
if comp.has_key('datasheet'):
self.url = comp['datasheet']
if comp.has_key('comment'):
self.libRef = self.value
self.value = comp['comment']
else:
print "fail to find ", self.netKey, " in net list"
def Output(self, out = None):
refs = ''
for r in ref_sorted(self.refs):
refs += r + ','
if not out:
out = csv.writer(sys.stdout, lineterminator='\n', delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)
out.writerow([self.value, self.desc, refs, self.fp, str(len(self.refs)), self.partNumber])
def AddRef(self, ref):
self.refs.append(ref)
self.refs = ref_sorted(self.refs)
def OutputBOMHeader(out = None):
if not out:
out = csv.writer(sys.stdout, lineterminator='\n', delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)
out.writerow(['Comment','Description','Designator','Footprint','Quantity','PartNumber'])
def IsModExclude(mod, ExcludeRefs = [], ExcludeValues = []):
r = mod.GetReference()
v = mod.GetValue()
for pat in ExcludeRefs:
if pat.match(r):
return True
for pat in ExcludeValues:
if pat.match(v):
return True
return False
removedRefs = {}
def GenBOM(brd = None, layer = pcbnew.F_Cu, type = 1, ExcludeRefs = [], ExcludeValues = [], netList = None):
if not brd:
brd = pcbnew.GetBoard()
bomList = {}
for mod in brd.GetModules():
needOutput = False
needRemove = False
if unusedRef:
needRemove = unusedRef.contains(mod.GetReference())
if needRemove:
global removedRefs
removedRefs[mod.GetReference()] = mod.GetValue()
if (mod.GetLayer() == layer) and (not IsModExclude(mod, ExcludeRefs, ExcludeValues) and (not needRemove)):
needOutput = IsSMD(mod) == (type == 1)
if needOutput:
v = mod.GetValue()
f = footPrintName(mod)
r = mod.GetReference()
vf = v + f
if bomList.has_key(vf):
bomList[vf].AddRef(r)
else:
bomList[vf] = BOMItem(r,f,v, mod.GetPadCount(), netList)
print 'there are ', len(bomList), ' items at layer ', layer
return sorted(bomList.values(), key = lambda item: ref_comp(item.refs[0]))
def layerName(layerId):
if layerId == pcbnew.F_Cu:
return 'T'
if layerId == pcbnew.B_Cu:
return 'B'
return 'X'
def toMM(v):
return str(v/1000000.0) + 'mm'
class POSItem:
def __init__(self, mod, offx = 0, offy = 0):
self.MidX = toMM(mod.GetPosition().x-offx)
self.MidY = toMM(offy - mod.GetPosition().y)
self.RefX = toMM(mod.GetPosition().x-offx)
self.RefY = toMM(offy - mod.GetPosition().y)
pad = GetPad1(mod)
if pad:
self.PadX = toMM(pad.GetPosition().x-offx)
self.PadY = toMM(offy - pad.GetPosition().y)
else:
print 'Pad1 not found for mod'
self.PadX = self.MidX
self.PadY = self.MidY
self.rot = int(mod.GetOrientation()/10)
self.ref = mod.GetReference()
self.val = mod.GetValue()
self.layer = layerName(mod.GetLayer())
self.fp = footPrintName(mod)
def Output(self, out = None):
if not out:
out = csv.writer(sys.stdout, lineterminator='\n', delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)
out.writerow([self.ref, self.fp, str(self.MidX), str(self.MidY),
str(self.RefX), str(self.RefY), str(self.PadX), str(self.PadY),
self.layer, str(self.rot), self.val])
def GenPos(brd = None, layer = pcbnew.F_Cu, type = 1, ExcludeRefs = [], ExcludeValues = []):
if not brd:
brd = pcbnew.GetBoard()
posList = []
pt_org = brd.GetAuxOrigin()
for mod in brd.GetModules():
needOutput = False
if (mod.GetLayer() == layer) and (not IsModExclude(mod, ExcludeRefs, ExcludeValues)):
needOutput = IsSMD(mod) == (type == 1)
if needOutput:
posList.append(POSItem(mod, pt_org.x, pt_org.y))
posList = sorted(posList, key = lambda item: ref_comp(item.ref))
return posList
def OutputPosHeader(out = None):
if not out:
out = csv.writer(sys.stdout, lineterminator='\n', delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)
out.writerow(['Designator','Footprint','Mid X','Mid Y','Ref X','Ref Y','Pad X','Pad Y','Layer','Rotation','Comment'])
def PrintBOM(boms):
OutputBOMHeader()
i = 1
for bom in boms:
print 'BOM items for BOM', i
i = i + 1
for v in bom:
v.Output()
def PrintPOS(Poses):
OutputPosHeader()
i = 1
for pos in Poses:
print 'Pos items ', i
i = i+ 1
for v in pos:
v.Output()
def CollectItemByName(filename = None):
try:
brd = pcbnew.LoadBoard(filename)
except IOError:
print 'Can not open ', filename
filename = os.path.join(os.path.split(pcbnew.GetBoard().GetFileName())[0], filename)
print 'Try to open ', filename
try:
brd = pcbnew.LoadBoard(filename)
except IOError:
print 'Can not open ', filename
return None
bi = BoardItems()
bi.Collect(brd)
return bi
def CollectItem(brd = None):
if not brd:
brd = pcbnew.GetBoard()
bi = BoardItems()
bi.Collect(brd)
return bi
def CopyItemTo(boardItem, x, y):
newBI = boardItem.Clone()
newBI.MoveToMM(x, y)
return newBI
def MirrorItemTo(boardItem, x, y):
newBI = boardItem.Clone()
newBI.MoveToMM(x, y)
newBI.Mirror()
return newBI
class UnicodeWriter:
def __init__(self, file, *a, **kw):
self.file = file
def writerow(self, data):
for e in data:
self.file.write(u'"')
#print isinstance(e, unicode)
if not isinstance(e, unicode):
self.file.write(unicode(e))
else:
self.file.write(e)
self.file.write(u'",')
self.file.write(u'\n')
def OpenCSV(fileName):
try:
f = io.open(fileName, 'w+', encoding="utf-8")
except IOError:
e = "Can't open output file for writing: " + fileName
sys.stderr.write(__file__ + ": " + e + "\n")
f = sys.stdout
#out = csv.writer( f, lineterminator='\n', delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL )
out = UnicodeWriter(f)
return out
def PreCompilePattenList(pattenList):
res = []
for pat in pattenList:
res.append(re.compile(pat))
return res
def def_logger(*args):
r = ""
for t in args:
r = r + str(t) + " "
print r
def GenMFDoc(SplitTopAndBottom = False, ExcludeRef = [], ExcludeValue = [], brd = None, needGenBOM = True, needGenPos = True, logger = def_logger):
if not brd:
brd = pcbnew.GetBoard()
if not needGenBOM and not needGenPos:
return
bound = GetBoardBound(brd)
org_pt = pcbnew.wxPoint( bound.GetLeft(), bound.GetBottom())
brd.SetAuxOrigin(org_pt)
logger("set board aux origin to left bottom point, at", org_pt)
fName = brd.GetFileName()
path = os.path.split(fName)[0]
fName = os.path.split(fName)[1]
bomName = fName.rsplit('.',1)[0]
netList = loadnet.loadNet(brd)
excludeRefs = PreCompilePattenList(ExcludeRef)
excludeValues = PreCompilePattenList(ExcludeValue)
bomSMDTop = GenBOM(brd, pcbnew.F_Cu, 1, excludeRefs, excludeValues, netList)
bomHoleTop = GenBOM(brd, pcbnew.F_Cu, 0, excludeRefs, excludeValues, netList)
bomSMDBot = GenBOM(brd, pcbnew.B_Cu, 1, excludeRefs, excludeValues, netList)
bomHoleBot = GenBOM(brd, pcbnew.B_Cu, 0, excludeRefs, excludeValues, netList)
posSMDTop = GenPos(brd, pcbnew.F_Cu, 1, excludeRefs, excludeValues)
posHoleTop = GenPos(brd, pcbnew.F_Cu, 0, excludeRefs, excludeValues)
posSMDBot = GenPos(brd, pcbnew.B_Cu, 1, excludeRefs, excludeValues)
posHoleBot = GenPos(brd, pcbnew.B_Cu, 0, excludeRefs, excludeValues)
if SplitTopAndBottom:
fName = bomName
bomName = path + '/' + fName + '_BOM_TOP.csv'
posName = path + '/' + fName + '_POS_TOP.csv'
if needGenBOM:
# Generate BOM for Top layer
logger('Generating BOM file ', bomName)
csv = OpenCSV(bomName)
OutputBOMHeader(csv)
for v in bomSMDTop:
v.Output(csv)
if len(bomHoleTop)>0:
csv.writerow(['Through Hole Items '])
for v in bomHoleTop:
v.Output(csv)
if needGenPos:
# Generate POS for Top layer
logger('Generating POS file ', posName)
csv = OpenCSV(posName)
OutputPosHeader(csv)
for v in posSMDTop:
v.Output(csv)
if len(posHoleTop)>0:
csv.writerow(['Through Hole Items '])
for v in posHoleTop:
v.Output(csv)
bomName = path + '/' + fName + '_BOM_BOT.csv'
posName = path + '/' + fName + '_POS_BOT.csv'
if needGenBOM:
# Generate BOM for Bottom layer
logger('Generating BOM file ', bomName)
csv = OpenCSV(bomName)
OutputBOMHeader(csv)
for v in bomSMDBot:
v.Output(csv)
if len(bomHoleBot)>0:
csv.writerow(['Through Hole Items '])
for v in bomHoleBot:
v.Output(csv)
if needGenPos:
# Generate POS for Bottom layer
logger('Generating POS file ', posName)
csv = OpenCSV(posName)
OutputPosHeader(csv)
for v in posSMDBot:
v.Output(csv)
if len(posHoleBot)>0:
csv.writerow(['Through Hole Items '])
for v in posHoleBot:
v.Output(csv)
else:
posName = path + '/' + bomName + '_POS.csv'
bomName = path + '/' + bomName + '_BOM.csv'
# Generate BOM for both layer
if needGenBOM:
logger('Generating BOM file ', bomName)
csv = OpenCSV(bomName)
OutputBOMHeader(csv)
for v in bomSMDTop:
v.Output(csv)
for v in bomSMDBot:
v.Output(csv)
if len(bomHoleTop)+len(bomHoleBot)>0:
csv.writerow(['Through Hole Items '])
for v in bomHoleTop:
v.Output(csv)
for v in bomHoleBot:
v.Output(csv)
if needGenPos:
# Generate POS for both layer
logger('Generating POS file ', posName)
csv = OpenCSV(posName)
OutputPosHeader(csv)
for v in posSMDTop:
v.Output(csv)
for v in posSMDBot:
v.Output(csv)
if len(posHoleTop)+len(posHoleBot)>0:
csv.writerow(['Through Hole Items '])
for v in posHoleTop:
v.Output(csv)
for v in posHoleBot:
v.Output(csv)
return bomName, posName
def version():
print "1.1"
def GenSMTFiles():
#reload(sys)
#sys.setdefaultencoding("utf8")
GenMFDoc()
gd.GenGerberDrill(board = None, split_G85 = 0.2, plotDir = "gerber/")
class MFDialog(wx.Dialog):
def __init__(self):
wx.Dialog.__init__(self, None, -1, 'Generate Manufacturing Docs', size=(800, 430))
self.chkBOM = wx.CheckBox(self, label = "BOM List", pos = (15, 10))
self.chkPos = wx.CheckBox(self, label = "Position File", pos = (15, 30))
self.chkGerber = wx.CheckBox(self, label = "Gerber Files", pos = (15, 50))
self.chkPlotRef = wx.CheckBox(self, label = "Plot Reference", pos = (130, 50))
self.chkSplitSlot = wx.CheckBox(self, label = "Split Slot", pos = (280, 50))
self.chkBOM.SetValue(True)
self.chkPos.SetValue(True)
self.chkGerber.SetValue(True)
self.chkPlotRef.SetValue(True)
self.chkSplitSlot.SetValue(False)
self.static_text = wx.StaticText(self, -1, 'Log:', style=wx.ALIGN_CENTER, pos = (15, 90))
self.area_text = wx.TextCtrl(self, -1, '', size=(770, 280), pos = (15, 110),
style=(wx.TE_MULTILINE | wx.TE_AUTO_SCROLL | wx.TE_DONTWRAP| wx.TE_READONLY))
self.static_text1 = wx.StaticText(self, -1, 'Exclude Refs:', style=wx.ALIGN_CENTER, pos = (15, 70))
self.exclude_ref_text = wx.TextCtrl(self, -1, '', size=(670, 25), pos = (100, 70))
self.btnGen = wx.Button(self, label = "Generate Manufacturing Docs", pos=(400, 30))
self.Bind(wx.EVT_BUTTON, self.Onclick, self.btnGen)
self.btnClearLog = wx.Button(self, label = "Clear Log", pos=(700, 30))
self.Bind(wx.EVT_BUTTON, self.ClearLog, self.btnClearLog)
self.exclude_ref_text.Clear()
self.exclude_ref_text.AppendText(GetExcludeRefs())
#okButton = wx.Button(self, wx.ID_OK, "OK", pos=(15, 100))
#okButton.SetDefault()
#cancelButton = wx.Button(self, wx.ID_CANCEL, "Cancel", pos=(200, 150))
def log(self, *args):
for v in args:
try:
self.area_text.AppendText(str(v) + " ")
except Exception as e:
try:
self.area_text.AppendText(v + " ")
except Exception as e1:
self.area_text.AppendText("\nError:\nfail to log content ")
self.area_text.AppendText(traceback.format_exc())
self.area_text.AppendText("\n")
def ClearLog(self, e):
self.area_text.SetValue("")
def Onclick(self, e):
try:
if self.chkBOM.GetValue():
self.area_text.AppendText("Start generate BOM list\n")
if self.chkPos.GetValue():
self.area_text.AppendText("Start generate position file\n")
global unusedRef
unusedRef = ExcludeRefClass(self.exclude_ref_text.GetValue())
global removedRefs
removedRefs = {}
GenMFDoc(needGenBOM = self.chkBOM.GetValue(), needGenPos = self.chkPos.GetValue(), logger = lambda *args: self.log(*args) )
#self.area_text.AppendText("Removed refs in BOM: " + ",".join(ref_sorted(removedRefs.keys())) + "\n")
self.area_text.AppendText("Removed refs in BOM:\n")
for n in ref_sorted(removedRefs.keys()):
self.area_text.AppendText(n+":" + removedRefs[n] + "\n")
if self.chkGerber.GetValue():
self.area_text.AppendText("Start generate gerber files\n")
split_slot = None
if self.chkSplitSlot.GetValue():
split_slot = 0.2
gerberPath = gd.GenGerberDrill(
board = None,
split_G85 = split_slot,
plotDir = "gerber/",
plotReference = self.chkPlotRef.GetValue(),
logger = lambda *args: self.log(*args))
self.area_text.AppendText( 'Gerber file dir is "%s"' % gerberPath)
except Exception as e:
self.area_text.AppendText("Error:\n")
self.area_text.AppendText(traceback.format_exc())
class gen_mf_doc( pcbnew.ActionPlugin ):
"""
gen_mf_doc: A plugin used to generate the BOM and position files
How to use:
- just run the plugin
- the BOM and position files will be generated under the PCB file folder
BOM file name is <pcb file name>_BOM.csv
Position file name is <pcb file name>_POS.csv
- the Gerber and drill files will be generated under the gerber folder
"""
def defaults( self ):
"""
Method defaults must be redefined
self.name should be the menu label to use
self.category should be the category (not yet used)
self.description should be a comprehensive description
of the plugin
"""
self.name = "Gen Manufacturing Docs"
#self.category = "Modify PCB"
self.description = "Automatically generate manufacturing documents: Gerber, Drill, BOM, Position"
self.icon_file_name = os.path.join(os.path.dirname(__file__), "./mf_tool.png")
self.show_toolbar_button = True
def Run( self ):
tt = MFDialog()
tt.Show()
gen_mf_doc().register()
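# Example (sketch): besides the toolbar action, generation can be driven from
# pcbnew's scripting console (the module name "mf_tool" below is illustrative):
#   import mf_tool
#   mf_tool.GenSMTFiles()   # writes <pcb>_BOM.csv, <pcb>_POS.csv and gerber/ output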
py | 1a5caa77aec7b94afb2f493a3e69d38174106212 |
import pytest
from _mock_data.xpath.method_2 import exception_handling_for_methods_with_3_arguments_or_more
from _mock_data.xpath.test_set_2 import XPATH_TEST_SET_W3SCHOOLS_COM_INPUT
from browserist import Browser
from browserist.browser.select.input_field import select_input_field
from browserist.constant import timeout
from browserist.model.type.callable import BrowserMethodWith3ArgumentsCallable
@pytest.mark.parametrize("method", [
select_input_field,
])
def test_xpath_exception_handling_for_select_methods(
browser_default_headless: Browser,
method: BrowserMethodWith3ArgumentsCallable,
) -> None:
exception_handling_for_methods_with_3_arguments_or_more(
browser_default_headless, method, timeout.VERY_SHORT, test_set=XPATH_TEST_SET_W3SCHOOLS_COM_INPUT)
py | 1a5caa8f2e744f4da2ca55656efa32ac61c29fb7 |
"""
Symmetric Functions in Non-Commuting Variables
AUTHORS:
- Travis Scrimshaw (08-04-2013): Initial version
"""
# ****************************************************************************
# Copyright (C) 2013 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.misc.misc_c import prod
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.categories.graded_hopf_algebras import GradedHopfAlgebras
from sage.categories.rings import Rings
from sage.categories.fields import Fields
from sage.arith.misc import factorial
from sage.combinat.free_module import CombinatorialFreeModule
from sage.combinat.ncsym.bases import NCSymBases, MultiplicativeNCSymBases, NCSymBasis_abstract
from sage.combinat.set_partition import SetPartitions
from sage.combinat.set_partition_ordered import OrderedSetPartitions
from sage.combinat.posets.posets import Poset
from sage.combinat.sf.sf import SymmetricFunctions
from sage.matrix.matrix_space import MatrixSpace
from sage.sets.set import Set
from sage.rings.integer_ring import ZZ
from functools import reduce
def matchings(A, B):
"""
Iterate through all matchings of the sets `A` and `B`.
EXAMPLES::
sage: from sage.combinat.ncsym.ncsym import matchings
sage: list(matchings([1, 2, 3], [-1, -2]))
[[[1], [2], [3], [-1], [-2]],
[[1], [2], [3, -1], [-2]],
[[1], [2], [3, -2], [-1]],
[[1], [2, -1], [3], [-2]],
[[1], [2, -1], [3, -2]],
[[1], [2, -2], [3], [-1]],
[[1], [2, -2], [3, -1]],
[[1, -1], [2], [3], [-2]],
[[1, -1], [2], [3, -2]],
[[1, -1], [2, -2], [3]],
[[1, -2], [2], [3], [-1]],
[[1, -2], [2], [3, -1]],
[[1, -2], [2, -1], [3]]]
"""
lst_A = list(A)
lst_B = list(B)
# Handle corner cases
if not lst_A:
if not lst_B:
yield []
else:
yield [[b] for b in lst_B]
return
if not lst_B:
yield [[a] for a in lst_A]
return
rem_A = lst_A[:]
a = rem_A.pop(0)
for m in matchings(rem_A, lst_B):
yield [[a]] + m
for i in range(len(lst_B)):
rem_B = lst_B[:]
b = rem_B.pop(i)
for m in matchings(rem_A, rem_B):
yield [[a, b]] + m
def nesting(la, nu):
r"""
Return the nesting number of ``la`` inside of ``nu``.
Consider a set partition `A` as a set of arcs `i - j`, where `i`
and `j` are in the same part of `A`. Define
.. MATH::
\operatorname{nst}_{\lambda}^{\nu} = \#\{ i < j < k < l \mid
i - l \in \nu, j - k \in \lambda \},
and this corresponds to the number of arcs of `\lambda` strictly
contained inside of `\nu`.
EXAMPLES::
sage: from sage.combinat.ncsym.ncsym import nesting
sage: nu = SetPartition([[1,4], [2], [3]])
sage: mu = SetPartition([[1,4], [2,3]])
sage: nesting(set(mu).difference(nu), nu)
1
::
sage: lst = list(SetPartitions(4))
sage: d = {}
sage: for i, nu in enumerate(lst):
....: for mu in nu.coarsenings():
....: if set(nu.arcs()).issubset(mu.arcs()):
....: d[i, lst.index(mu)] = nesting(set(mu).difference(nu), nu)
sage: matrix(d)
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 1 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
"""
arcs = []
for p in nu:
p = sorted(p)
arcs += [(p[i], p[i+1]) for i in range(len(p)-1)]
nst = 0
for p in la:
p = sorted(p)
for i in range(len(p)-1):
for a in arcs:
if a[0] >= p[i]:
break
if p[i+1] < a[1]:
nst += 1
return nst
class SymmetricFunctionsNonCommutingVariables(UniqueRepresentation, Parent):
r"""
Symmetric functions in non-commutative variables.
The ring of symmetric functions in non-commutative variables,
which is not to be confused with the :class:`non-commutative symmetric
functions<NonCommutativeSymmetricFunctions>`, is the ring of all
bounded-degree noncommutative power series in countably many
indeterminates (i.e., elements in
`R \langle \langle x_1, x_2, x_3, \ldots \rangle \rangle` of bounded
degree) which are invariant with respect to the action of the
symmetric group `S_{\infty}` on the indices of the indeterminates.
It can be regarded as a direct limit over all `n \to \infty` of rings
of `S_n`-invariant polynomials in `n` non-commuting variables
(that is, `S_n`-invariant elements of `R\langle x_1, x_2, \ldots, x_n \rangle`).
This ring is implemented as a Hopf algebra whose basis elements are
indexed by set partitions.
Let `A = \{A_1, A_2, \ldots, A_r\}` be a set partition of the integers
`[k] := \{ 1, 2, \ldots, k \}`. This partition `A` determines an
equivalence relation `\sim_A` on `[k]`, which has `c \sim_A d` if and
only if `c` and `d` are in the same part `A_j` of `A`.
The monomial basis element `\mathbf{m}_A` indexed by `A` is the sum of
monomials `x_{i_1} x_{i_2} \cdots x_{i_k}` such that `i_c = i_d` if
and only if `c \sim_A d`.
The `k`-th graded component of the ring of symmetric functions in
non-commutative variables has its dimension equal to the number of
set partitions of `[k]`. (If we work, instead, with finitely many --
say, `n` -- variables, then its dimension is equal to the number of
set partitions of `[k]` where the number of parts is at most `n`.)
.. NOTE::
All set partitions are considered standard (i.e., set partitions
of `[n]` for some `n`) unless otherwise stated.
REFERENCES:
.. [BZ05] \N. Bergeron, M. Zabrocki. *The Hopf algebra of symmetric
functions and quasisymmetric functions in non-commutative variables
are free and cofree*. (2005). :arxiv:`math/0509265v3`.
.. [BHRZ06] \N. Bergeron, C. Hohlweg, M. Rosas, M. Zabrocki.
*Grothendieck bialgebras, partition lattices, and symmetric
functions in noncommutative variables*. Electronic Journal of
Combinatorics. **13** (2006).
.. [RS06] \M. Rosas, B. Sagan. *Symmetric functions in noncommuting
variables*. Trans. Amer. Math. Soc. **358** (2006). no. 1, 215-232.
:arxiv:`math/0208168`.
.. [BRRZ08] \N. Bergeron, C. Reutenauer, M. Rosas, M. Zabrocki.
*Invariants and coinvariants of the symmetric group in noncommuting
variables*. Canad. J. Math. **60** (2008). 266-296.
:arxiv:`math/0502082`
.. [BT13] \N. Bergeron, N. Thiem. *A supercharacter table decomposition
via power-sum symmetric functions*. Int. J. Algebra Comput. **23**,
763 (2013). :doi:`10.1142/S0218196713400171`. :arxiv:`1112.4901`.
EXAMPLES:
We begin by first creating the ring of `NCSym` and the bases that are
analogues of the usual symmetric functions::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: e = NCSym.e()
sage: h = NCSym.h()
sage: p = NCSym.p()
sage: m
Symmetric functions in non-commuting variables over the Rational Field in the monomial basis
The basis is indexed by set partitions, so we create a few elements and
convert them between these bases::
sage: elt = m(SetPartition([[1,3],[2]])) - 2*m(SetPartition([[1],[2]])); elt
-2*m{{1}, {2}} + m{{1, 3}, {2}}
sage: e(elt)
1/2*e{{1}, {2, 3}} - 2*e{{1, 2}} + 1/2*e{{1, 2}, {3}} - 1/2*e{{1, 2, 3}} - 1/2*e{{1, 3}, {2}}
sage: h(elt)
-4*h{{1}, {2}} - 2*h{{1}, {2}, {3}} + 1/2*h{{1}, {2, 3}} + 2*h{{1, 2}}
+ 1/2*h{{1, 2}, {3}} - 1/2*h{{1, 2, 3}} + 3/2*h{{1, 3}, {2}}
sage: p(elt)
-2*p{{1}, {2}} + 2*p{{1, 2}} - p{{1, 2, 3}} + p{{1, 3}, {2}}
sage: m(p(elt))
-2*m{{1}, {2}} + m{{1, 3}, {2}}
sage: elt = p(SetPartition([[1,3],[2]])) - 4*p(SetPartition([[1],[2]])) + 2; elt
2*p{} - 4*p{{1}, {2}} + p{{1, 3}, {2}}
sage: e(elt)
2*e{} - 4*e{{1}, {2}} + e{{1}, {2}, {3}} - e{{1, 3}, {2}}
sage: m(elt)
2*m{} - 4*m{{1}, {2}} - 4*m{{1, 2}} + m{{1, 2, 3}} + m{{1, 3}, {2}}
sage: h(elt)
2*h{} - 4*h{{1}, {2}} - h{{1}, {2}, {3}} + h{{1, 3}, {2}}
sage: p(m(elt))
2*p{} - 4*p{{1}, {2}} + p{{1, 3}, {2}}
There is also a shorthand for creating elements. We note that we must use
``p[[]]`` to create the empty set partition due to python's syntax. ::
sage: eltm = m[[1,3],[2]] - 3*m[[1],[2]]; eltm
-3*m{{1}, {2}} + m{{1, 3}, {2}}
sage: elte = e[[1,3],[2]]; elte
e{{1, 3}, {2}}
sage: elth = h[[1,3],[2,4]]; elth
h{{1, 3}, {2, 4}}
sage: eltp = p[[1,3],[2,4]] + 2*p[[1]] - 4*p[[]]; eltp
-4*p{} + 2*p{{1}} + p{{1, 3}, {2, 4}}
There is also a natural projection to the usual symmetric functions by
letting the variables commute. This projection map preserves the product
and coproduct structure. We check that Theorem 2.1 of [RS06]_ holds::
sage: Sym = SymmetricFunctions(QQ)
sage: Sm = Sym.m()
sage: Se = Sym.e()
sage: Sh = Sym.h()
sage: Sp = Sym.p()
sage: eltm.to_symmetric_function()
-6*m[1, 1] + m[2, 1]
sage: Sm(p(eltm).to_symmetric_function())
-6*m[1, 1] + m[2, 1]
sage: elte.to_symmetric_function()
2*e[2, 1]
sage: Se(h(elte).to_symmetric_function())
2*e[2, 1]
sage: elth.to_symmetric_function()
4*h[2, 2]
sage: Sh(m(elth).to_symmetric_function())
4*h[2, 2]
sage: eltp.to_symmetric_function()
-4*p[] + 2*p[1] + p[2, 2]
sage: Sp(e(eltp).to_symmetric_function())
-4*p[] + 2*p[1] + p[2, 2]
"""
def __init__(self, R):
"""
Initialize ``self``.
EXAMPLES::
sage: NCSym1 = SymmetricFunctionsNonCommutingVariables(FiniteField(23))
sage: NCSym2 = SymmetricFunctionsNonCommutingVariables(Integers(23))
sage: TestSuite(SymmetricFunctionsNonCommutingVariables(QQ)).run()
"""
# change the line below to assert(R in Rings()) once MRO issues from #15536, #15475 are resolved
assert(R in Fields() or R in Rings()) # side effect of this statement assures MRO exists for R
self._base = R # Won't be needed once CategoryObject won't override base_ring
category = GradedHopfAlgebras(R) # TODO: .Cocommutative()
Parent.__init__(self, category = category.WithRealizations())
def _repr_(self):
r"""
EXAMPLES::
sage: SymmetricFunctionsNonCommutingVariables(ZZ)
Symmetric functions in non-commuting variables over the Integer Ring
"""
return "Symmetric functions in non-commuting variables over the %s"%self.base_ring()
def a_realization(self):
r"""
Return the realization of the powersum basis of ``self``.
OUTPUT:
- The powersum basis of symmetric functions in non-commuting variables.
EXAMPLES::
sage: SymmetricFunctionsNonCommutingVariables(QQ).a_realization()
Symmetric functions in non-commuting variables over the Rational Field in the powersum basis
"""
return self.powersum()
_shorthands = tuple(['chi', 'cp', 'm', 'e', 'h', 'p', 'rho', 'x'])
def dual(self):
r"""
Return the dual Hopf algebra of the symmetric functions in
non-commuting variables.
EXAMPLES::
sage: SymmetricFunctionsNonCommutingVariables(QQ).dual()
Dual symmetric functions in non-commuting variables over the Rational Field
"""
from sage.combinat.ncsym.dual import SymmetricFunctionsNonCommutingVariablesDual
return SymmetricFunctionsNonCommutingVariablesDual(self.base_ring())
class monomial(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the monomial basis.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: m[[1,3],[2]]*m[[1,2]]
m{{1, 3}, {2}, {4, 5}} + m{{1, 3}, {2, 4, 5}} + m{{1, 3, 4, 5}, {2}}
sage: m[[1,3],[2]].coproduct()
m{} # m{{1, 3}, {2}} + m{{1}} # m{{1, 2}} + m{{1, 2}} # m{{1}} + m{{1, 3}, {2}} # m{}
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.m()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='m', bracket=False,
category=NCSymBases(NCSym))
@cached_method
def _m_to_p_on_basis(self, A):
r"""
Return `\mathbf{m}_A` in terms of the powersum basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the powersum basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: all(m(m._m_to_p_on_basis(A)) == m[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
def lt(s, t):
if s == t:
return False
for p in s:
if len([z for z in t if z.intersection(p)]) != 1:
return False
return True
p = self.realization_of().p()
P = Poset((A.coarsenings(), lt))
R = self.base_ring()
return p._from_dict({B: R(P.moebius_function(A, B)) for B in P})
@cached_method
def _m_to_cp_on_basis(self, A):
r"""
Return `\mathbf{m}_A` in terms of the `\mathbf{cp}` basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{cp}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: all(m(m._m_to_cp_on_basis(A)) == m[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
cp = self.realization_of().cp()
arcs = set(A.arcs())
R = self.base_ring()
return cp._from_dict({B: R((-1)**len(set(B.arcs()).difference(A.arcs())))
for B in A.coarsenings() if arcs.issubset(B.arcs())},
remove_zeros=False)
def from_symmetric_function(self, f):
r"""
Return the image of the symmetric function ``f`` in ``self``.
This is performed by converting to the monomial basis and
extending the method :meth:`sum_of_partitions` linearly. This is a
linear map from the symmetric functions to the symmetric functions
in non-commuting variables that does not preserve the product or
coproduct structure of the Hopf algebra.
.. SEEALSO:: :meth:`~Element.to_symmetric_function`
INPUT:
- ``f`` -- an element of the symmetric functions
OUTPUT:
- An element of the `\mathbf{m}` basis
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).m()
sage: mon = SymmetricFunctions(QQ).m()
sage: elt = m.from_symmetric_function(mon[2,1,1]); elt
1/12*m{{1}, {2}, {3, 4}} + 1/12*m{{1}, {2, 3}, {4}} + 1/12*m{{1}, {2, 4}, {3}}
+ 1/12*m{{1, 2}, {3}, {4}} + 1/12*m{{1, 3}, {2}, {4}} + 1/12*m{{1, 4}, {2}, {3}}
sage: elt.to_symmetric_function()
m[2, 1, 1]
sage: e = SymmetricFunctionsNonCommutingVariables(QQ).e()
sage: elm = SymmetricFunctions(QQ).e()
sage: e(m.from_symmetric_function(elm[4]))
1/24*e{{1, 2, 3, 4}}
sage: h = SymmetricFunctionsNonCommutingVariables(QQ).h()
sage: hom = SymmetricFunctions(QQ).h()
sage: h(m.from_symmetric_function(hom[4]))
1/24*h{{1, 2, 3, 4}}
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).p()
sage: pow = SymmetricFunctions(QQ).p()
sage: p(m.from_symmetric_function(pow[4]))
p{{1, 2, 3, 4}}
sage: p(m.from_symmetric_function(pow[2,1]))
1/3*p{{1}, {2, 3}} + 1/3*p{{1, 2}, {3}} + 1/3*p{{1, 3}, {2}}
sage: p([[1,2]])*p([[1]])
p{{1, 2}, {3}}
Check that `\chi \circ \widetilde{\chi}` is the identity on `Sym`::
sage: all(m.from_symmetric_function(pow(la)).to_symmetric_function() == pow(la)
....: for la in Partitions(4))
True
"""
m = SymmetricFunctions(self.base_ring()).m()
return self.sum([c * self.sum_of_partitions(i) for i,c in m(f)])
def dual_basis(self):
r"""
Return the dual basis to the monomial basis.
OUTPUT:
- the `\mathbf{w}` basis of the dual Hopf algebra
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).m()
sage: m.dual_basis()
Dual symmetric functions in non-commuting variables over the Rational Field in the w basis
"""
return self.realization_of().dual().w()
def duality_pairing(self, x, y):
r"""
Compute the pairing between an element of ``self`` and an element
of the dual.
INPUT:
- ``x`` -- an element of symmetric functions in non-commuting
variables
- ``y`` -- an element of the dual of symmetric functions in
non-commuting variables
OUTPUT:
- an element of the base ring of ``self``
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: w = m.dual_basis()
sage: matrix([[m(A).duality_pairing(w(B)) for A in SetPartitions(3)] for B in SetPartitions(3)])
[1 0 0 0 0]
[0 1 0 0 0]
[0 0 1 0 0]
[0 0 0 1 0]
[0 0 0 0 1]
sage: (m[[1,2],[3]] + 3*m[[1,3],[2]]).duality_pairing(2*w[[1,3],[2]] + w[[1,2,3]] + 2*w[[1,2],[3]])
8
"""
x = self(x)
y = self.dual_basis()(y)
return sum(coeff * y[I] for (I, coeff) in x)
def product_on_basis(self, A, B):
r"""
The product on monomial basis elements.
The product of the basis elements indexed by two set partitions `A`
and `B` is the sum of the basis elements indexed by set partitions
`C` such that `C \wedge ([n] | [k]) = A | B` where `n = |A|`
and `k = |B|`. Here `A \wedge B` is the infimum of `A` and `B`
and `A | B` is the
:meth:`SetPartition.pipe` operation.
Equivalently we can describe all `C` as matchings between the
parts of `A` and `B` where if `a \in A` is matched
with `b \in B`, we take `a \cup b` instead of `a` and `b` in `C`.
INPUT:
- ``A``, ``B`` -- set partitions
OUTPUT:
- an element of the `\mathbf{m}` basis
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: A = SetPartition([[1], [2,3]])
sage: B = SetPartition([[1], [3], [2,4]])
sage: m.product_on_basis(A, B)
m{{1}, {2, 3}, {4}, {5, 7}, {6}} + m{{1}, {2, 3, 4}, {5, 7}, {6}}
+ m{{1}, {2, 3, 5, 7}, {4}, {6}} + m{{1}, {2, 3, 6}, {4}, {5, 7}}
+ m{{1, 4}, {2, 3}, {5, 7}, {6}} + m{{1, 4}, {2, 3, 5, 7}, {6}}
+ m{{1, 4}, {2, 3, 6}, {5, 7}} + m{{1, 5, 7}, {2, 3}, {4}, {6}}
+ m{{1, 5, 7}, {2, 3, 4}, {6}} + m{{1, 5, 7}, {2, 3, 6}, {4}}
+ m{{1, 6}, {2, 3}, {4}, {5, 7}} + m{{1, 6}, {2, 3, 4}, {5, 7}}
+ m{{1, 6}, {2, 3, 5, 7}, {4}}
sage: B = SetPartition([[1], [2]])
sage: m.product_on_basis(A, B)
m{{1}, {2, 3}, {4}, {5}} + m{{1}, {2, 3, 4}, {5}}
+ m{{1}, {2, 3, 5}, {4}} + m{{1, 4}, {2, 3}, {5}} + m{{1, 4}, {2, 3, 5}}
+ m{{1, 5}, {2, 3}, {4}} + m{{1, 5}, {2, 3, 4}}
sage: m.product_on_basis(A, SetPartition([]))
m{{1}, {2, 3}}
TESTS:
We check that we get all of the correct set partitions::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: A = SetPartition([[1], [2,3]])
sage: B = SetPartition([[1], [2]])
sage: S = SetPartition([[1,2,3], [4,5]])
sage: AB = SetPartition([[1], [2,3], [4], [5]])
sage: L = sorted(filter(lambda x: S.inf(x) == AB, SetPartitions(5)), key=str)
sage: list(map(list, L)) == list(map(list, sorted(m.product_on_basis(A, B).support(), key=str)))
True
"""
if not A:
return self.monomial(B)
if not B:
return self.monomial(A)
P = SetPartitions()
n = A.size()
B = [Set([y+n for y in b]) for b in B] # Shift B by n
unions = lambda m: [reduce(lambda a,b: a.union(b), x) for x in m]
one = self.base_ring().one()
return self._from_dict({P(unions(m)): one for m in matchings(A, B)},
remove_zeros=False)
def coproduct_on_basis(self, A):
r"""
Return the coproduct of a monomial basis element.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- The coproduct applied to the monomial symmetric function in
non-commuting variables indexed by ``A`` expressed in the
monomial basis.
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: m[[1, 3], [2]].coproduct()
m{} # m{{1, 3}, {2}} + m{{1}} # m{{1, 2}} + m{{1, 2}} # m{{1}} + m{{1, 3}, {2}} # m{}
sage: m.coproduct_on_basis(SetPartition([]))
m{} # m{}
sage: m.coproduct_on_basis(SetPartition([[1,2,3]]))
m{} # m{{1, 2, 3}} + m{{1, 2, 3}} # m{}
sage: m[[1,5],[2,4],[3,7],[6]].coproduct()
m{} # m{{1, 5}, {2, 4}, {3, 7}, {6}} + m{{1}} # m{{1, 5}, {2, 4}, {3, 6}}
+ 2*m{{1, 2}} # m{{1, 3}, {2, 5}, {4}} + m{{1, 2}} # m{{1, 4}, {2, 3}, {5}}
+ 2*m{{1, 2}, {3}} # m{{1, 3}, {2, 4}} + m{{1, 3}, {2}} # m{{1, 4}, {2, 3}}
+ 2*m{{1, 3}, {2, 4}} # m{{1, 2}, {3}} + 2*m{{1, 3}, {2, 5}, {4}} # m{{1, 2}}
+ m{{1, 4}, {2, 3}} # m{{1, 3}, {2}} + m{{1, 4}, {2, 3}, {5}} # m{{1, 2}}
+ m{{1, 5}, {2, 4}, {3, 6}} # m{{1}} + m{{1, 5}, {2, 4}, {3, 7}, {6}} # m{}
"""
P = SetPartitions()
# Handle corner cases
if not A:
return self.tensor_square().monomial(( P([]), P([]) ))
if len(A) == 1:
return self.tensor_square().sum_of_monomials([(P([]), A), (A, P([]))])
ell_set = list(range(1, len(A) + 1)) # +1 for indexing
L = [[[], ell_set]] + list(SetPartitions(ell_set, 2))
def to_basis(S):
if not S:
return P([])
sub_parts = [list(A[i-1]) for i in S] # -1 for indexing
mins = [min(p) for p in sub_parts]
over_max = max([max(p) for p in sub_parts]) + 1
ret = [[] for i in range(len(S))]
cur = 1
while min(mins) != over_max:
m = min(mins)
i = mins.index(m)
ret[i].append(cur)
cur += 1
sub_parts[i].pop(sub_parts[i].index(m))
if sub_parts[i]:
mins[i] = min(sub_parts[i])
else:
mins[i] = over_max
return P(ret)
L1 = [(to_basis(S), to_basis(C)) for S,C in L]
L2 = [(M, N) for N,M in L1]
return self.tensor_square().sum_of_monomials(L1 + L2)
def internal_coproduct_on_basis(self, A):
r"""
Return the internal coproduct of a monomial basis element.
The internal coproduct is defined by
.. MATH::
\Delta^{\odot}(\mathbf{m}_A) = \sum_{B \wedge C = A}
\mathbf{m}_B \otimes \mathbf{m}_C
where we sum over all pairs of set partitions `B` and `C`
whose infimum is `A`.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the tensor square of the `\mathbf{m}` basis
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: m.internal_coproduct_on_basis(SetPartition([[1,3],[2]]))
m{{1, 2, 3}} # m{{1, 3}, {2}} + m{{1, 3}, {2}} # m{{1, 2, 3}} + m{{1, 3}, {2}} # m{{1, 3}, {2}}
"""
P = SetPartitions()
SP = SetPartitions(A.size())
ret = [[A,A]]
for i, B in enumerate(SP):
for C in SP[i+1:]:
if B.inf(C) == A:
B_std = P(list(B.standardization()))
C_std = P(list(C.standardization()))
ret.append([B_std, C_std])
ret.append([C_std, B_std])
return self.tensor_square().sum_of_monomials((B, C) for B,C in ret)
def sum_of_partitions(self, la):
r"""
Return the sum over all set partitions whose shape is ``la``
with a fixed coefficient `C` defined below.
Fix a partition `\lambda`; we define
`\lambda! := \prod_i \lambda_i!` and `\lambda^! := \prod_i m_i!`.
Recall that `|\lambda| = \sum_i \lambda_i` and `m_i` is the
number of parts of length `i` of `\lambda`. Thus we define the
coefficient as
.. MATH::
C := \frac{\lambda! \lambda^!}{|\lambda|!}.
Hence we can define a lift `\widetilde{\chi}` from `Sym`
to `NCSym` by
.. MATH::
m_{\lambda} \mapsto C \sum_A \mathbf{m}_A
where the sum is over all set partitions whose shape
is `\lambda`.
INPUT:
- ``la`` -- an integer partition
OUTPUT:
- an element of the `\mathbf{m}` basis
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).m()
sage: m.sum_of_partitions(Partition([2,1,1]))
1/12*m{{1}, {2}, {3, 4}} + 1/12*m{{1}, {2, 3}, {4}} + 1/12*m{{1}, {2, 4}, {3}}
+ 1/12*m{{1, 2}, {3}, {4}} + 1/12*m{{1, 3}, {2}, {4}} + 1/12*m{{1, 4}, {2}, {3}}
TESTS:
Check that `\chi \circ \widetilde{\chi}` is the identity on `Sym`::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).m()
sage: mon = SymmetricFunctions(QQ).monomial()
sage: all(m.from_symmetric_function(mon[la]).to_symmetric_function() == mon[la]
....: for i in range(6) for la in Partitions(i))
True
"""
from sage.combinat.partition import Partition
la = Partition(la) # Make sure it is a partition
R = self.base_ring()
P = SetPartitions()
c = R( prod(factorial(i) for i in la) / ZZ(factorial(la.size())) )
return self._from_dict({P(m): c for m in SetPartitions(sum(la), la)},
remove_zeros=False)
class Element(CombinatorialFreeModule.Element):
"""
An element in the monomial basis of `NCSym`.
"""
def expand(self, n, alphabet='x'):
r"""
Expand ``self`` written in the monomial basis in `n`
non-commuting variables.
INPUT:
- ``n`` -- an integer
- ``alphabet`` -- (default: ``'x'``) a string
OUTPUT:
- The symmetric function of ``self`` expressed in the ``n``
non-commuting variables described by ``alphabet``.
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: m[[1,3],[2]].expand(4)
x0*x1*x0 + x0*x2*x0 + x0*x3*x0 + x1*x0*x1 + x1*x2*x1 + x1*x3*x1
+ x2*x0*x2 + x2*x1*x2 + x2*x3*x2 + x3*x0*x3 + x3*x1*x3 + x3*x2*x3
One can use a different set of variables by using the
optional argument ``alphabet``::
sage: m[[1],[2,3]].expand(3,alphabet='y')
y0*y1^2 + y0*y2^2 + y1*y0^2 + y1*y2^2 + y2*y0^2 + y2*y1^2
"""
from sage.algebras.free_algebra import FreeAlgebra
from sage.combinat.permutation import Permutations
m = self.parent()
F = FreeAlgebra(m.base_ring(), n, alphabet)
x = F.gens()
def on_basis(A):
basic_term = [0] * A.size()
for index, part in enumerate(A):
for i in part:
basic_term[i-1] = index # -1 for indexing
return sum( prod(x[p[i]-1] for i in basic_term) # -1 for indexing
for p in Permutations(n, len(A)) )
return m._apply_module_morphism(self, on_basis, codomain=F)
def to_symmetric_function(self):
r"""
The projection of ``self`` to the symmetric functions.
Take a symmetric function in non-commuting variables
expressed in the `\mathbf{m}` basis, and return its projection
expressed in the monomial basis of symmetric functions.
The map `\chi \colon NCSym \to Sym` is defined by
.. MATH::
\mathbf{m}_A \mapsto
m_{\lambda(A)} \prod_i n_i(\lambda(A))!
where `\lambda(A)` is the partition associated with `A` by
taking the sizes of the parts and `n_i(\mu)` is the
multiplicity of `i` in `\mu`.
OUTPUT:
- an element of the symmetric functions in the monomial basis
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).monomial()
sage: m[[1,3],[2]].to_symmetric_function()
m[2, 1]
sage: m[[1],[3],[2]].to_symmetric_function()
6*m[1, 1, 1]
"""
m = SymmetricFunctions(self.parent().base_ring()).monomial()
c = lambda la: prod(factorial(i) for i in la.to_exp())
return m.sum_of_terms((i.shape(), coeff*c(i.shape()))
for (i, coeff) in self)
m = monomial
class elementary(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the elementary basis.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.e()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='e', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
## Register coercions
# monomials
m = NCSym.m()
self.module_morphism(self._e_to_m_on_basis, codomain=m).register_as_coercion()
# powersum
# NOTE: Keep this ahead of creating the homogeneous basis to
# get the coercion path m -> p -> e
p = NCSym.p()
self.module_morphism(self._e_to_p_on_basis, codomain=p,
triangular="upper").register_as_coercion()
p.module_morphism(p._p_to_e_on_basis, codomain=self,
triangular="upper").register_as_coercion()
# homogeneous
h = NCSym.h()
self.module_morphism(self._e_to_h_on_basis, codomain=h,
triangular="upper").register_as_coercion()
h.module_morphism(h._h_to_e_on_basis, codomain=self,
triangular="upper").register_as_coercion()
@cached_method
def _e_to_m_on_basis(self, A):
r"""
Return `\mathbf{e}_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{m}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
sage: all(e(e._e_to_m_on_basis(A)) == e[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
m = self.realization_of().m()
n = A.size()
P = SetPartitions(n)
min_elt = P([[i] for i in range(1, n+1)])
one = self.base_ring().one()
return m._from_dict({B: one for B in P if A.inf(B) == min_elt},
remove_zeros=False)
@cached_method
def _e_to_h_on_basis(self, A):
r"""
Return `\mathbf{e}_A` in terms of the homogeneous basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{h}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
sage: all(e(e._e_to_h_on_basis(A)) == e[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
h = self.realization_of().h()
sign = lambda B: (-1)**(B.size() - len(B))
coeff = lambda B: sign(B) * prod(factorial(sum( 1 for part in B if part.issubset(big) )) for big in A)
R = self.base_ring()
return h._from_dict({B: R(coeff(B)) for B in A.refinements()},
remove_zeros=False)
@cached_method
def _e_to_p_on_basis(self, A):
r"""
Return `\mathbf{e}_A` in terms of the powersum basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{p}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
sage: all(e(e._e_to_p_on_basis(A)) == e[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
p = self.realization_of().p()
coeff = lambda B: prod([(-1)**(i-1) * factorial(i-1) for i in B.shape()])
R = self.base_ring()
return p._from_dict({B: R(coeff(B)) for B in A.refinements()},
remove_zeros=False)
class Element(CombinatorialFreeModule.Element):
"""
An element in the elementary basis of `NCSym`.
"""
def omega(self):
r"""
Return the involution `\omega` applied to ``self``.
The involution `\omega` on `NCSym` is defined by
`\omega(\mathbf{e}_A) = \mathbf{h}_A`.
OUTPUT:
- an element in the basis ``self``
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
sage: h = NCSym.h()
sage: elt = e[[1,3],[2]].omega(); elt
2*e{{1}, {2}, {3}} - e{{1, 3}, {2}}
sage: elt.omega()
e{{1, 3}, {2}}
sage: h(elt)
h{{1, 3}, {2}}
"""
P = self.parent()
h = P.realization_of().h()
return P(h.sum_of_terms(self))
def to_symmetric_function(self):
r"""
The projection of ``self`` to the symmetric functions.
Take a symmetric function in non-commuting variables
expressed in the `\mathbf{e}` basis, and return its projection
expressed in the elementary basis of symmetric functions.
The map `\chi \colon NCSym \to Sym` is given by
.. MATH::
\mathbf{e}_A \mapsto
e_{\lambda(A)} \prod_i \lambda(A)_i!
where `\lambda(A)` is the partition associated with `A` by
taking the sizes of the parts.
OUTPUT:
- An element of the symmetric functions in the elementary basis
EXAMPLES::
sage: e = SymmetricFunctionsNonCommutingVariables(QQ).e()
sage: e[[1,3],[2]].to_symmetric_function()
2*e[2, 1]
sage: e[[1],[3],[2]].to_symmetric_function()
e[1, 1, 1]
"""
e = SymmetricFunctions(self.parent().base_ring()).e()
c = lambda la: prod(factorial(i) for i in la)
return e.sum_of_terms((i.shape(), coeff*c(i.shape()))
for (i, coeff) in self)
e = elementary
class homogeneous(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the homogeneous basis.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: h = NCSym.h()
sage: h[[1,3],[2,4]]*h[[1,2,3]]
h{{1, 3}, {2, 4}, {5, 6, 7}}
sage: h[[1,2]].coproduct()
h{} # h{{1, 2}} + 2*h{{1}} # h{{1}} + h{{1, 2}} # h{}
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.h()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='h', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
# Register coercions
m = NCSym.m()
self.module_morphism(self._h_to_m_on_basis, codomain=m).register_as_coercion()
p = NCSym.p()
self.module_morphism(self._h_to_p_on_basis, codomain=p).register_as_coercion()
p.module_morphism(p._p_to_h_on_basis, codomain=self).register_as_coercion()
@cached_method
def _h_to_m_on_basis(self, A):
r"""
Return `\mathbf{h}_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{m}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: h = NCSym.h()
sage: all(h(h._h_to_m_on_basis(A)) == h[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
P = SetPartitions()
m = self.realization_of().m()
coeff = lambda B: prod(factorial(i) for i in B.shape())
R = self.base_ring()
return m._from_dict({P(B): R( coeff(A.inf(B)) )
for B in SetPartitions(A.size())}, remove_zeros=False)
@cached_method
def _h_to_e_on_basis(self, A):
r"""
Return `\mathbf{h}_A` in terms of the elementary basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{e}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: h = NCSym.h()
sage: all(h(h._h_to_e_on_basis(A)) == h[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
e = self.realization_of().e()
sign = lambda B: (-1)**(B.size() - len(B))
coeff = lambda B: (sign(B) * prod(factorial(sum( 1 for part in B if part.issubset(big) ))
for big in A))
R = self.base_ring()
return e._from_dict({B: R(coeff(B)) for B in A.refinements()},
remove_zeros=False)
@cached_method
def _h_to_p_on_basis(self, A):
r"""
Return `\mathbf{h}_A` in terms of the powersum basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{p}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: h = NCSym.h()
sage: all(h(h._h_to_p_on_basis(A)) == h[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
p = self.realization_of().p()
coeff = lambda B: abs( prod([(-1)**(i-1) * factorial(i-1) for i in B.shape()]) )
R = self.base_ring()
return p._from_dict({B: R(coeff(B)) for B in A.refinements()},
remove_zeros=False)
class Element(CombinatorialFreeModule.Element):
"""
An element in the homogeneous basis of `NCSym`.
"""
def omega(self):
r"""
Return the involution `\omega` applied to ``self``.
The involution `\omega` on `NCSym` is defined by
`\omega(\mathbf{h}_A) = \mathbf{e}_A`.
OUTPUT:
- an element in the basis ``self``
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: h = NCSym.h()
sage: e = NCSym.e()
sage: elt = h[[1,3],[2]].omega(); elt
2*h{{1}, {2}, {3}} - h{{1, 3}, {2}}
sage: elt.omega()
h{{1, 3}, {2}}
sage: e(elt)
e{{1, 3}, {2}}
"""
P = self.parent()
e = self.parent().realization_of().e()
return P(e.sum_of_terms(self))
def to_symmetric_function(self):
r"""
The projection of ``self`` to the symmetric functions.
Take a symmetric function in non-commuting variables
expressed in the `\mathbf{h}` basis, and return the projection of ``self``
expressed in the complete basis of symmetric functions.
The map `\chi \colon NCSym \to Sym` is given by
.. MATH::
\mathbf{h}_A \mapsto
h_{\lambda(A)} \prod_i \lambda(A)_i!
where `\lambda(A)` is the partition associated with `A` by
taking the sizes of the parts.
OUTPUT:
- An element of the symmetric functions in the complete basis
EXAMPLES::
sage: h = SymmetricFunctionsNonCommutingVariables(QQ).h()
sage: h[[1,3],[2]].to_symmetric_function()
2*h[2, 1]
sage: h[[1],[3],[2]].to_symmetric_function()
h[1, 1, 1]
"""
h = SymmetricFunctions(self.parent().base_ring()).h()
c = lambda la: prod(factorial(i) for i in la)
return h.sum_of_terms((i.shape(), coeff*c(i.shape()))
for (i, coeff) in self)
h = homogeneous
class powersum(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the powersum basis.
The powersum basis is given by
.. MATH::
\mathbf{p}_A = \sum_{A \leq B} \mathbf{m}_B,
where we sum over all coarsenings of the set partition `A`. If we
allow our variables to commute, then `\mathbf{p}_A` goes to the
usual powersum symmetric function `p_{\lambda}` whose (integer)
partition `\lambda` is the shape of `A`.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: p = NCSym.p()
sage: x = p.an_element()**2; x
4*p{} + 8*p{{1}} + 4*p{{1}, {2}} + 6*p{{1}, {2, 3}}
+ 12*p{{1, 2}} + 6*p{{1, 2}, {3}} + 9*p{{1, 2}, {3, 4}}
sage: x.to_symmetric_function()
4*p[] + 8*p[1] + 4*p[1, 1] + 12*p[2] + 12*p[2, 1] + 9*p[2, 2]
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.p()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='p', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
# Register coercions
m = NCSym.m()
self.module_morphism(self._p_to_m_on_basis, codomain=m,
unitriangular="lower").register_as_coercion()
m.module_morphism(m._m_to_p_on_basis, codomain=self,
unitriangular="lower").register_as_coercion()
x = NCSym.x()
self.module_morphism(self._p_to_x_on_basis, codomain=x,
unitriangular="upper").register_as_coercion()
x.module_morphism(x._x_to_p_on_basis, codomain=self,
unitriangular="upper").register_as_coercion()
@cached_method
def _p_to_m_on_basis(self, A):
r"""
Return `\mathbf{p}_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{m}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: p = NCSym.p()
sage: all(p(p._p_to_m_on_basis(A)) == p[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
m = self.realization_of().m()
one = self.base_ring().one()
return m._from_dict({B: one for B in A.coarsenings()}, remove_zeros=False)
@cached_method
def _p_to_e_on_basis(self, A):
r"""
Return `\mathbf{p}_A` in terms of the elementary basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{e}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: p = NCSym.p()
sage: all(p(p._p_to_e_on_basis(A)) == p[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
e = self.realization_of().e()
P_refine = Poset((A.refinements(), A.parent().lt))
c = prod((-1)**(i-1) * factorial(i-1) for i in A.shape())
R = self.base_ring()
return e._from_dict({B: R(P_refine.moebius_function(B, A) / ZZ(c))
for B in P_refine}, remove_zeros=False)
@cached_method
def _p_to_h_on_basis(self, A):
r"""
Return `\mathbf{p}_A` in terms of the homogeneous basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{h}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: p = NCSym.p()
sage: all(p(p._p_to_h_on_basis(A)) == p[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
h = self.realization_of().h()
P_refine = Poset((A.refinements(), A.parent().lt))
c = abs(prod((-1)**(i-1) * factorial(i-1) for i in A.shape()))
R = self.base_ring()
return h._from_dict({B: R(P_refine.moebius_function(B, A) / ZZ(c))
for B in P_refine}, remove_zeros=False)
@cached_method
def _p_to_x_on_basis(self, A):
r"""
Return `\mathbf{p}_A` in terms of the `\mathbf{x}` basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- An element of the `\mathbf{x}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: p = NCSym.p()
sage: all(p(p._p_to_x_on_basis(A)) == p[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
x = self.realization_of().x()
one = self.base_ring().one()
return x._from_dict({B: one for B in A.refinements()}, remove_zeros=False)
# Note that this is the same as the monomial coproduct_on_basis
def coproduct_on_basis(self, A):
r"""
Return the coproduct of a monomial basis element.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- The coproduct applied to the monomial symmetric function in
non-commuting variables indexed by ``A`` expressed in the
monomial basis.
EXAMPLES::
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).powersum()
sage: p[[1, 3], [2]].coproduct()
p{} # p{{1, 3}, {2}} + p{{1}} # p{{1, 2}} + p{{1, 2}} # p{{1}} + p{{1, 3}, {2}} # p{}
sage: p.coproduct_on_basis(SetPartition([[1]]))
p{} # p{{1}} + p{{1}} # p{}
sage: p.coproduct_on_basis(SetPartition([]))
p{} # p{}
"""
P = SetPartitions()
# Handle corner cases
if not A:
return self.tensor_square().monomial(( P([]), P([]) ))
if len(A) == 1:
return self.tensor_square().sum_of_monomials([(P([]), A), (A, P([]))])
ell_set = list(range(1, len(A) + 1)) # +1 for indexing
L = [[[], ell_set]] + list(SetPartitions(ell_set, 2))
def to_basis(S):
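# Relabel the union of the blocks of A indexed by S with 1, 2, ...
# in increasing order, so the result is a set partition of an initial
# segment (i.e. the standardization of those blocks).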
if not S:
return P([])
sub_parts = [list(A[i-1]) for i in S] # -1 for indexing
mins = [min(p) for p in sub_parts]
over_max = max([max(p) for p in sub_parts]) + 1
ret = [[] for i in range(len(S))]
cur = 1
while min(mins) != over_max:
m = min(mins)
i = mins.index(m)
ret[i].append(cur)
cur += 1
sub_parts[i].pop(sub_parts[i].index(m))
if sub_parts[i]:
mins[i] = min(sub_parts[i])
else:
mins[i] = over_max
return P(ret)
L1 = [(to_basis(S), to_basis(C)) for S,C in L]
L2 = [(M, N) for N,M in L1]
return self.tensor_square().sum_of_monomials(L1 + L2)
def internal_coproduct_on_basis(self, A):
r"""
Return the internal coproduct of a powersum basis element.
The internal coproduct is defined by
.. MATH::
\Delta^{\odot}(\mathbf{p}_A) = \mathbf{p}_A \otimes
\mathbf{p}_A
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the tensor square of ``self``
EXAMPLES::
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).powersum()
sage: p.internal_coproduct_on_basis(SetPartition([[1,3],[2]]))
p{{1, 3}, {2}} # p{{1, 3}, {2}}
"""
return self.tensor_square().monomial((A, A))
def antipode_on_basis(self, A):
r"""
Return the result of the antipode applied to a powersum basis element.
Let `A` be a set partition. The antipode given in [LM2011]_ is
.. MATH::
S(\mathbf{p}_A) = \sum_{\gamma} (-1)^{\ell(\gamma)}
\mathbf{p}_{\gamma[A]}
where we sum over all ordered set partitions (i.e. set
compositions) of `[\ell(A)]` and
.. MATH::
\gamma[A] = A_{\gamma_1}^{\downarrow} | \cdots |
A_{\gamma_{\ell(A)}}^{\downarrow}
is the action of `\gamma` on `A` defined in
:meth:`SetPartition.ordered_set_partition_action()`.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element in the basis ``self``
EXAMPLES::
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).powersum()
sage: p.antipode_on_basis(SetPartition([[1], [2,3]]))
p{{1, 2}, {3}}
sage: p.antipode_on_basis(SetPartition([]))
p{}
sage: F = p[[1,3],[5],[2,4]].coproduct()
sage: F.apply_multilinear_morphism(lambda x,y: x.antipode()*y)
0
"""
P = SetPartitions()
def action(gamma):
cur = 1
ret = []
for S in gamma:
sub_parts = [list(A[i-1]) for i in S] # -1 for indexing
mins = [min(p) for p in sub_parts]
over_max = max([max(p) for p in sub_parts]) + 1
temp = [[] for i in range(len(S))]
while min(mins) != over_max:
m = min(mins)
i = mins.index(m)
temp[i].append(cur)
cur += 1
sub_parts[i].pop(sub_parts[i].index(m))
if sub_parts[i]:
mins[i] = min(sub_parts[i])
else:
mins[i] = over_max
ret += temp
return P(ret)
return self.sum_of_terms( (A.ordered_set_partition_action(gamma), (-1)**len(gamma))
for gamma in OrderedSetPartitions(len(A)) )
def primitive(self, A, i=1):
r"""
Return the primitive associated to ``A`` in ``self``.
Fix some `i \in S`. Let `A` be an atomic set partition of `S`,
then the primitive `p(A)` given in [LM2011]_ is
.. MATH::
p(A) = \sum_{\gamma} (-1)^{\ell(\gamma)-1}
\mathbf{p}_{\gamma[A]}
where we sum over all ordered set partitions of `[\ell(A)]` such
that `i \in \gamma_1` and `\gamma[A]` is the action of `\gamma`
on `A` defined in
:meth:`SetPartition.ordered_set_partition_action()`.
If `A` is not atomic, then `p(A) = 0`.
.. SEEALSO:: :meth:`SetPartition.is_atomic`
INPUT:
- ``A`` -- a set partition
- ``i`` -- (default: 1) index in the base set for ``A`` specifying
which set of primitives this belongs to
OUTPUT:
- an element in the basis ``self``
EXAMPLES::
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).powersum()
sage: elt = p.primitive(SetPartition([[1,3], [2]])); elt
-p{{1, 2}, {3}} + p{{1, 3}, {2}}
sage: elt.coproduct()
-p{} # p{{1, 2}, {3}} + p{} # p{{1, 3}, {2}} - p{{1, 2}, {3}} # p{} + p{{1, 3}, {2}} # p{}
sage: p.primitive(SetPartition([[1], [2,3]]))
0
sage: p.primitive(SetPartition([]))
p{}
"""
if not A:
return self.one()
A = SetPartitions()(A) # Make sure it's a set partition
if not A.is_atomic():
return self.zero()
return self.sum_of_terms( (A.ordered_set_partition_action(gamma), (-1)**(len(gamma)-1))
for gamma in OrderedSetPartitions(len(A)) if i in gamma[0] )
class Element(CombinatorialFreeModule.Element):
"""
An element in the powersum basis of `NCSym`.
"""
def to_symmetric_function(self):
r"""
The projection of ``self`` to the symmetric functions.
Take a symmetric function in non-commuting variables
expressed in the `\mathbf{p}` basis, and return the projection of ``self``
expressed in the powersum basis of symmetric functions.
The map `\chi \colon NCSym \to Sym` is given by
.. MATH::
\mathbf{p}_A \mapsto p_{\lambda(A)}
where `\lambda(A)` is the partition associated with `A` by
taking the sizes of the parts.
OUTPUT:
- an element of symmetric functions in the power sum basis
EXAMPLES::
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).p()
sage: p[[1,3],[2]].to_symmetric_function()
p[2, 1]
sage: p[[1],[3],[2]].to_symmetric_function()
p[1, 1, 1]
"""
p = SymmetricFunctions(self.parent().base_ring()).p()
return p.sum_of_terms((i.shape(), coeff) for (i, coeff) in self)
p = powersum
class coarse_powersum(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the `\mathbf{cp}` basis.
This basis was defined in [BZ05]_ as
.. MATH::
\mathbf{cp}_A = \sum_{A \leq_* B} \mathbf{m}_B,
where we sum over all strict coarsenings of the set partition `A`.
An alternative description of this basis was given in [BT13]_ as
.. MATH::
\mathbf{cp}_A = \sum_{A \subseteq B} \mathbf{m}_B,
where we sum over all set partitions whose arcs are a subset of
the arcs of the set partition `A`.
.. NOTE::
In [BZ05]_, this basis was denoted by `\mathbf{q}`. In [BT13]_,
this basis was called the powersum basis and denoted by `p`.
However it is a coarser basis than the usual powersum basis in
the sense that it does not yield the usual powersum basis
of the symmetric function under the natural map of letting
the variables commute.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: cp = NCSym.cp()
sage: cp[[1,3],[2,4]]*cp[[1,2,3]]
cp{{1, 3}, {2, 4}, {5, 6, 7}}
sage: cp[[1,2],[3]].internal_coproduct()
cp{{1, 2}, {3}} # cp{{1, 2}, {3}}
sage: ps = SymmetricFunctions(NCSym.base_ring()).p()
sage: ps(cp[[1,3],[2]].to_symmetric_function())
p[2, 1] - p[3]
sage: ps(cp[[1,2],[3]].to_symmetric_function())
p[2, 1]
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.cp()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='cp', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
# Register coercions
m = NCSym.m()
self.module_morphism(self._cp_to_m_on_basis, codomain=m,
unitriangular="lower").register_as_coercion()
m.module_morphism(m._m_to_cp_on_basis, codomain=self,
unitriangular="lower").register_as_coercion()
@cached_method
def _cp_to_m_on_basis(self, A):
r"""
Return `\mathbf{cp}_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\mathbf{m}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: cp = NCSym.cp()
sage: all(cp(cp._cp_to_m_on_basis(A)) == cp[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
m = self.realization_of().m()
one = self.base_ring().one()
return m._from_dict({B: one for B in A.strict_coarsenings()},
remove_zeros=False)
cp = coarse_powersum
class x_basis(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the `\mathbf{x}` basis.
This basis is defined in [BHRZ06]_ by the formula:
.. MATH::
\mathbf{x}_A = \sum_{B \leq A} \mu(B, A) \mathbf{p}_B
and has the following properties:
.. MATH::
\mathbf{x}_A \mathbf{x}_B = \mathbf{x}_{A|B}, \quad \quad
\Delta^{\odot}(\mathbf{x}_C) = \sum_{A \vee B = C} \mathbf{x}_A
\otimes \mathbf{x}_B.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: x = NCSym.x()
sage: x[[1,3],[2,4]]*x[[1,2,3]]
x{{1, 3}, {2, 4}, {5, 6, 7}}
sage: x[[1,2],[3]].internal_coproduct()
x{{1}, {2}, {3}} # x{{1, 2}, {3}} + x{{1, 2}, {3}} # x{{1}, {2}, {3}} +
x{{1, 2}, {3}} # x{{1, 2}, {3}}
"""
def __init__(self, NCSym):
"""
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: TestSuite(NCSym.x()).run()
"""
CombinatorialFreeModule.__init__(self, NCSym.base_ring(), SetPartitions(),
prefix='x', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
@cached_method
def _x_to_p_on_basis(self, A):
r"""
Return `\mathbf{x}_A` in terms of the powersum basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\mathbf{p}` basis
TESTS::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: x = NCSym.x()
sage: all(x(x._x_to_p_on_basis(A)) == x[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
def lt(s, t):
if s == t:
return False
for p in s:
if len([z for z in t if z.intersection(p)]) != 1:
return False
return True
p = self.realization_of().p()
P_refine = Poset((A.refinements(), lt))
R = self.base_ring()
return p._from_dict({B: R(P_refine.moebius_function(B, A))
for B in P_refine})
x = x_basis
class deformed_coarse_powersum(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the `\rho` basis.
This basis was defined in [BT13]_ as a `q`-deformation of the
`\mathbf{cp}` basis:
.. MATH::
\rho_A = \sum_{A \subseteq B}
\frac{1}{q^{\operatorname{nst}_{B-A}^A}} \mathbf{m}_B,
where we sum over all set partitions whose arcs are a subset of
the arcs of the set partition `A`.
INPUT:
- ``q`` -- (default: ``2``) the parameter `q`
EXAMPLES::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: rho = NCSym.rho(q)
We construct Example 3.1 in [BT13]_::
sage: rnode = lambda A: sorted([a[1] for a in A.arcs()], reverse=True)
sage: dimv = lambda A: sorted([a[1]-a[0] for a in A.arcs()], reverse=True)
sage: lst = list(SetPartitions(4))
sage: S = sorted(lst, key=lambda A: (dimv(A), rnode(A)))
sage: m = NCSym.m()
sage: matrix([[m(rho[A])[B] for B in S] for A in S])
[ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
[ 0 1 0 0 1 1 0 1 0 0 1 0 0 0 0]
[ 0 0 1 0 1 0 1 1 0 0 0 0 0 0 1]
[ 0 0 0 1 0 1 1 1 0 0 0 1 0 0 0]
[ 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0]
[ 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 1 0 0 1 1 0 0]
[ 0 0 0 0 0 0 0 0 0 1 1 0 1 0 0]
[ 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1/q]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]
"""
def __init__(self, NCSym, q=2):
"""
EXAMPLES::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: TestSuite(NCSym.rho(q)).run()
"""
R = NCSym.base_ring()
self._q = R(q)
CombinatorialFreeModule.__init__(self, R, SetPartitions(),
prefix='rho', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
# Register coercions
m = NCSym.m()
self.module_morphism(self._rho_to_m_on_basis, codomain=m).register_as_coercion()
m.module_morphism(self._m_to_rho_on_basis, codomain=self).register_as_coercion()
def q(self):
"""
Return the deformation parameter `q` of ``self``.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: rho = NCSym.rho(5)
sage: rho.q()
5
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: rho = NCSym.rho(q)
sage: rho.q() == q
True
"""
return self._q
@cached_method
def _rho_to_m_on_basis(self, A):
r"""
Return `\rho_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\mathbf{m}` basis
TESTS::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: rho = NCSym.rho(q)
sage: all(rho(rho._rho_to_m_on_basis(A)) == rho[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
m = self.realization_of().m()
arcs = set(A.arcs())
return m._from_dict({B: self._q**-nesting(set(B).difference(A), A)
for B in A.coarsenings() if arcs.issubset(B.arcs())},
remove_zeros=False)
@cached_method
def _m_to_rho_on_basis(self, A):
r"""
Return `\mathbf{m}_A` in terms of the `\rho` basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\rho` basis
TESTS::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: rho = NCSym.rho(q)
sage: m = NCSym.m()
sage: all(m(rho._m_to_rho_on_basis(A)) == m[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
coeff = lambda A,B: ((-1)**len(set(B.arcs()).difference(A.arcs()))
/ self._q**nesting(set(B).difference(A), B))
arcs = set(A.arcs())
return self._from_dict({B: coeff(A,B) for B in A.coarsenings()
if arcs.issubset(B.arcs())},
remove_zeros=False)
rho = deformed_coarse_powersum
class supercharacter(NCSymBasis_abstract):
r"""
The Hopf algebra of symmetric functions in non-commuting variables
in the supercharacter `\chi` basis.
This basis was defined in [BT13]_ as a `q`-deformation of the
supercharacter basis.
.. MATH::
\chi_A = \sum_B \chi_A(B) \mathbf{m}_B,
where we sum over all set partitions `A` and `\chi_A(B)` is the
evaluation of the supercharacter `\chi_A` on the superclass `\mu_B`.
.. NOTE::
The supercharacters considered in [BT13]_ are coarser than
those considered by Aguiar et al.
INPUT:
- ``q`` -- (default: ``2``) the parameter `q`
EXAMPLES::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: chi = NCSym.chi(q)
sage: chi[[1,3],[2]]*chi[[1,2]]
chi{{1, 3}, {2}, {4, 5}}
sage: chi[[1,3],[2]].coproduct()
chi{} # chi{{1, 3}, {2}} + (2*q-2)*chi{{1}} # chi{{1}, {2}} +
(3*q-2)*chi{{1}} # chi{{1, 2}} + (2*q-2)*chi{{1}, {2}} # chi{{1}} +
(3*q-2)*chi{{1, 2}} # chi{{1}} + chi{{1, 3}, {2}} # chi{}
sage: chi2 = NCSym.chi()
sage: chi(chi2[[1,2],[3]])
((-q+2)/q)*chi{{1}, {2}, {3}} + 2/q*chi{{1, 2}, {3}}
sage: chi2
Symmetric functions in non-commuting variables over the Fraction Field
of Univariate Polynomial Ring in q over Rational Field in the
supercharacter basis with parameter q=2
"""
def __init__(self, NCSym, q=2):
"""
EXAMPLES::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: TestSuite(NCSym.chi(q)).run()
"""
R = NCSym.base_ring()
self._q = R(q)
CombinatorialFreeModule.__init__(self, R, SetPartitions(),
prefix='chi', bracket=False,
category=MultiplicativeNCSymBases(NCSym))
# Register coercions
m = NCSym.m()
self.module_morphism(self._chi_to_m_on_basis, codomain=m).register_as_coercion()
m.module_morphism(self._m_to_chi_on_basis, codomain=self).register_as_coercion()
def q(self):
"""
Return the deformation parameter `q` of ``self``.
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: chi = NCSym.chi(5)
sage: chi.q()
5
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: chi = NCSym.chi(q)
sage: chi.q() == q
True
"""
return self._q
@cached_method
def _chi_to_m_on_basis(self, A):
r"""
Return `\chi_A` in terms of the monomial basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\mathbf{m}` basis
TESTS::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: chi = NCSym.chi(q)
sage: all(chi(chi._chi_to_m_on_basis(A)) == chi[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
m = self.realization_of().m()
q = self._q
arcs = set(A.arcs())
ret = {}
for B in SetPartitions(A.size()):
Barcs = B.arcs()
if any((a[0] == b[0] and b[1] < a[1])
or (b[0] > a[0] and a[1] == b[1])
for a in arcs for b in Barcs):
continue
ret[B] = ((-1)**len(arcs.intersection(Barcs))
* (q - 1)**(len(arcs) - len(arcs.intersection(Barcs)))
* q**(sum(a[1] - a[0] for a in arcs) - len(arcs))
/ q**nesting(B, A))
return m._from_dict(ret, remove_zeros=False)
@cached_method
def _graded_inverse_matrix(self, n):
r"""
Return the inverse of the transition matrix of the ``n``-th
graded part from the `\chi` basis to the monomial basis.
EXAMPLES::
sage: R = QQ['q'].fraction_field(); q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: chi = NCSym.chi(q); m = NCSym.m()
sage: lst = list(SetPartitions(2))
sage: m = matrix([[m(chi[A])[B] for A in lst] for B in lst]); m
[ -1 1]
[q - 1 1]
sage: chi._graded_inverse_matrix(2)
[ -1/q 1/q]
[(q - 1)/q 1/q]
sage: chi._graded_inverse_matrix(2) * m
[1 0]
[0 1]
"""
lst = SetPartitions(n)
MS = MatrixSpace(self.base_ring(), lst.cardinality())
m = self.realization_of().m()
m = MS([[m(self[A])[B] for A in lst] for B in lst])
return ~m
@cached_method
def _m_to_chi_on_basis(self, A):
r"""
Return `\mathbf{m}_A` in terms of the `\chi` basis.
INPUT:
- ``A`` -- a set partition
OUTPUT:
- an element of the `\chi` basis
TESTS::
sage: R = QQ['q'].fraction_field()
sage: q = R.gen()
sage: NCSym = SymmetricFunctionsNonCommutingVariables(R)
sage: chi = NCSym.chi(q)
sage: m = NCSym.m()
sage: all(m(chi._m_to_chi_on_basis(A)) == m[A] for i in range(5)
....: for A in SetPartitions(i))
True
"""
n = A.size()
lst = list(SetPartitions(n))
m = self._graded_inverse_matrix(n)
i = lst.index(A)
return self._from_dict({B: m[j,i] for j,B in enumerate(lst)})
chi = supercharacter
|
py
|
1a5cac12e8c98c2551bcf7b8565e38df1bde41c2
|
import morpholib as morpho
morpho.importAll()
def main():
grid0 = morpho.grid.mathgrid(
tweenMethod=morpho.grid.Path.tweenSpiral,
transition=morpho.transition.quadease
)
grid = morpho.Actor(grid0)
mation = morpho.Animation(grid)
grid.newendkey(60, grid0.fimage(lambda s: s**2/10))
mation.endDelay(30)
grid.newendkey(60, grid0.fimage(lambda s: s**3/64))
mation.endDelay(30)
grid.newendkey(60, grid0.fimage(lambda s: s**4/8**3))
mation.endDelay(30)
grid.newendkey(60, grid0.fimage(lambda s: s**5/8**4))
mation.endDelay(30)
grid.newendkey(60, grid0.copy())
mation.play()
main()
|
py
|
1a5cad1c23304da1a661b160551c10df63674023
|
# League of Legends Statistics Chat Bot
# A chat bot written in Python that provides match statistics right to your Twitch chat.
# 2015 Benjamin Chu - https://github.com/blenderben
import socket # imports module allowing connection to IRC
import threading # imports module allowing timing functions
import requests # imports module allowing requests
import json
import time
import calendar # imports module allowing epoch time
import ConfigParser # imports module allowing reading of .ini files
import os # for relative pathing
import string # for string manipulation
# from routes import API_ROUTES
class API_ROUTES:
# summoner-v1.4 - get summoner id data
summoner_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.4/summoner/by-name/{summonername}?api_key={key}'
# summoner-v1.4 - summoner mastery data
summonermastery_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.4/summoner/{summonerid}/masteries?api_key={key}'
# league-v2.5 - summoner league data
summonerleague_url = 'https://{region}.api.pvp.net/api/lol/{region}/v2.5/league/by-summoner/{summonerid}/entry?api_key={key}'
# lol-static-data-v1.2 - static champion data
championstaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/champion/{championid}?champData=all&api_key={key}'
# lol-static-data-v1.2 - static rune data
runestaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/rune/{runeid}?runeData=all&api_key={key}'
# lol-static-data-v1.2 - static mastery data
masterystaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/mastery/{masteryid}?masteryData=all&api_key={key}'
# lol-static-data-v1.2 - static spell data
spellstaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/summoner-spell/{spellid}?api_key={key}'
# current-game-v1.0 - current game data
current_url = 'https://{region}.api.pvp.net/observer-mode/rest/consumer/getSpectatorGameInfo/{region_upper}1/{summonerid}?api_key={key}'
# game-v1.3 - historic game data
last_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.3/game/by-summoner/{summonerid}/recent?api_key={key}'
# op.gg
opgg_url = 'http://{region}.op.gg/summoner/userName={summonername}'
opgg_masteries_url = 'http://{region}.op.gg/summoner/mastery/userName={summonername}'
opgg_runes_url = 'http://{region}.op.gg/summoner/rune/userName={summonername}'
opgg_matches_url = 'http://{region}.op.gg/summoner/matches/userName={summonername}'
opgg_leagues_url = 'http://{region}.op.gg/summoner/league/userName={summonername}'
opgg_champions_url = 'http://{region}.op.gg/summoner/champions/userName={summonername}'
# LoLNexus
lolnexus_url = 'http://www.lolnexus.com/{region}/search?name={summonername}&server={region}'
# LoLKing
lolking_url = 'http://www.lolking.net/summoner/{region}/{summonerid}'
# LoLSkill
lolskill_url = 'http://www.lolskill.net/summoner/{region}/{summonername}'
# ====== READ CONFIG ======
Config = ConfigParser.ConfigParser()
Config.read(os.path.dirname(os.path.abspath(__file__)) + '/config.ini')
def ConfigSectionMap(section):
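# Read one section of config.ini into a plain dict keyed by option name.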
temp_dict = {}
options = Config.options(section)
for option in options:
try:
temp_dict[option] = Config.get(section, option)
if temp_dict[option] == -1:
print('skip: %s' % option)
except:
print('exception on %s!' % option)
temp_dict[option] = None
return temp_dict
# ====== CONNECTION INFO ======
# Set variables for connection
botOwner = ConfigSectionMap('settings')['botowner']
nick = ConfigSectionMap('settings')['nick']
channel = '#' + ConfigSectionMap('settings')['channel']
server = ConfigSectionMap('settings')['server']
port = int(ConfigSectionMap('settings')['port'])
password = ConfigSectionMap('settings')['oauth']
# ====== RIOT API PRELIM DATA ======
api_key = ConfigSectionMap('settings')['api']
# Riot API Information
summonerName = ConfigSectionMap('settings')['summonername'].lower()
summonerName = summonerName.replace(" ", "")
region = ConfigSectionMap('settings')['region']
summoner_url = API_ROUTES.summoner_url.format(region=region, summonername=summonerName, key=api_key)
# Initial Data Load // Get Summoner ID and Level
summonerName_dict = requests.get(summoner_url).json()
summonerID = str(summonerName_dict[summonerName]['id'])
summonerLevel = str(summonerName_dict[summonerName]['summonerLevel'])
# ====== RIOT API FUNCTIONS ======
def about(ircname):
return 'Hello ' + ircname + '! I am a League of Legends statistics chat bot. My creator is blenderben [ https://github.com/blenderben/LoLStatBot ].'\
+ ' I am currently assigned to summoner ' + summonerName.upper() + ' [ID:' + getSummonerID() + '].'
def getCommands():
return 'Available commands: ['\
+ ' !about, !summoner, !league, !last, !current, !runes, !mastery, !opgg, !lolnexus, !lolking, !lolskill ]'
def getSummonerInfo():
return summonerName.upper() + ' is summoner level ' + getSummonerLevel() + ', playing in Region: ' + region.upper() + ' // ' + opgg('')
def opgg(details):
if details == 'runes':
return API_ROUTES.opgg_runes_url.format(region=region, summonername=summonerName)
elif details == 'masteries':
return API_ROUTES.opgg_masteries_url.format(region=region, summonername=summonerName)
elif details == 'matches':
return API_ROUTES.opgg_matches_url.format(region=region, summonername=summonerName)
elif details == 'leagues':
return API_ROUTES.opgg_leagues_url.format(region=region, summonername=summonerName)
elif details == 'champions':
return API_ROUTES.opgg_champions_url.format(region=region, summonername=summonerName)
else:
return API_ROUTES.opgg_url.format(region=region, summonername=summonerName)
def lolnexus():
return API_ROUTES.lolnexus_url.format(region=region, summonername=summonerName)
def lolking(details):
if details == 'runes':
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#runes'
elif details == 'masteries':
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#masteries'
elif details == 'matches':
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#matches'
elif details == 'rankedstats':
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#ranked-stats'
elif details == 'leagues':
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#leagues'
else:
return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID)
def lolskill(details):
if details == 'runes':
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/runes'
elif details == 'masteries':
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/masteries'
elif details == 'matches':
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/matches'
elif details == 'stats':
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/stats'
elif details == 'champions':
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/champions'
else:
return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName)
def getTeamColor(teamid):
if teamid == 100:
return 'Blue Team'
elif teamid == 200:
return 'Purple Team'
else:
return 'No Team'
def getWinLoss(win):
if win == True:
return 'WON'
elif win == False:
return 'LOST'
else:
return 'TIED'
def getTimePlayed(time):
if time > 3600:
hours = time / 3600
minutes = time % 3600 / 60
seconds = time % 3600 % 60
if hours > 1:
return str(hours) + ' hours & ' + str(minutes) + ' minutes & ' + str(seconds) + ' seconds'
else:
return str(hours) + ' hour & ' + str(minutes) + ' minutes & ' + str(seconds) + ' seconds'
elif time > 60:
minutes = time / 60
seconds = time % 60
return str(minutes) + ' minutes & ' + str(seconds) + ' seconds'
else:
return str(time) + ' seconds'
def getKDA(kills, deaths, assists):
if deaths < 1:
return 'PERFECT'
else:
kda = (float(kills) + float(assists)) / float(deaths)
kda = round(kda, 2)
return str(kda) + ':1'
def getChampionbyID(championid):
tempDict = requests.get(API_ROUTES.championstaticdata_url.format(region=region, championid=int(championid), key=api_key)).json()
name = tempDict['name'] + " " + tempDict['title']
return name
def getSpellbyID(spellid):
tempDict = requests.get(API_ROUTES.spellstaticdata_url.format(region=region, spellid=int(spellid), key=api_key)).json()
spellName = tempDict['name']
return spellName
# Refresh / Get Summoner ID
def getSummonerID():
global summonerID
try:
tempDict = requests.get(summoner_url).json()
summonerID = str(tempDict[summonerName]['id'])
return summonerID
except:
print 'Riot API Down'
return 1
# Refresh / Get Summoner Level
def getSummonerLevel():
global summonerLevel
tempDict = requests.get(summoner_url).json()
summonerLevel = str(tempDict[summonerName]['summonerLevel'])
return summonerLevel
def getWinRatio(win, loss):
total = float(win) + float(loss)
ratio = win / total
ratioPercent = round(ratio * 100, 1)
return str(ratioPercent) + '%'
def getStats():
# Function to eventually get statistics, avg kills, etc, for now, output Stats page from Lolskill
return lolskill('stats')
def getSummonerMastery():
tempDict = requests.get(API_ROUTES.summonermastery_url.format(region=region, summonerid=summonerID, key=api_key)).json()
i = 0
masteryIDList = []
masteryRank = []
for pages in tempDict[summonerID]['pages']:
if bool(pages.get('current')) == True:
pageName = tempDict[summonerID]['pages'][i]['name']
for mastery in tempDict[summonerID]['pages'][i]['masteries']:
masteryIDList.append(mastery.get('id'))
masteryRank.append(mastery.get('rank'))
else:
i += 1
return getCurrentMastery(masteryIDList, masteryRank) + ' // Mastery Name: ' + pageName
def getLeagueInfo():
try:
tempDict = requests.get(API_ROUTES.summonerleague_url.format(region=region, summonerid=summonerID, key=api_key)).json()
LEAGUE_TIER = string.capwords(tempDict[summonerID][0]['tier'])
LEAGUE_QUEUE = tempDict[summonerID][0]['queue'].replace('_', ' ')
LEAGUE_DIVISION = tempDict[summonerID][0]['entries'][0]['division']
LEAGUE_WINS = tempDict[summonerID][0]['entries'][0]['wins']
LEAGUE_LOSSES = tempDict[summonerID][0]['entries'][0]['losses']
LEAGUE_POINTS = tempDict[summonerID][0]['entries'][0]['leaguePoints']
# LEAGUE_ISVETERAN = tempDict[summonerID][0]['entries'][0]['isHotStreak']
# LEAGUE_ISHOTSTREAK = tempDict[summonerID][0]['entries'][0]['isVeteran']
# LEAGUE_ISFRESHBLOOD = tempDict[summonerID][0]['entries'][0]['isFreshBlood']
# LEAGUE_ISINACTIVE = tempDict[summonerID][0]['entries'][0]['isInactive']
return summonerName.upper() + ' is ' + LEAGUE_TIER + ' ' + LEAGUE_DIVISION + ' in ' + LEAGUE_QUEUE\
+ ' // ' + str(LEAGUE_WINS) + 'W / ' + str(LEAGUE_LOSSES) + 'L (Win Ratio ' + getWinRatio(LEAGUE_WINS, LEAGUE_LOSSES) + ')'\
+ ' // LP: ' + str(LEAGUE_POINTS)\
+ ' // ' + lolking('leagues')
except:
return 'Summoner ' + summonerName.upper() + ' has not played any Ranked Solo 5x5 matches'\
+ ' // ' + lolking('leagues')
# Get Current Match Stats
def getCurrent(details):
try:
current_api_url = API_ROUTES.current_url.format(region=region, region_upper=region.upper(), summonerid=summonerID, key=api_key)
tempDict = requests.get(current_api_url).json()
CURRENT_GAMEMODE = tempDict['gameMode']
CURRENT_GAMELENGTH = tempDict['gameLength']
CURRENT_GAMETYPE = tempDict['gameType'].replace('_', ' ')
CURRENT_TIME = calendar.timegm(time.gmtime())
CURRENT_EPOCHTIME = tempDict['gameStartTime'] / 1000
if CURRENT_EPOCHTIME <= 0:
CURRENT_TIMEDIFF = 0
else:
CURRENT_TIMEDIFF = CURRENT_TIME - CURRENT_EPOCHTIME
if CURRENT_TIMEDIFF < 0:
CURRENT_TIMEDIFF = 0
runeIDList = []
runeCount = []
masteryIDList = []
masteryRank = []
i = 0
for participant in tempDict['participants']:
if int(summonerID) == int(participant.get('summonerId')):
CURRENT_TEAM = participant.get('teamId')
CURRENT_CHAMPION = participant.get('championId')
CURRENT_SPELL1 = participant.get('spell1Id')
CURRENT_SPELL2 = participant.get('spell2Id')
for rune in tempDict['participants'][i]['runes']:
runeIDList.append(rune.get('runeId'))
runeCount.append(rune.get('count'))
for mastery in tempDict['participants'][i]['masteries']:
masteryIDList.append(mastery.get('masteryId'))
masteryRank.append(mastery.get('rank'))
else:
i += 1
runeCountOutput = ''
runeBonusOutput = ''
for x in range(len(runeIDList)):
runeCountOutput += ' [' + getCurrentRuneTotal(runeIDList[x], runeCount[x]) + '] '
runeBonusOutput += ' [' + getCurrentRuneBonusTotal(runeIDList[x], runeCount[x]) + '] '
masteryOutput = getCurrentMastery(masteryIDList, masteryRank)
if details == 'runes':
return 'Current Runes: ' + runeCountOutput\
+ ' // Rune Bonuses: ' + runeBonusOutput\
+ ' // ' + lolskill('runes')
elif details == 'masteries':
return 'Current Mastery Distribution: ' + masteryOutput\
+ ' // ' + lolskill('masteries')
else:
return summonerName.upper()\
+ ' is currently playing ' + CURRENT_GAMEMODE + ' ' + CURRENT_GAMETYPE\
+ ' with ' + getChampionbyID(CURRENT_CHAMPION)\
+ ' on the ' + getTeamColor(CURRENT_TEAM)\
+ ' // Elapsed Time: ' + getTimePlayed(CURRENT_TIMEDIFF)\
+ ' // Spells Chosen: ' + getSpellbyID(CURRENT_SPELL1) + ' & ' + getSpellbyID(CURRENT_SPELL2)\
+ ' // Mastery Distribution: ' + masteryOutput\
+ ' // Rune Bonuses: ' + runeBonusOutput\
+ ' // ' + lolnexus()
except:
if details == 'runes':
return 'Summoner ' + summonerName.upper() + ' needs to currently be in a game for current Rune data to display'\
+ ' // ' + lolking('runes')
elif details == 'masteries':
return 'Current Mastery Distribution: ' + getSummonerMastery() + ' // ' + lolskill('masteries')
else:
return 'The summoner ' + summonerName.upper() + ' is not currently in a game.'
def getCurrentMastery(masteryidlist, masteryrank):
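# Tally the ranks spent in each mastery tree and return them as an
# '(Offense/Defense/Utility)' triple, e.g. '(21/9/0)'.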
offense = 0
defense = 0
utility = 0
for x in range(len(masteryidlist)):
masteryID = masteryidlist[x]
tempDict = requests.get(API_ROUTES.masterystaticdata_url.format(region=region, masteryid=masteryID, key=api_key)).json()
masteryTree = tempDict['masteryTree']
ranks = int(masteryrank[x])
if masteryTree == 'Offense':
offense += ranks
elif masteryTree == 'Defense':
defense += ranks
else:
utility += ranks
return '(' + str(offense) + '/' + str(defense) + '/' + str(utility) + ')'
def getCurrentRuneTotal(runeid, count):
tempDict = requests.get(API_ROUTES.runestaticdata_url.format(region=region, runeid=runeid, key=api_key)).json()
runeName = tempDict['name']
return str(count) + 'x ' + runeName
def getCurrentRuneBonusTotal(runeid, count):
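# Scale the per-rune bonus by the rune count; e.g. a (hypothetical)
# description of '+0.95 attack damage' with a count of 9 comes back as
# '+8.55 Attack Damage'.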
tempDict = requests.get(API_ROUTES.runestaticdata_url.format(region=region, runeid=runeid, key=api_key)).json()
runeBonus = tempDict['description']
try:
runeBonus.split('/')[1]
except IndexError:
# Single Bonus
value = runeBonus.split()[0]
value = value.replace('+', '').replace('%', '').replace('-', '')
valueCount = float(value) * float(count)
valueCount = round(valueCount, 2)
description = tempDict['description'].split(' (', 1)[0]
description = string.capwords(description)
description = description.replace(value, str(valueCount))
return description
else:
# Hybrid Bonus
value = runeBonus.split()[0]
value = value.replace('+', '').replace('%', '').replace('-', '')
valueCount = float(value) * float(count)
valueCount = round(valueCount, 2)
firstDescription = runeBonus.split('/')[0].strip()
firstDescription = firstDescription.split(' (', 1)[0]
firstDescription = string.capwords(firstDescription)
firstDescription = firstDescription.replace(value, str(valueCount))
value = runeBonus.split('/')[1].strip()
if value.split()[1] == 'sec.':
return firstDescription + ' / 5 Sec.'
else:
value = value.split()[0]
value = value.replace('+', '').replace('%', '').replace('-', '')
valueCount = float(value) * float(count)
valueCount = round(valueCount, 2)
secondDescription = runeBonus.split('/')[1].strip()
secondDescription = secondDescription.split(' (', 1)[0]
secondDescription = string.capwords(secondDescription)
secondDescription = secondDescription.replace(value, str(valueCount))
return firstDescription + ' / ' + secondDescription
# Get Last Match Stats
def getLast():
tempDict = requests.get(API_ROUTES.last_url.format(region=region, summonerid=summonerID, key=api_key)).json()
LAST_GAMEID = tempDict['games'][0]['gameId']
# LAST_GAMEMODE = tempDict['games'][0]['gameMode']
LAST_SUBTYPE = tempDict['games'][0]['subType'].replace('_', ' ')
LAST_GAMETYPE = tempDict['games'][0]['gameType'].replace('_GAME', '')
LAST_TIMEPLAYED = tempDict['games'][0]['stats']['timePlayed']
LAST_WIN = tempDict['games'][0]['stats']['win']
LAST_GOLDSPENT = tempDict['games'][0]['stats']['goldSpent']
LAST_GOLDEARNED = tempDict['games'][0]['stats']['goldEarned']
LAST_CHAMPION_ID = str(tempDict['games'][0]['championId'])
LAST_IPEARNED = str(tempDict['games'][0]['ipEarned'])
LAST_LEVEL = str(tempDict['games'][0]['stats']['level'])
LAST_SPELL1 = tempDict['games'][0]['spell1']
LAST_SPELL2 = tempDict['games'][0]['spell2']
LAST_CHAMPIONSKILLED = str(tempDict['games'][0]['stats'].get('championsKilled', 0))
LAST_NUMDEATHS = str(tempDict['games'][0]['stats'].get('numDeaths', 0))
LAST_ASSISTS = str(tempDict['games'][0]['stats'].get('assists', 0))
LAST_TOTALDAMAGECHAMPIONS = str(tempDict['games'][0]['stats']['totalDamageDealtToChampions'])
LAST_MINIONSKILLED = str(tempDict['games'][0]['stats']['minionsKilled'])
LAST_WARDSPLACED = str(tempDict['games'][0]['stats'].get('wardPlaced', 0))
output = summonerName.upper() + ' ' + getWinLoss(LAST_WIN)\
+ ' the last ' + LAST_GAMETYPE + ' ' + LAST_SUBTYPE\
+ ' GAME using ' + getChampionbyID(LAST_CHAMPION_ID)\
+ ' // The game took ' + getTimePlayed(LAST_TIMEPLAYED)\
+ ' // ' + getKDA(LAST_CHAMPIONSKILLED, LAST_NUMDEATHS, LAST_ASSISTS) + ' KDA (' + LAST_CHAMPIONSKILLED + '/' + LAST_NUMDEATHS + '/' + LAST_ASSISTS + ')'\
+ ' // ' + getSpellbyID(LAST_SPELL1) + ' & ' + getSpellbyID(LAST_SPELL2) + ' spells were chosen'\
+ ' // ' + LAST_TOTALDAMAGECHAMPIONS + ' damage was dealt to champions'\
+ ' // ' + LAST_MINIONSKILLED + ' minions were killed'\
+ ' // ' + LAST_WARDSPLACED + ' wards were placed'\
+ ' // Spent ' + str(round(float(LAST_GOLDSPENT) / float(LAST_GOLDEARNED)*100, 1)) + '% of Gold earned [' + str(LAST_GOLDSPENT) + '/' + str(LAST_GOLDEARNED) + ']'\
+ ' // ' + LAST_IPEARNED + ' IP was earned'
# add Official League Match history here
return output
# ====== IRC FUNCTIONS ======
# Extract Nickname
def getNick(data):
nick = data.split('!')[0]
nick = nick.replace(':', ' ')
nick = nick.replace(' ', '')
nick = nick.strip(' \t\n\r')
return nick
def getMessage(data):
if data.find('PRIVMSG') != -1:
try:
message = data.split(channel, 1)[1][2:]
return message
except IndexError:
return 'Index Error'
except:
return 'No message'
else:
return 'Not a message'
# ====== TIMER FUNCTIONS ======
def printit():
threading.Timer(60.0, printit).start()
print "Hello World"
# ===============================
# queue = 13 #sets variable for anti-spam queue functionality
# Connect to server
print '\nConnecting to: ' + server + ' over port ' + str(port)
irc = socket.socket()
irc.connect((server, port))
# Send variables for connection to Twitch chat
irc.send('PASS ' + password + '\r\n')
irc.send('USER ' + nick + ' 0 * :' + botOwner + '\r\n')
irc.send('NICK ' + nick + '\r\n')
irc.send('JOIN ' + channel + '\r\n')
printit()
# Main Program Loop
while True:
ircdata = irc.recv(4096) # gets output from IRC server
ircuser = ircdata.split(':')[1]
ircuser = ircuser.split('!')[0] # determines the sender of the messages
# Check messages for any banned words against banned.txt list
f = open(os.path.dirname(os.path.abspath(__file__)) + '/banned.txt', 'r')
banned = f.readlines()
message = getMessage(ircdata).lower().strip(' \t\n\r')
for i in range(len(banned)):
if message.find(banned[i].strip(' \t\n\r')) != -1:
irc.send('PRIVMSG ' + channel + ' :' + getNick(ircdata) + ', banned words are not allowed. A timeout has been issued.' + '\r\n')
# irc.send('PRIVMSG ' + channel + ' :\/timeout ' + getNick(ircdata) + ' 5\r\n')
break
else:
pass
print 'DEBUG: ' + ircdata.strip(' \t\n\r')
print 'USER: ' + getNick(ircdata).strip(' \t\n\r')
print 'MESSAGE: ' + getMessage(ircdata).strip(' \t\n\r')
print '======================='
# About
if ircdata.find(':!about') != -1:
irc.send('PRIVMSG ' + channel + ' :' + about(getNick(ircdata)) + '\r\n')
# Commands
if ircdata.find(':!commands') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getCommands() + '\r\n')
# Last
if ircdata.find(':!last') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getLast() + '\r\n')
# Current
if ircdata.find(':!current') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getCurrent('games') + '\r\n')
# Current Runes
if ircdata.find(':!runes') != -1 or ircdata.find(':!rune') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getCurrent('runes') + '\r\n')
# Current Mastery
if ircdata.find(':!mastery') != -1 or ircdata.find(':!masteries') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getCurrent('masteries') + '\r\n')
# Basic Summoner Data
if ircdata.find(':!summoner') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getSummonerInfo() + '\r\n')
# Seaonal League Rank Data
if ircdata.find(':!league') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getLeagueInfo() + '\r\n')
# Stats
if ircdata.find(':!stats') != -1:
irc.send('PRIVMSG ' + channel + ' :' + getStats() + '\r\n')
# Return op.gg
if ircdata.find(':!opgg') != -1:
irc.send('PRIVMSG ' + channel + ' :' + opgg('') + '\r\n')
# Return lolnexus
if ircdata.find(':!lolnexus') != -1:
irc.send('PRIVMSG ' + channel + ' :' + lolnexus() + '\r\n')
# Return lolking
if ircdata.find(':!lolking') != -1:
irc.send('PRIVMSG ' + channel + ' :' + lolking('') + '\r\n')
# Return lolskill
if ircdata.find(':!lolskill') != -1:
irc.send('PRIVMSG ' + channel + ' :' + lolskill('') + '\r\n')
# Keep Alive
if ircdata.find('PING') != -1:
irc.send('PONG ' + ircdata.split()[1] + '\r\n')
|
py
|
1a5cad72db5be236e14f248de15aee150aafda13
|
'''Base strategy for refetching urls'''
import time
from core.metadata import Source
from schedulers.base_scheduler import BaseRefetchingStrategy
from utils.helpers import TWO_HOURS
class NewsRefetchingStrategy(BaseRefetchingStrategy):
'''
Strategy optimized for news.
News usually does not change over time, but articles can be
corrected shortly after publishing.
The idea is to refetch fresh news after two hours and then wait an
exponentially growing delay between later fetches. If the page has not
changed after four fetches, it is removed from the refetching list.
'''
def __init__(self, start_delay, refetching_delay):
self.start_delay = start_delay
self.refetching_delay = refetching_delay
# this strategy refetch after two hours and after
# exponentially
self.refetching_vector = [
TWO_HOURS,
refetching_delay,
refetching_delay * 2,
refetching_delay * 4,
]
# we will remove None cases from refetching list
self.increase_delay = {refetching_delay * 4: None}
# on refetching we do not refetch more frequently than refetching_delay
self.decrease_delay = {refetching_delay: refetching_delay}
for i, v in enumerate(self.refetching_vector):
if i < len(self.refetching_vector)-1:
self.increase_delay[v] = self.refetching_vector[i+1]
if i > 1:
self.decrease_delay[v] = self.refetching_vector[i-1]
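# Illustrative note (hypothetical parameter value): with a refetching_delay
# of six hours the ladder reads 2h -> 6h -> 12h -> 24h; increase_delay maps
# the last rung to None, dropping the url from the refetching list, while
# decrease_delay never steps below refetching_delay.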
def compute(self, doc_meta, is_new, is_changed):
"""
Heuristic to determine next refetch
Return a couple:
expire -- the next date we want to refetch(epoch)
next_delay -- the current delay used
"""
next_delay = 0
if doc_meta.source == Source.priority:
if self.start_delay:
next_delay = self.start_delay
else:
# start_delay is not set. removing from priority
# it means we do not want to periodically refetch
# some specific urls
doc_meta.source = Source.refetch
next_delay = self.refetching_delay
elif is_new or not doc_meta.delay:
# TODO: it is possible to do this only for new news
# but it requires to parse the page to check published_time
# url is not a priority one AND
# url is new or without previous delay -> init delay
next_delay = self.refetching_vector[0]
elif not is_changed or (doc_meta.response and
doc_meta.response.status_code != 200):
# url is not changed or non-200 status -> increase the delay
next_delay = self.increase_delay.get(doc_meta.delay, self.refetching_vector[1])
else:
next_delay = self.decrease_delay.get(doc_meta.delay, self.refetching_vector[2])
assert(next_delay > 0 or next_delay is None)
if not next_delay:
return None, None
expire = int(time.time()) + next_delay
return expire, next_delay
|
py
|
1a5caddc6435aeb32dc008367f83e4380fc7b559
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from llnl.util.tty.color import colorize
description = "get help on spack and its commands"
section = "help"
level = "short"
#
# These are longer guides on particular aspects of Spack. Currently there
# is only one on spec syntax.
#
spec_guide = """\
spec expression syntax:
package [constraints] [^dependency [constraints] ...]
package any package from 'spack list', or
@K{/hash} unique prefix or full hash of
installed package
constraints:
versions:
@c{@version} single version
@c{@min:max} version range (inclusive)
@c{@min:} version <min> or higher
@c{@:max} up to version <max> (inclusive)
compilers:
@g{%compiler} build with <compiler>
@g{%compiler@version} build with specific compiler version
@g{%compiler@min:max} specific version range (see above)
compiler flags:
@g{cflags="flags"} cppflags, cflags, cxxflags,
fflags, ldflags, ldlibs
variants:
@B{+variant} enable <variant>
@r{-variant} or @r{~variant} disable <variant>
@B{variant=value} set non-boolean <variant> to <value>
@B{variant=value1,value2,value3} set multi-value <variant> values
architecture variants:
@m{platform=platform} linux, darwin, cray, bgq, etc.
@m{os=operating_system} specific <operating_system>
@m{target=target} specific <target> processor
@m{arch=platform-os-target} shortcut for all three above
cross-compiling:
@m{os=backend} or @m{os=be} build for compute node (backend)
@m{os=frontend} or @m{os=fe} build for login node (frontend)
dependencies:
^dependency [constraints] specify constraints on dependencies
^@K{/hash} build with a specific installed
dependency
examples:
hdf5 any hdf5 configuration
hdf5 @c{@1.10.1} hdf5 version 1.10.1
hdf5 @c{@1.8:} hdf5 1.8 or higher
hdf5 @c{@1.8:} @g{%gcc} hdf5 1.8 or higher built with gcc
hdf5 @B{+mpi} hdf5 with mpi enabled
hdf5 @r{~mpi} hdf5 with mpi disabled
hdf5 @B{+mpi} ^mpich hdf5 with mpi, using mpich
hdf5 @B{+mpi} ^openmpi@c{@1.7} hdf5 with mpi, using openmpi 1.7
boxlib @B{dim=2} boxlib built for 2 dimensions
libdwarf @g{%intel} ^libelf@g{%gcc}
libdwarf, built with intel compiler, linked to libelf built with gcc
mvapich2 @g{%pgi} @B{fabrics=psm,mrail,sock}
mvapich2, built with pgi compiler, with support for multiple fabrics
"""
guides = {
'spec': spec_guide,
}
def setup_parser(subparser):
help_cmd_group = subparser.add_mutually_exclusive_group()
help_cmd_group.add_argument('help_command', nargs='?', default=None,
help='command to get help on')
help_all_group = subparser.add_mutually_exclusive_group()
help_all_group.add_argument(
'-a', '--all', action='store_const', const='long', default='short',
help='list all available commands and options')
help_spec_group = subparser.add_mutually_exclusive_group()
help_spec_group.add_argument(
'--spec', action='store_const', dest='guide', const='spec',
default=None, help='help on the package specification syntax')
def help(parser, args):
if args.guide:
print(colorize(guides[args.guide]))
return 0
if args.help_command:
parser.add_command(args.help_command)
parser.parse_args([args.help_command, '-h'])
else:
sys.stdout.write(parser.format_help(level=args.all))
|
py
|
1a5cae6346205cb0b7f425c166f9c757868730ef
|
"""Collection of template processor."""
from typing import Any, Dict
from django.conf import settings
from rest_framework.request import Request
def from_settings(request: Request) -> Dict[str, Any]:
"""Custom template processor to show current env."""
return {
"ENVIRONMENT_NAME": settings.ENVIRONMENT_NAME,
"ENVIRONMENT_COLOR": settings.ENVIRONMENT_COLOR,
}
|
py
|
1a5cb0f4b72fc8e2f2e2129763185ca779034c9e
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for evaluating various tasks."""
import codecs
import tensorflow.compat.v1 as tf
from airdialogue.evaluator.metrics import bleu
from airdialogue.evaluator.metrics import rouge
from airdialogue.evaluator.metrics import kl
ROLE_TOKENS = ["<t1>", "<t2>"]
def evaluate(ref_file, trans_file, metric):
"""Pick a metric and evaluate depending on task."""
if ":" in metric:
metric, mode = metric.split(":")
else:
mode = "brief"
assert mode in ["brief", "all"]
# BLEU scores for translation task
if metric.lower() == "bleu":
evaluation_score = _bleu(
ref_file, trans_file, mode=mode)
# ROUGE scores for summarization tasks
elif metric.lower() == "rouge":
evaluation_score = _rouge(
ref_file, trans_file, mode=mode)
# kl scores for evaluating the ngram kl distribution of the whole corpus
elif metric.lower() == "kl":
evaluation_score = _kl(
ref_file, trans_file, mode=mode)
elif metric.lower() == "accuracy":
evaluation_score = _accuracy(ref_file, trans_file)
else:
raise ValueError("Unknown metric %s" % metric)
return evaluation_score
def _kl(ref_file, trans_file, mode="brief"):
"""Compute KL divergence and handling BPE."""
max_order = 4
ref_files = [ref_file]
reference_text = []
role_tokens = []
for reference_filename in ref_files:
with codecs.getreader("utf-8")(tf.gfile.GFile(reference_filename,
"rb")) as fh:
for line in fh:
reference, role = process_dialogue_infer(
line.rstrip(), get_role_token=True)
reference_text.append(reference.split(" "))
role_tokens.append(role)
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
translations.append(line.rstrip().split(" "))
results = {}
kl_scores = kl.compute_kl(reference_text, translations, max_order)
for key in kl_scores:
results["all-" + key] = kl_scores[key]
if mode == "brief":
return sum(results.values()) / len(results)
for role in ROLE_TOKENS:
_sub_ref_texts = []
_sub_trans = []
for _r, _t, _role in zip(reference_text, translations, role_tokens):
if _role == role:
_sub_ref_texts.append(_r)
_sub_trans.append(_t)
kl_scores = kl.compute_kl(_sub_ref_texts, _sub_trans, max_order)
for key in kl_scores:
results[role + "-" + key] = kl_scores[key]
return results
def _bleu(ref_file, trans_file, mode="brief"):
"""Compute BLEU scores and handling BPE."""
max_order = 4
smooth = False
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with codecs.getreader("utf-8")(tf.gfile.GFile(reference_filename,
"rb")) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
role_tokens = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference, role = process_dialogue_infer(
reference.rstrip(), get_role_token=True)
reference_list.append(reference.split(" "))
per_segment_references.append(reference_list)
role_tokens.append(role)
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
translations.append(line.rstrip().split(" "))
results = {}
bleu_score, _, _, _, _, _ = bleu.compute_bleu(per_segment_references,
translations, max_order, smooth)
results["all"] = 100 * bleu_score
if mode == "brief":
return results["all"]
for role in ROLE_TOKENS:
_sub_ref_texts = []
_sub_trans = []
for _r, _t, _role in zip(per_segment_references, translations, role_tokens):
if _role == role:
_sub_ref_texts.append(_r)
_sub_trans.append(_t)
bleu_score, _, _, _, _, _ = bleu.compute_bleu(_sub_ref_texts, _sub_trans,
max_order, smooth)
results[role] = 100 * bleu_score
return results
def _rouge(ref_file, summarization_file, mode="brief"):
"""Compute ROUGE scores and handling BPE."""
results = {}
references = []
role_tokens = []
with codecs.getreader("utf-8")(tf.gfile.GFile(ref_file, "rb")) as fh:
for line in fh:
ref, role = process_dialogue_infer(line.rstrip(), get_role_token=True)
references.append(ref)
role_tokens.append(role)
hypotheses = []
with codecs.getreader("utf-8")(tf.gfile.GFile(summarization_file,
"rb")) as fh:
for line in fh:
hypotheses.append(line)
rouge_score_map = rouge.rouge(hypotheses, references)
results["all"] = 100 * rouge_score_map["rouge_l/f_score"]
if mode == "brief":
return results["all"]
for role in ROLE_TOKENS:
_sub_ref_texts = []
_sub_hypos = []
for _r, _t, _role in zip(references, hypotheses, role_tokens):
if _role == role:
_sub_ref_texts.append(_r)
_sub_hypos.append(_t)
rouge_score_map = rouge.rouge(_sub_hypos, _sub_ref_texts)
results[role] = 100 * rouge_score_map["rouge_l/f_score"]
return results
def process_dialogue_infer(file_line, get_role_token=False):
# split the end token (<t1>,<t2>)
_line = file_line.replace(" <eod>", "")
_line = _line.rstrip().split("|")[1].rsplit(" ", 1)
if not get_role_token:
return _line[0]
else:
return _line[0], _line[1]
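# Roughly, for a line like "state|hello there <t1> <eod>" this strips " <eod>",
# keeps the text after "|", and splits off the trailing role token, yielding
# ("hello there", "<t1>").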
def _accuracy(label_file, pred_file):
"""Compute accuracy, each line contains a label."""
with codecs.getreader("utf-8")(tf.gfile.GFile(label_file, "rb")) as label_fh:
with codecs.getreader("utf-8")(tf.gfile.GFile(pred_file, "rb")) as pred_fh:
count = 0.0
match = 0.0
for label, pred in zip(label_fh, pred_fh):
label = process_dialogue_infer(label.strip()).strip()
pred = pred.strip()
if label == pred:
match += 1
count += 1
return 100 * match / count
|
py
|
1a5cb156ce5b0addf566ba9cffa355a59db8a808
|
from django import forms
from django.contrib.auth.models import AnonymousUser
from cabot.cabotapp.views import StatusCheckForm
from cabot.metricsapp.api import get_status_check_fields
from cabot.metricsapp.models import GrafanaInstance, GrafanaDataSource
# Model forms for admin site
from cabot.metricsapp.models.grafana import set_grafana_panel_from_session, GrafanaPanel
class GrafanaInstanceAdminForm(forms.ModelForm):
class Meta:
model = GrafanaInstance
exclude = []
class GrafanaDataSourceAdminForm(forms.ModelForm):
class Meta:
model = GrafanaDataSource
exclude = []
# Forms for selecting Grafana instance, dashboard, panel, etc.
class GrafanaInstanceForm(forms.Form):
"""Select a Grafana instance to use for a status check"""
grafana_instance = forms.ModelChoiceField(
queryset=GrafanaInstance.objects.all(),
initial=1,
help_text='Grafana site instance to select a dashboard from.'
)
def __init__(self, *args, **kwargs):
default_grafana_instance = kwargs.pop('default_grafana_instance')
super(GrafanaInstanceForm, self).__init__(*args, **kwargs)
if default_grafana_instance is not None:
self.fields['grafana_instance'].initial = default_grafana_instance
class GrafanaDashboardForm(forms.Form):
"""Select a Grafana dashboard to use for a status check"""
def __init__(self, *args, **kwargs):
dashboards = kwargs.pop('dashboards')
default_dashboard = kwargs.pop('default_dashboard')
super(GrafanaDashboardForm, self).__init__(*args, **kwargs)
self.fields['dashboard'] = forms.ChoiceField(
choices=dashboards,
help_text='Grafana dashboard to use for the check.'
)
if default_dashboard is not None:
self.fields['dashboard'].initial = default_dashboard
class GrafanaPanelForm(forms.Form):
"""Select a Grafana panel to use for a status check"""
def __init__(self, *args, **kwargs):
panels = kwargs.pop('panels')
default_panel_id = kwargs.pop('default_panel_id')
super(GrafanaPanelForm, self).__init__(*args, **kwargs)
self.fields['panel'] = forms.ChoiceField(
choices=panels,
help_text='Grafana panel to use for the check.'
)
if default_panel_id is not None:
for panel in panels:
panel_data = panel[0]
if panel_data['panel_id'] == default_panel_id:
self.fields['panel'].initial = panel_data
break
def clean_panel(self):
"""Make sure the data source for the panel is supported"""
panel = eval(self.cleaned_data['panel'])
datasource = panel['datasource']
grafana_instance_id = panel['grafana_instance_id']
try:
GrafanaDataSource.objects.get(grafana_source_name=datasource,
grafana_instance_id=grafana_instance_id)
except GrafanaDataSource.DoesNotExist:
raise forms.ValidationError('No matching data source for {}.'.format(datasource))
return panel
class GrafanaSeriesForm(forms.Form):
"""Select the series to use for a status check"""
def __init__(self, *args, **kwargs):
series = kwargs.pop('series')
default_series = kwargs.pop('default_series')
super(GrafanaSeriesForm, self).__init__(*args, **kwargs)
self.fields['series'] = forms.MultipleChoiceField(
choices=series,
widget=forms.CheckboxSelectMultiple,
help_text='Data series to use in the check.'
)
if default_series is not None:
self.fields['series'].initial = default_series
def clean_series(self):
"""Make sure at least one series is selected."""
series = self.cleaned_data.get('series')
if not series:
raise forms.ValidationError('At least one series must be selected.')
return series
class GrafanaStatusCheckForm(StatusCheckForm):
"""Generic form for creating a status check. Other metrics sources will subclass this."""
_autofilled_fields = ('time_range', 'check_type', 'warning_value', 'high_alert_value', 'source')
_disabled_fields = ('source',)
def __init__(self, grafana_session_data=None, user=None, initial=None, *args, **kwargs):
self.grafana_panel = ((initial and initial['grafana_panel'])
or (kwargs.get('instance') and kwargs['instance'].grafana_panel)
or GrafanaPanel())
if grafana_session_data:
dashboard_info = grafana_session_data['dashboard_info']
panel_info = grafana_session_data['panel_info']
templating_dict = grafana_session_data['templating_dict']
instance_id = grafana_session_data['instance_id']
grafana_data_source = GrafanaDataSource.objects.get(
grafana_source_name=grafana_session_data['datasource'],
grafana_instance_id=instance_id
)
# we will reuse the PK of instance.grafana_panel if there's one set, changes are manually saved in save()
set_grafana_panel_from_session(self.grafana_panel, grafana_session_data)
grafana_fields = get_status_check_fields(dashboard_info, panel_info, grafana_data_source,
templating_dict, self.grafana_panel, user)
# MetricsSourceBase overrides __unicode__ to return its name, but we need it to serialize to
# its pk so ModelChoiceForm can handle it right
grafana_fields['source'] = grafana_fields['source'].pk
# apply initial on top of get_status_check_fields() to allow overriding
if initial:
grafana_fields.update(initial)
initial = grafana_fields
super(GrafanaStatusCheckForm, self).__init__(*args, initial=initial, **kwargs)
self.fields['name'].widget = forms.TextInput(attrs=dict(style='width:50%'))
self.fields['name'].help_text = None
for field_name in self._autofilled_fields:
self.fields[field_name].help_text += ' Autofilled from the Grafana dashboard.'
for field_name in self._disabled_fields:
self.fields[field_name].disabled = True
self.user = user # used in save(), ignored if None
def save(self, commit=True):
model = super(GrafanaStatusCheckForm, self).save(commit=False)
# the grafana panel may have been created or updated, so also save that
if self.grafana_panel:
self.grafana_panel.save()
model.grafana_panel = self.grafana_panel
if self.user and not isinstance(self.user, AnonymousUser):
model.created_by = self.user
# When commit is False, we just get the model, but the service/instance sets aren't saved
# (since the model doesn't have a pk yet). Re-run to actually save the service and instance sets
model = super(GrafanaStatusCheckForm, self).save()
return model
|
py
|
1a5cb35a3fc1e91d0153ced78a7c20d4d18d1db8
|
import sys
from flask import Flask, render_template, jsonify, redirect
import pymongo
import scrape_mars
sys.setrecursionlimit(2000)
app = Flask(__name__)
client = pymongo.MongoClient()
db = client.mars_db
collection = db.mars_facts
@app.route('/scrape')
def scrape():
mars = scrape_mars.scrape()
    db.mars_facts.insert_one(mars)
    # redirect back to the index page so the view returns a valid response
    return redirect("/")
@app.route("/")
def home():
mars = list(db.mars_facts.find())
print(mars)
return render_template("index.html", mars = mars)
if __name__ == "__main__":
app.run(debug=True)
|
py
|
1a5cb3915d411a72bfd0697ac4b0d19b8dceaae7
|
import codecs
import sys
import setuptools
def read_requirements_file(req_name):
requirements = []
try:
with codecs.open(req_name, encoding='utf-8') as req_file:
for req_line in req_file:
if '#' in req_line:
req_line = req_line[0:req_line.find('#')].strip()
if req_line:
requirements.append(req_line.strip())
except IOError:
pass
return requirements
install_requires = read_requirements_file('requirements.txt')
setup_requires = read_requirements_file('setup-requirements.txt')
tests_require = read_requirements_file('test-requirements.txt')
if sys.version_info < (2, 7):
tests_require.append('unittest2')
if sys.version_info < (3, 0):
tests_require.append('mock')
setuptools.setup(
name='sprockets.mixins.redis',
version='0.0.0',
description='Tornado handler mixin to provide easy read/write access to Redis',
long_description=codecs.open('README.rst', encoding='utf-8').read(),
url='https://github.com/sprockets/sprockets.mixins.redis.git',
author='AWeber Communications',
author_email='[email protected]',
license=codecs.open('LICENSE', encoding='utf-8').read(),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=['sprockets',
'sprockets.mixins',
'sprockets.mixins.redis'],
package_data={'': ['LICENSE', 'README.md']},
include_package_data=True,
namespace_packages=['sprockets',
'sprockets.mixins'],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
test_suite='nose.collector',
zip_safe=False)
|
py
|
1a5cb3c10668f5c157a6075b04b00d99c71cf355
|
from collections import OrderedDict
from requests.exceptions import ConnectionError
from .. import exceptions, utility, transports, schema
from . import response, messages
import logging
import yaml
logger = logging.getLogger(__name__)
class CommandHTTPSTransport(transports.BaseTransport):
def __init__(self,
headers = None,
auth = None,
options_callback = None,
message_callback = None,
encryption_key = None
):
super().__init__(
headers = headers,
auth = auth,
encryption_key = encryption_key
)
self._options_callback = options_callback
self._message_callback = message_callback
def transition(self, link, decoders, params = None):
params = self.get_params(link, params)
url = self.get_url(link.url, params.path)
headers = self.get_headers(url, decoders)
headers.update(self._headers)
if link.action == 'get':
# Schema
try:
result = self.request_page(url, headers, params, decoders)
if isinstance(result, schema.Error):
raise exceptions.CommandError(result['detail'])
return result
except ConnectionError as error:
raise exceptions.CommandConnectionError(error)
else:
# Command
if self._options_callback and callable(self._options_callback):
self._options_callback(params.data)
try:
return self.request_stream(url, headers, params, decoders)
except ConnectionError as error:
raise exceptions.CommandConnectionError(error)
def request_stream(self, url, headers, params, decoders):
session = self.init_session()
request = self._build_post_request(session, url, headers, params)
settings = session.merge_environment_settings(
request.url, None, True, False, None
)
logger.debug("Stream {} request headers: {}".format(request.url, request.headers))
request_response = session.send(request, **settings)
command_response = response.CommandResponse()
if request_response.status_code >= 400:
raise exceptions.CommandResponseError(utility.format_response_error(request_response))
try:
for line in request_response.iter_lines():
message = messages.Message.get(
self._decode_message(request_response, line, decoders),
self._cipher.key
)
if self._message_callback and callable(self._message_callback):
self._message_callback(message)
command_response.add(message)
except Exception as error:
logger.debug("Stream {} error response headers: {}".format(request.url, request_response.headers))
logger.debug("Stream {} error response params:\n\n{}".format(request.url, yaml.dump(params.data)))
logger.debug("Stream {} error status code: {}".format(request.url, request_response.status_code))
raise error
return command_response
def _decode_message(self, response, data, decoders):
result = None
if data:
content_type = response.headers.get('content-type')
codec = self._negotiate_decoder(decoders, content_type)
options = {
'base_url': response.url
}
if 'content-type' in response.headers:
options['content_type'] = response.headers['content-type']
if 'content-disposition' in response.headers:
options['content_disposition'] = response.headers['content-disposition']
result = codec.decode(data, **options)
return result
def _decode_result_error(self, result, response):
is_error = response.status_code >= 400 and response.status_code <= 599
if is_error and not isinstance(result, schema.Error):
default_title = "{} {}".format(response.status_code, response.reason)
result = self._coerce_to_error(result, default_title = default_title)
return result
def _coerce_to_error(self, obj, default_title):
if isinstance(obj, schema.Document):
return schema.Error(
title = obj.title or default_title,
content = self._coerce_to_error_content(obj)
)
elif isinstance(obj, dict):
return schema.Error(title = default_title, content = obj)
elif isinstance(obj, list):
return schema.Error(title = default_title, content = { 'messages': obj })
elif obj is None:
return schema.Error(title = default_title)
return schema.Error(title = default_title, content = { 'message': obj })
def _coerce_to_error_content(self, node):
if isinstance(node, (schema.Document, schema.Object)):
return OrderedDict([
(key, self._coerce_to_error_content(value))
for key, value in node.data.items()
])
elif isinstance(node, schema.Array):
return [
self._coerce_to_error_content(item)
for item in node
if not isinstance(item, schema.Link)
]
return node
|
py
|
1a5cb4e17f6d10a3c571f25c4faad19e9f07ba51
|
"""
# Definition for a Node.
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
"""
class Solution:
def postorder(self, root: 'Node') -> List[int]:
if not root:
return []
res = [root.val]
for child in root.children[::-1]:
res.extend(self.postorder(child)[::-1])
return res[::-1]
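# Quick check (hypothetical tree, outside the judge harness):
#   root = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])
#   Solution().postorder(root)  # -> [5, 6, 3, 2, 4, 1]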
|
py
|
1a5cb4e7848d604c0f0372877f4243f1ff14f61c
|
import os
import networkx
from parameterized import parameterized
from netdiff import OlsrParser, diff
from netdiff.exceptions import ParserError
from netdiff.tests import TestCase
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
links2 = open('{0}/static/olsr-2-links.json'.format(CURRENT_DIR)).read()
links2_newformat = open(
'{0}/static/olsr-2-links-newformat.json'.format(CURRENT_DIR)
).read()
links2_cost = open(
'{0}/static/olsr-2-links-cost-changed.json'.format(CURRENT_DIR)
).read()
links3 = open('{0}/static/olsr-3-links.json'.format(CURRENT_DIR)).read()
links5 = open('{0}/static/olsr-5-links.json'.format(CURRENT_DIR)).read()
links5_cost = open(
'{0}/static/olsr-5-links-cost-changed.json'.format(CURRENT_DIR)
).read()
def parameterized_test_name_func(testcase_func, param_num, param):
format = 'newformat' if 'version' in param[0][0] else 'oldformat'
return f'{testcase_func.__name__}_{param_num}_{format}'
class TestOlsrParser(TestCase):
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_parse(self, links2):
p = OlsrParser(links2)
self.assertIsInstance(p.graph, networkx.Graph)
# test additional properties in networkx graph
properties = list(p.graph.edges(data=True))[0][2]
self.assertIsInstance(properties['weight'], float)
self.assertIsInstance(properties['link_quality'], float)
self.assertIsInstance(properties['neighbor_link_quality'], float)
# test additional node properties
properties = list(p.graph.nodes(data=True))[0][1]
self.assertIsInstance(properties['local_addresses'], list)
def test_init(self):
p = OlsrParser(links3, version='0.6.3', metric='ETC')
self.assertEqual(p.version, '0.6.3')
self.assertEqual(p.metric, 'ETC')
self.assertEqual(p.revision, None)
p = OlsrParser(links3, version='0.6.3', revision='a', metric='ETC')
self.assertEqual(p.revision, 'a')
def test_parse_exception(self):
with self.assertRaises(ParserError):
OlsrParser('{ "test": "test" }')
def test_parse_exception2(self):
with self.assertRaises(ParserError):
OlsrParser('{ "topology": [{ "a": "a" }], "mid": [] }')
def test_parse_exception_mid(self):
with self.assertRaises(ParserError):
OlsrParser('{ "topology": [], "missing_mid": [] }')
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_json_dict(self, links2):
p = OlsrParser(links2)
data = p.json(dict=True)
self.assertIsInstance(data, dict)
self.assertEqual(data['type'], 'NetworkGraph')
self.assertEqual(data['protocol'], 'OLSR')
self.assertEqual(data['version'], '0.6.6')
self.assertEqual(data['revision'], '5031a799fcbe17f61d57e387bc3806de')
self.assertEqual(data['metric'], 'ETX')
self.assertIsInstance(data['nodes'], list)
self.assertIsInstance(data['links'], list)
self.assertEqual(len(data['nodes']), 3)
self.assertEqual(len(data['links']), 2)
self.assertIsInstance(data['links'][0]['cost'], float)
# test additional link properties
properties = data['links'][0]['properties']
self.assertIsInstance(properties['link_quality'], float)
self.assertIsInstance(properties['neighbor_link_quality'], float)
# test local_addresses
self.assertIsInstance(data['nodes'][0]['local_addresses'], list)
found = False
for node in data['nodes']:
if node['id'] == '10.150.0.2':
self.assertEqual(len(node['local_addresses']), 2)
self.assertEqual(node['local_addresses'][0], '172.16.192.2')
self.assertEqual(node['local_addresses'][1], '192.168.0.2')
found = True
self.assertTrue(found)
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_json_string(self, links2):
p = OlsrParser(links2)
data = p.json()
self.assertIsInstance(data, str)
self.assertIn('NetworkGraph', data)
self.assertIn('protocol', data)
self.assertIn('version', data)
self.assertIn('revision', data)
self.assertIn('metric', data)
self.assertIn('OLSR', data)
self.assertIn('0.6.6', data)
self.assertIn('5031a799fcbe17f61d57e387bc3806de', data)
self.assertIn('ETX', data)
self.assertIn('links', data)
self.assertIn('nodes', data)
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_no_changes(self, links2):
old = OlsrParser(links2)
new = OlsrParser(links2)
result = diff(old, new)
self.assertIsInstance(result, dict)
self.assertIsNone(result['added'])
self.assertIsNone(result['removed'])
self.assertIsNone(result['changed'])
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_added_1_link(self, links2):
old = OlsrParser(links2)
new = OlsrParser(links3)
result = diff(old, new)
self.assertIsNone(result['removed'])
self.assertEqual(len(result['changed']['links']), 1)
link = result['changed']['links'][0]
self.assertEqual(link['source'], '10.150.0.3')
self.assertEqual(link['target'], '10.150.0.2')
self.assertEqual(link['cost'], 27.669921875)
self.assertEqual(link['cost_text'], '')
self.assertEqual(
link['properties'], {'link_quality': 0.195, 'neighbor_link_quality': 0.184}
)
# ensure there are differences
self.assertEqual(len(result['added']['links']), 1)
self.assertEqual(len(result['added']['nodes']), 1)
# ensure correct link added
self.assertIn('10.150.0.5', result['added']['links'][0].values())
self.assertIn('10.150.0.4', result['added']['links'][0].values())
# ensure correct node added
self.assertIn('10.150.0.5', result['added']['nodes'][0].values())
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_added_1_link_sub(self, links2):
old = OlsrParser(links2)
new = OlsrParser(links3)
result = new - old
self.assertIsNone(result['removed'])
# ensure there are differences
self.assertEqual(len(result['added']['links']), 1)
self.assertEqual(len(result['added']['nodes']), 1)
# ensure correct link added
self.assertIn('10.150.0.5', result['added']['links'][0].values())
self.assertIn('10.150.0.4', result['added']['links'][0].values())
# ensure correct node added
self.assertIn('10.150.0.5', result['added']['nodes'][0].values())
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_removed_1_link(self, links2):
old = OlsrParser(links3)
new = OlsrParser(links2)
result = diff(old, new)
self.assertIsNone(result['added'])
self.assertEqual(len(result['changed']['links']), 1)
link = result['changed']['links'][0]
self.assertEqual(link['source'], '10.150.0.2')
self.assertEqual(link['target'], '10.150.0.3')
self.assertEqual(link['cost'], 27.669921875)
self.assertEqual(link['cost_text'], '')
self.assertEqual(
link['properties'], {'link_quality': 0.195, 'neighbor_link_quality': 0.184}
)
self.assertIsInstance(result, dict)
self.assertTrue(type(result['removed']['links']) is list)
# ensure there are differences
self.assertEqual(len(result['removed']['links']), 1)
self.assertEqual(len(result['removed']['nodes']), 1)
# ensure correct link removed
self.assertIn('10.150.0.5', result['removed']['links'][0].values())
self.assertIn('10.150.0.4', result['removed']['links'][0].values())
# ensure correct node removed
self.assertIn('10.150.0.5', result['removed']['nodes'][0].values())
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_changed_links(self, links2):
old = OlsrParser(links2)
new = OlsrParser(links3)
result = diff(old, new)
self.assertEqual(len(result['changed']['links']), 1)
link = result['changed']['links'][0]
self.assertEqual(link['source'], '10.150.0.3')
self.assertEqual(link['target'], '10.150.0.2')
self.assertEqual(link['cost'], 27.669921875)
self.assertEqual(link['cost_text'], '')
self.assertEqual(
link['properties'], {'link_quality': 0.195, 'neighbor_link_quality': 0.184}
)
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_changed_nodes(self, links2):
old = OlsrParser(links2)
new = OlsrParser(links2_cost)
result = diff(old, new)
self.assertIsInstance(result['changed'], dict)
self.assertEqual(len(result['changed']['nodes']), 3)
node = result['changed']['nodes'][0]
self.assertEqual(node['id'], '10.150.0.2')
self.assertEqual(node['label'], '')
self.assertEqual(node['local_addresses'], [])
self.assertEqual(node['properties'], {})
node = result['changed']['nodes'][1]
self.assertEqual(node['id'], '10.150.0.3')
self.assertEqual(node['label'], '')
self.assertEqual(node['local_addresses'], [])
self.assertEqual(node['properties'], {})
node = result['changed']['nodes'][2]
self.assertEqual(node['id'], '10.150.0.4')
self.assertEqual(node['label'], '')
self.assertEqual(node['local_addresses'], [])
self.assertEqual(node['properties'], {})
def test_simple_diff(self):
old = OlsrParser(links3)
new = OlsrParser(links5)
result = diff(old, new)
self.assertIsNone(result['changed'])
# ensure there are differences
self.assertEqual(len(result['added']['links']), 3)
self.assertEqual(len(result['removed']['links']), 1)
self.assertEqual(len(result['added']['nodes']), 2)
self.assertEqual(len(result['removed']['nodes']), 1)
# ensure 3 links added
self._test_expected_links(
graph=result['added'],
expected_links=[
('10.150.0.3', '10.150.0.7'),
('10.150.0.3', '10.150.0.6'),
('10.150.0.7', '10.150.0.6'),
],
)
self._test_expected_links(
graph=result['removed'], expected_links=[('10.150.0.5', '10.150.0.4')]
)
added_nodes = [node['id'] for node in result['added']['nodes']]
self.assertIn('10.150.0.6', added_nodes)
self.assertIn('10.150.0.7', added_nodes)
self.assertIn('10.150.0.5', result['removed']['nodes'][0].values())
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_cost(self, links2):
parser = OlsrParser(links2)
graph = parser.json(dict=True)
a = graph['links'][0]['cost']
b = graph['links'][1]['cost']
self.assertIn(27.669921875, [a, b])
self.assertIn(1.0, [a, b])
def test_diff_format(self):
old = OlsrParser(links3)
new = OlsrParser(links5)
result = diff(old, new)
data = result['added']
self.assertEqual(data['type'], 'NetworkGraph')
self.assertEqual(data['protocol'], 'OLSR')
self.assertEqual(data['version'], '0.6.6')
self.assertEqual(data['revision'], '5031a799fcbe17f61d57e387bc3806de')
self.assertEqual(data['metric'], 'ETX')
self.assertIsInstance(data['nodes'], list)
self.assertIsInstance(data['links'], list)
data = result['removed']
self.assertEqual(data['type'], 'NetworkGraph')
self.assertEqual(data['protocol'], 'OLSR')
self.assertEqual(data['version'], '0.6.6')
self.assertEqual(data['revision'], '5031a799fcbe17f61d57e387bc3806de')
self.assertEqual(data['metric'], 'ETX')
self.assertIsInstance(data['nodes'], list)
self.assertIsInstance(data['links'], list)
@parameterized.expand(
[(links2), (links2_newformat)], name_func=parameterized_test_name_func
)
def test_cost_changes_1(self, links2):
old = OlsrParser(links2)
new = OlsrParser(links2_cost)
result = diff(old, new)
self.assertIsNone(result['added'])
self.assertIsNone(result['removed'])
self.assertIsInstance(result['changed'], dict)
links = result['changed']['links']
self.assertTrue(type(links) is list)
self.assertEqual(len(links), 2)
# ensure results are correct
self.assertTrue(1.302734375 in (links[0]['cost'], links[1]['cost']))
self.assertTrue(1.0234375 in (links[0]['cost'], links[1]['cost']))
def test_cost_changes_2(self):
old = OlsrParser(links5)
new = OlsrParser(links5_cost)
result = diff(old, new)
self.assertIsNone(result['added'])
self.assertIsNone(result['removed'])
self.assertIsInstance(result['changed'], dict)
self.assertEqual(len(result['changed']['nodes']), 0)
links = result['changed']['links']
self.assertEqual(len(links), 4)
costs = [link['cost'] for link in links]
self.assertIn(1.0, costs)
self.assertIn(2.0, costs)
self.assertIn(1.50390625, costs)
self.assertIn(3.515625, costs)
def test_link_with_infinite_cost(self):
p = OlsrParser(
{
"topology": [
{
"lastHopIP": "10.150.0.2",
"destinationIP": "10.150.0.3",
"linkQuality": 0.195,
"neighborLinkQuality": 0.184,
"tcEdgeCost": float('inf'),
"validityTime": 284572,
}
],
"mid": [],
}
)
# ensure link is ignored
self.assertEqual(len(p.graph.edges()), 0)
|
py
|
1a5cb4ee22756c56a8cd88b3047953a29a45e47e
|
from helpers.time_utils import to_minutes
from brownie import *
from config.keeper import keeper_config
from helpers.gas_utils import gas_strategies
from helpers.run_persistent import run_persistent
from rich.console import Console
from scripts.keeper.earn import earn_all
from scripts.systems.badger_system import connect_badger
from tabulate import tabulate
console = Console()
gas_strategies.set_default_for_active_chain()
def main():
badger = connect_badger(load_deployer=True, load_keeper=True)
skip = keeper_config.get_active_chain_skipped_setts("earn")
run_interval = keeper_config.get_active_chain_run_interval("earn")
console.print("=== Earn (Eternal) ===")
console.print("All Setts on chain", badger.getAllSettIds())
console.print("Setts to skip", skip)
console.print("Interval between runs: {} minutes".format(to_minutes(run_interval)))
run_persistent(earn_all, (badger, skip), run_interval=run_interval)
|
py
|
1a5cb666dab02512cfcdf5f0e2c401fe20aea837
|
# Copyright 2022 Layne Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if 1:
x = 1
else:
x = False
res = x
|
py
|
1a5cb7556db824d29fdbbab8f09bdeb04cffd92a
|
"""Save codes into library"""
from typing import List
from d2lbook import notebook
from d2lbook import common
import logging
import os
import copy
import re
import pathlib
import ast
import astor
from yapf.yapflib.yapf_api import FormatCode
import isort
HEADER = '################# WARNING ################\n'
def _write_header(f):
f.write(HEADER)
f.write('# The below part is generated automatically through:\n')
f.write('# d2lbook build lib\n')
f.write('# Don\'t edit it directly\n\n')
def save_tab(notebooks: List[str], lib_fname: str, tab: str, default_tab: str):
logging.info(
        f'Matching with the pattern: "#@save", searching for tab {tab}')
custom_header = []
if os.path.exists(lib_fname):
with open(lib_fname, 'r') as f:
lines = f.readlines()
for i, l in enumerate(lines):
if l.strip() == HEADER.strip():
custom_header = lines[:i]
break
with open(lib_fname, 'w') as f:
if custom_header:
f.write(''.join(custom_header))
_write_header(f)
saved = []
for nb in notebooks:
saved.extend(_save_code(nb, tab=tab, default_tab=default_tab))
f.write(_refactor_blocks(saved))
logging.info('Saved %d blocks into %s', len(saved), lib_fname)
def save_version(version: str, version_fn: str):
if version and version_fn:
with open(version_fn, 'r', encoding='UTF-8') as f:
lines = f.read().split('\n')
for i, l in enumerate(lines):
if '__version__' in l:
lines[i] = f'__version__ = "{version}"'
logging.info(f'save {lines[i]} into {version_fn}')
with open(version_fn, 'w') as f:
f.write('\n'.join(lines))
def _save_block(source: str, save_mark: str):
if not save_mark: return ''
lines = source.splitlines()
block = []
for i, l in enumerate(lines):
m = re.search(f'# *{save_mark}', l)
if m:
l = l[:m.span()[0]].rstrip()
if l: block.append(l)
for j in range(i + 1, len(lines)):
l = lines[j]
if not l.startswith(' ') and len(l):
block.append(lines[j])
else:
for k in range(j, len(lines)):
if lines[k].startswith(' ') or not len(lines[k]):
block.append(lines[k])
else:
break
break
return format_code('\n'.join(block))
def _save_code(input_fn, save_mark='@save', tab=None,
default_tab=None):
"""get the code blocks (import, class, def) that will be saved"""
with open(input_fn, 'r', encoding='UTF-8') as f:
nb = notebook.read_markdown(f.read())
if tab:
nb = notebook.get_tab_notebook(nb, tab, default_tab)
if not nb:
return []
saved = []
for i, cell in enumerate(nb.cells):
if cell.cell_type == 'code':
block = _save_block(cell.source, save_mark)
if block:
label = _find_latest_label(nb.cells[:i-1])
saved.append([block, label, input_fn])
return saved
def _find_latest_label(cells):
for cell in reversed(cells):
if cell.cell_type == 'markdown':
matches = re.findall(common.md_mark_pattern, cell.source)
for m in reversed(matches):
if m[0] == 'label' and 'sec_' in m[1]:
return m[1]
return ''
def _refactor_blocks(saved_blocks):
# add label into docstring
for i, (block, label, _) in enumerate(saved_blocks):
if not label: continue
modules = common.split_list(block.split('\n'), lambda l: l.startswith('def') or l.startswith('class'))
new_block = []
if modules[0]: new_block.append('\n'.join(modules[0]))
for m in modules[1:]:
parts = common.split_list(m, lambda l: '):' in l)
# find the docstring
if len(parts) > 1:
docstr = parts[1][1] if len(parts[1]) > 1 else common.head_spaces(m[0]) + ' '
loc = f'Defined in :numref:{label}"""'
if docstr.lstrip().startswith('"""') and docstr.endswith('"""'):
parts[1][1] = docstr[:-3] + f'\n\n{common.head_spaces(docstr)}{loc}'
else:
parts[1].insert(1, f'{common.head_spaces(docstr)}"""{loc}')
new_block.append('\n'.join(common.flatten(parts)))
saved_blocks[i][0] = '\n'.join(new_block)
# merge @d2l.save_to_class
new_blocks = []
class_blocks = {}
for i, (block, _, _) in enumerate(saved_blocks):
lines = block.split('\n')
if lines[0].startswith('class'):
new_blocks.append(block)
            m = re.search(r'class +([\w_]+)', lines[0])
if m:
class_blocks[m.groups()[0]] = len(new_blocks) - 1
continue
register = '@d2l.add_to_class'
if register in block:
parts = common.split_list(lines, lambda x: x.startswith(register))
if parts[0]:
new_blocks.append(parts[0])
if len(parts) > 1:
for p in parts[1:]:
                    m = re.search(r'@d2l\.add_to_class\(([.\w_]+)\)', p[0])
if m:
cls = m.groups()[0].split('.')[-1]
new_blocks[class_blocks[cls]] += '\n\n' + '\n'.join([' '+l for l in p[1:]])
continue
new_blocks.append(block)
return '\n\n'.join(new_blocks)
def _parse_mapping_config(config: str, split_line=True):
"""Parse config such as: numpy -> asnumpy, reshape, ...
Return a list of string pairs
"""
terms = []
for line in config.splitlines():
if split_line:
terms.extend(line.split(','))
else:
terms.append(line)
mapping = []
for term in terms:
term = term.strip()
if not term:
continue
if len(term.split('->')) == 2:
a, b = term.split('->')
mapping.append((a.strip(), b.strip()))
else:
mapping.append((term, term))
return mapping
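# Illustrative call: _parse_mapping_config('numpy -> asnumpy, reshape') returns
# [('numpy', 'asnumpy'), ('reshape', 'reshape')].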
def node_to_source(node):
if isinstance(node, ast.Constant):
return str(node.value)
return astor.to_source(node).rstrip()
def save_alias(tab_lib):
"""Save alias into the library file"""
alias = ''
if 'alias' in tab_lib:
alias += tab_lib['alias'].strip() + '\n'
if 'lib_name' in tab_lib:
lib_name = tab_lib["lib_name"]
if 'simple_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['simple_alias'])
for a, b in mapping:
if a.endswith('('): a = a[:-1]
if b.endswith('('): b = b[:-1]
alias += f'\n{a} = {lib_name}.{b}'
if 'fluent_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['fluent_alias'])
alias += '\n' + '\n'.join([
f'{a} = lambda x, *args, **kwargs: x.{b}(*args, **kwargs)'
for a, b in mapping])
if 'args_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['args_alias'], split_line=False)
for a, b in mapping:
alias += f'\ndef {a}:\n return {b}'
if alias:
lib_file = tab_lib['lib_file']
with open(lib_file, 'a') as f:
logging.info(
f'Wrote {len(alias.splitlines())} alias into {lib_file}')
f.write('# Alias defined in config.ini\n')
f.write(alias + '\n\n')
def replace_call(source: str, mapping, replace_fn):
matched = False
for a in mapping:
if 'd2l.'+a in source:
matched = True
if not matched:
return source
lines = source.splitlines()
if lines[0].startswith('%'):
source = '\n'.join(lines[1:])
for _ in range(100): # 100 is a (random) big enough number
replaced = False
tree = ast.parse(source)
for node in ast.walk(tree):
if (isinstance(node, ast.Call) and
isinstance(node.func, ast.Attribute) and
isinstance(node.func.value, ast.Name) and
node.func.value.id == 'd2l' and
node.func.attr in mapping):
new_node = replace_fn(node, mapping[node.func.attr])
if new_node:
source = source.replace(
ast.get_source_segment(source, node),
new_node if isinstance(new_node, str) else node_to_source(new_node))
replaced = True
break
if not replaced:
break
if lines[0].startswith('%'):
source = lines[0] + '\n' + source
return source
def replace_fluent_alias(source, fluent_mapping):
def _replace(node, b):
return ast.Call(
ast.Attribute(value=node.args[0], attr=b),
node.args[1:], node.keywords)
return replace_call(source, fluent_mapping, _replace)
def replace_args_alias(source, args_mapping):
def _replace(node, b):
a_args, b = b
a_kwargs = {a: b for a, b in a_args if not a.startswith('a_')}
a_args = [a for a, _ in a_args if a.startswith('a_')]
if len(node.args) != len(a_args):
return None
key_value = {a : node_to_source(arg) for arg, a in zip(node.args, a_args)}
for kw in node.keywords:
assert kw.arg in a_kwargs, (kw.arg, a_kwargs)
key_value['='+kw.arg] = '='+node_to_source(kw.value)
        # drop keywords that do not appear in the substituted call template
b_call = ast.parse(b).body[0].value
if isinstance(b_call, ast.Call):
new_keywords = [kw for kw in b_call.keywords if '='+kw.value.id in key_value]
b_call.keywords = new_keywords
b = node_to_source(b_call)
for k, v in key_value.items():
b = b.replace(k, v)
return b
return replace_call(source, dict(args_mapping), _replace)
def call_args(call_str):
call = ast.parse(call_str).body[0].value
assert isinstance(call, ast.Call), call_str
name = call.func.id
args = [(a.id,None) for a in call.args] + [(k.arg, k.value) for k in call.keywords]
return name, args
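# Illustrative call: call_args('reshape(a_x, a_shape)') returns
# ('reshape', [('a_x', None), ('a_shape', None)]).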
def replace_alias(nb, tab_lib):
nb = copy.deepcopy(nb)
patterns = []
fluent_mapping = {}
args_mapping = {}
if 'reverse_alias' in tab_lib:
patterns += _parse_mapping_config(tab_lib['reverse_alias'], split_line=False)
if 'lib_name' in tab_lib:
lib_name = tab_lib["lib_name"]
if 'simple_alias' in tab_lib:
mapping = _parse_mapping_config(tab_lib['simple_alias'])
patterns += [(f'd2l.{a}', f'{lib_name}.{b}') for a, b in mapping]
if 'fluent_alias' in tab_lib:
fluent_mapping = dict(_parse_mapping_config(tab_lib['fluent_alias']))
if 'args_alias' in tab_lib:
for a, b in _parse_mapping_config(tab_lib['args_alias'], split_line=False):
name, args = call_args(a)
args_mapping[name] = (args, b)
for cell in nb.cells:
if cell.cell_type == 'code':
for p, r in patterns:
cell.source = cell.source.replace(p, r)
if fluent_mapping:
cell.source = replace_fluent_alias(cell.source, fluent_mapping)
if args_mapping:
cell.source = replace_args_alias(cell.source, args_mapping)
return nb
def format_code(source: str):
if 'import ' in source:
config = isort.settings.Config(no_lines_before=[
isort.settings.FUTURE, isort.settings.STDLIB, isort.settings.
THIRDPARTY, isort.settings.FIRSTPARTY, isort.settings.LOCALFOLDER])
source = isort.code(source, config=config)
# remove tailing spaces
source = '\n'.join([l.rstrip() for l in source.split('\n')]).strip()
# Disable yapf, as it doesn't work well for long sentences
return source
# fix the bug that yapf cannot handle jupyter magic
for l in source.splitlines():
if l.startswith('%') or l.startswith('!'):
return source
# fix the bug that yapf remove the tailling ;
has_tailling_semicolon = source.rstrip().endswith(';')
style = {
'DISABLE_ENDING_COMMA_HEURISTIC': True,
'SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET': False,
'SPLIT_BEFORE_CLOSING_BRACKET': False,
'SPLIT_BEFORE_DICT_SET_GENERATOR': False,
'SPLIT_BEFORE_LOGICAL_OPERATOR': False,
'SPLIT_BEFORE_NAMED_ASSIGNS': False,
'COLUMN_LIMIT': 78,
'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION': 1,}
source = FormatCode(source, style_config=style)[0].strip()
if has_tailling_semicolon: source += ';'
return source
def format_code_nb(nb):
for cell in nb.cells:
if cell.cell_type == 'code':
cell.source = format_code(cell.source)
return nb
# DEPRECATED
# def save_file(root_dir: str, nbfile: str):
# nbfile = pathlib.Path(nbfile)
# pyfile = root_dir / nbfile.with_suffix('.py')
# with nbfile.open('r') as f:
# nb = notebook.read_markdown(f.read())
# saved = []
# save_all = False
# for cell in nb.cells:
# if cell.cell_type == 'code':
# src = cell.source.lstrip()
# if re.search('# *@save_all', src):
# save_all = True
# if save_all or re.search('# *@save_cell', src):
# saved.append(src)
# else:
# blk = _save_block(src, '@save')
# if blk:
# saved.append(blk)
# if saved:
# with pyfile.open('w') as f:
# f.write(
# f'# This file is generated from {str(nbfile)} automatically through:\n'
# )
# f.write('# d2lbook build lib\n')
# f.write('# Don\'t edit it directly\n\n')
# for blk in saved:
# f.write(blk + '\n\n')
# logging.info(f'Found {len(saved)} blocks in {str(nbfile)}')
# DEPRECATED
# def save_mark(notebooks: List[str], lib_fname: str, save_mark: str):
# logging.info('Matching with the pattern: "%s"', save_mark)
# with open(lib_fname, 'w') as f:
# _write_header(f)
# lib_name = os.path.dirname(lib_fname)
# lib_name = lib_name.split('/')[-1]
# f.write('import sys\n' + lib_name + ' = sys.modules[__name__]\n\n')
# for nb in notebooks:
# _save_code(nb, f, save_mark=save_mark)
# logging.info('Saved into %s', lib_fname)
|
py
|
1a5cb7ad5c20be97e1f229a7fb39f065d4fdb4e1
|
import os
import argparse
import datetime
import tensorflow as tf
import yolo.config as cfg
from yolo.yolo_net import YOLONet
from utils.timer import Timer
from utils.pascal_voc import pascal_voc
slim = tf.contrib.slim
class Solver(object):
def __init__(self, net, data):
self.net = net
self.data = data
self.weights_file = cfg.WEIGHTS_FILE
self.max_iter = cfg.MAX_ITER
self.initial_learning_rate = cfg.LEARNING_RATE
self.decay_steps = cfg.DECAY_STEPS
self.decay_rate = cfg.DECAY_RATE
self.staircase = cfg.STAIRCASE
self.summary_iter = cfg.SUMMARY_ITER
self.save_iter = cfg.SAVE_ITER
self.output_dir = os.path.join(
cfg.OUTPUT_DIR, datetime.datetime.now().strftime('%Y_%m_%d_%H_%M'))
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
self.save_cfg()
self.variable_to_restore = tf.global_variables()
self.saver = tf.train.Saver(self.variable_to_restore, max_to_keep=None)
self.ckpt_file = os.path.join(self.output_dir, 'yolo')
self.summary_op = tf.summary.merge_all()
self.writer = tf.summary.FileWriter(self.output_dir, flush_secs=60)
self.global_step = tf.train.create_global_step()
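        # Exponential decay: lr = initial_lr * decay_rate ** (global_step / decay_steps);
        # with staircase=True the exponent is floored, so the rate drops in discrete steps.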
self.learning_rate = tf.train.exponential_decay(
self.initial_learning_rate, self.global_step, self.decay_steps,
self.decay_rate, self.staircase, name='learning_rate')
self.optimizer = tf.train.GradientDescentOptimizer(
learning_rate=self.learning_rate)
self.train_op = slim.learning.create_train_op(
self.net.total_loss, self.optimizer, global_step=self.global_step)
gpu_options = tf.GPUOptions()
config = tf.ConfigProto(gpu_options=gpu_options)
self.sess = tf.Session(config=config)
self.sess.run(tf.global_variables_initializer())
if self.weights_file is not None:
print('Restoring weights from: ' + self.weights_file)
self.saver.restore(self.sess, self.weights_file)
self.writer.add_graph(self.sess.graph)
def train(self):
train_timer = Timer()
load_timer = Timer()
for step in range(1, self.max_iter + 1):
load_timer.tic()
images, labels = self.data.get()
load_timer.toc()
feed_dict = {self.net.images: images,
self.net.labels: labels}
if step % self.summary_iter == 0:
if step % (self.summary_iter * 10) == 0:
train_timer.tic()
summary_str, loss, _ = self.sess.run(
[self.summary_op, self.net.total_loss, self.train_op],
feed_dict=feed_dict)
train_timer.toc()
log_str = '''{} Epoch: {}, Step: {}, Learning rate: {},\
Loss: {:5.3f}\nSpeed: {:.3f}s/iter,\
Load: {:.3f}s/iter, Remain: {}'''.format(
datetime.datetime.now().strftime('%m-%d %H:%M:%S'),
self.data.epoch,
int(step),
round(self.learning_rate.eval(session=self.sess), 6),
loss,
train_timer.average_time,
load_timer.average_time,
train_timer.remain(step, self.max_iter))
print(log_str)
else:
train_timer.tic()
summary_str, _ = self.sess.run(
[self.summary_op, self.train_op],
feed_dict=feed_dict)
train_timer.toc()
self.writer.add_summary(summary_str, step)
else:
train_timer.tic()
self.sess.run(self.train_op, feed_dict=feed_dict)
train_timer.toc()
if step % self.save_iter == 0:
print('{} Saving checkpoint file to: {}'.format(
datetime.datetime.now().strftime('%m-%d %H:%M:%S'),
self.output_dir))
self.saver.save(
self.sess, self.ckpt_file, global_step=self.global_step)
def save_cfg(self):
with open(os.path.join(self.output_dir, 'config.txt'), 'w') as f:
cfg_dict = cfg.__dict__
for key in sorted(cfg_dict.keys()):
if key[0].isupper():
cfg_str = '{}: {}\n'.format(key, cfg_dict[key])
f.write(cfg_str)
def update_config_paths(data_dir, weights_file):
cfg.DATA_PATH = data_dir
cfg.PASCAL_PATH = os.path.join(data_dir, 'pascal_voc')
cfg.CACHE_PATH = os.path.join(cfg.PASCAL_PATH, 'cache')
cfg.OUTPUT_DIR = os.path.join(cfg.PASCAL_PATH, 'output')
cfg.WEIGHTS_DIR = os.path.join(cfg.PASCAL_PATH, 'weights')
cfg.WEIGHTS_FILE = os.path.join(cfg.WEIGHTS_DIR, weights_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
parser.add_argument('--data_dir', default="data", type=str)
parser.add_argument('--threshold', default=0.2, type=float)
parser.add_argument('--iou_threshold', default=0.5, type=float)
parser.add_argument('--gpu', default='', type=str)
args = parser.parse_args()
if args.gpu is not None:
cfg.GPU = args.gpu
if args.data_dir != cfg.DATA_PATH:
update_config_paths(args.data_dir, args.weights)
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU
yolo = YOLONet()
pascal = pascal_voc('train')
solver = Solver(yolo, pascal)
print('Start training ...')
solver.train()
print('Done training.')
if __name__ == '__main__':
# python train.py --weights YOLO_small.ckpt --gpu 0
main()
|
py
|
1a5cb7d1ab610eedc0719e237dc47441d1b671c5
|
"""
login page
"""
import logging
from zoom.mvc import DynamicView, Controller, View
from zoom.page import page
from zoom.users import Users
from zoom.tools import redirect_to, load_content
from zoom.components import error
import zoom.html as html
class LoginForm(DynamicView):
@property
def registration_link(self):
if self.user.is_member('a_register'):
return html.a('New User?', href='/register')
return ''
@property
def forgot_password(self):
if 'forgot' in self.user.apps:
return load_content('views/forgot_password.html')
return ''
class LoginView(View):
def index(self, *a, **k):
username = k.get('username', '')
user = self.model.user
referrer_url = k.get('referrer')
if referrer_url:
referrer = html.hidden(
id="referrer",
name="referrer",
value=referrer_url,
)
else:
referrer = ''
form = LoginForm(username=username, user=user, referrer=referrer)
return page(form)
class LoginController(Controller):
def login_button(self, **data):
logger = logging.getLogger(__name__)
logger.debug('login_button called')
site = self.model.site
username = data.get('username')
password = data.get('password')
remember_me = bool(data.get('remember_me'))
if username and password:
users = Users(site.db)
user = users.first(username=username, status='A')
if user:
if user.login(self.model, password, remember_me):
                    logger.info('user {!r} successfully logged in'.format(username))
return redirect_to(user.default_app)
logger.debug('failed login attempt for user {!r}'.format(username))
error('incorrect username or password')
elif username:
error('password missing')
else:
error('username missing')
def main(route, request):
return (
LoginController(request)(*route, **request.data) or
LoginView(request)(*route, **request.data)
)
|
py
|
1a5cb7d35f51de2c34f5dfdeed9b5d1b5ea216d0
|
from RLTest import Env
from redisgraph import Graph
from pathos.pools import ProcessPool as Pool
# 1.test getting and setting config
# 2. test overflowing the server when there's a limit
# expect to get error!
# 3. test overflowing the server when there's no limit
# expect not to get any exceptions
GRAPH_NAME = "max_pending_queries"
SLOW_QUERY = "UNWIND range (0, 1000000) AS x WITH x WHERE (x / 2) = 50 RETURN x"
def issue_query(conn, q):
try:
conn.execute_command("GRAPH.QUERY", GRAPH_NAME, q)
return False
except Exception as e:
assert "Max pending queries exceeded" in str(e)
return True
class testPendingQueryLimit():
def __init__(self):
# skip test if we're running under Valgrind
if Env().envRunner.debugger is not None:
Env().skip() # valgrind is not working correctly with multi process
self.env = Env(decodeResponses=True)
self.conn = self.env.getConnection()
def test_01_query_limit_config(self):
# read max queued queries config
result = self.conn.execute_command("GRAPH.CONFIG", "GET", "MAX_QUEUED_QUERIES")
max_queued_queries = result[1]
self.env.assertEquals(max_queued_queries, 4294967295)
# update configuration, set max queued queries
self.conn.execute_command("GRAPH.CONFIG", "SET", "MAX_QUEUED_QUERIES", 10)
# re-read configuration
result = self.conn.execute_command("GRAPH.CONFIG", "GET", "MAX_QUEUED_QUERIES")
max_queued_queries = result[1]
self.env.assertEquals(max_queued_queries, 10)
def stress_server(self):
threadpool_size = self.conn.execute_command("GRAPH.CONFIG", "GET", "THREAD_COUNT")[1]
thread_count = threadpool_size * 5
qs = [SLOW_QUERY] * thread_count
connections = []
pool = Pool(nodes=thread_count)
# init connections
for i in range(thread_count):
connections.append(self.env.getConnection())
# invoke queries
result = pool.map(issue_query, connections, qs)
        # report whether any query was rejected with the pending-queries error
return any(result)
def test_02_overflow_no_limit(self):
# no limit on number of pending queries
limit = 4294967295
self.conn.execute_command("GRAPH.CONFIG", "SET", "MAX_QUEUED_QUERIES", limit)
error_encountered = self.stress_server()
self.env.assertFalse(error_encountered)
def test_03_overflow_with_limit(self):
# limit number of pending queries
limit = 1
self.conn.execute_command("GRAPH.CONFIG", "SET", "MAX_QUEUED_QUERIES", limit)
error_encountered = self.stress_server()
self.env.assertTrue(error_encountered)
|
py
|
1a5cb82c10b3db9c9f1fc7fcde9f7ae5ec877f38
|
import numpy as np
import random
#### Version that maintains IDs
def new_cluster_points(X, mu):
clusters = {}
# this is for excluding IDs from the calculation
tmp_mu = []
for point in mu:
tmp_mu.append(point[1:13])
for x in X:
tmp_x = x[1:13]
        # np.linalg.norm gives the Euclidean distance between two vectors.
        # Here it measures the distance from the sample (IDs excluded) to every
        # current mean, and the index of the closest mean becomes the cluster key.
bestmukey = min([(i[0], np.linalg.norm(tmp_x-tmp_mu[i[0]])) for i in enumerate(tmp_mu)], key=lambda t:t[1])[0]
try:
clusters[bestmukey].append(x)
except KeyError:
clusters[bestmukey] = [x]
return clusters
def new_reevaluate_centers(mu, clusters):
newmu = []
keys = sorted(clusters.keys())
for k in keys:
tmp_mu = []
for point in clusters[k]:
tmp_mu.append(point[1:13])
newmean = np.mean(tmp_mu, axis = 0)
newmean = np.insert(newmean, 0, 0)
newmu.append(newmean)
return newmu
def new_has_converged(mu, oldmu):
tmp_mu = []
tmp_oldmu = []
for point in mu:
tmp_mu.append(point[1:13])
for point in oldmu:
tmp_oldmu.append(point[1:13])
return (set([tuple(a) for a in tmp_mu]) == set([tuple(a) for a in tmp_oldmu]))
def new_find_centers(X, K):
    # random.sample needs a plain sequence, so pass the rows as a list
    oldmu = random.sample(list(X), K)
    mu = random.sample(list(X), K)
while not new_has_converged(mu, oldmu):
oldmu = mu
# Assign all points in X to clusters
clusters = new_cluster_points(X, mu)
# Reevaluate centers
mu = new_reevaluate_centers(oldmu, clusters)
try:
clusters
except:
clusters = new_cluster_points(X, mu) # added to avoid null cluster
return(mu, clusters)
### Original clustering functions without maintaining IDs (allowing multiple dimensions)
def cluster_points(X, mu):
clusters = {}
for x in X:
bestmukey = min([(i[0], np.linalg.norm(x-mu[i[0]])) for i in enumerate(mu)], key=lambda t:t[1])[0]
try:
clusters[bestmukey].append(x)
except KeyError:
clusters[bestmukey] = [x]
return clusters
def reevaluate_centers(mu, clusters):
newmu = []
keys = sorted(clusters.keys())
for k in keys:
newmu.append(np.mean(clusters[k], axis = 0))
return newmu
def has_converged(mu, oldmu):
return (set([tuple(a) for a in mu]) == set([tuple(a) for a in oldmu]))
def find_centers(X, K):
# Initialize to K random centers
    oldmu = random.sample(list(X), K)
    mu = random.sample(list(X), K)
while not has_converged(mu, oldmu):
oldmu = mu
# Assign all points in X to clusters
clusters = cluster_points(X, mu)
# Reevaluate centers
mu = reevaluate_centers(oldmu, clusters)
try:
clusters
except:
clusters = cluster_points(X, mu) # added to avoid null cluster
return(mu, clusters)
def Wk(mu, clusters):
K = len(mu)
try:
r = sum([np.linalg.norm(mu[i]-c)**2/(2*len(c)) for i in range(K) for c in clusters[i]])
except:
r = 1
print("index error")
return r
def bounding_box(X):
size = len(X[0])
xmins = [0 for x in range(size)]
xmaxs = [0 for x in range(size)]
for i in range(0, size):
xmins[i], xmaxs[i] = min(X,key=lambda a:a[i])[i], max(X,key=lambda a:a[i])[i]
return (xmins,xmaxs)
def gap_statistic(X, num_k):
(xmins,xmaxs) = bounding_box(X)
# Dispersion for real distribution
ks = range(1,num_k)
Wks = np.zeros(len(ks))
Wkbs = np.zeros(len(ks))
sk = np.zeros(len(ks))
for indk, k in enumerate(ks):
print("K:" + str(k))
mu, clusters = find_centers(X,k)
Wks[indk] = np.log(Wk(mu, clusters))
# Create B reference datasets
B = 10
BWkbs = np.zeros(B)
for i in range(B):
# print("B: " + str(i))
Xb = []
for n in range(len(X)):
randomvalues = []
for index in range(len(xmins)):
randomvalues.insert(0, random.uniform(xmins[index], xmaxs[index]))
Xb.append(randomvalues)
Xb = np.array(Xb)
mu, clusters = find_centers(Xb,k)
BWkbs[i] = np.log(Wk(mu, clusters))
Wkbs[indk] = sum(BWkbs)/B
sk[indk] = np.sqrt(sum((BWkbs-Wkbs[indk])**2)/B)
sk = sk*np.sqrt(1+1/B)
return(ks, Wks, Wkbs, sk)
#example
input_list = np.array([[1, 2], [4, 5], [4, 3], [4, 5], [3, 3], [1, 3], [7, 8]])
num_k=3
# run the gap analysis to determine K
ks, logWks, logWkbs, sk = gap_statistic(input_list, num_k)
print (ks, logWks, logWkbs, sk)
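# One common way to read these outputs (a sketch of Tibshirani's rule, not part of
# the original script): Gap(k) = logWkbs[k] - logWks[k]; pick the smallest k with
# Gap(k) >= Gap(k+1) - sk[k+1].
gaps = logWkbs - logWks
for i in range(len(ks) - 1):
    if gaps[i] >= gaps[i + 1] - sk[i + 1]:
        print("suggested K:", ks[i])
        break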
|
py
|
1a5cb9527909b33c23aaa69c893e3e4ac924cc3d
|
from __future__ import absolute_import, division
import random
import mock
_test_addresses = {
'1111111111111111111111111111111111': {
'value': 0,
},
'1111111112': {
'value': 1,
},
'11111111121': {
'value': 58,
},
'1111111111111111111114oLvT2': {
'value': 2493516049,
},
}
def test_base58_decoding():
from coldwallet.encoding import base58_decode
for address, info in _test_addresses.items():
assert info['value'] == base58_decode(address)
def test_base58_encoding():
from coldwallet.encoding import base58_encode
for address, info in _test_addresses.items():
assert address.lstrip('1') == base58_encode(info['value'])
def test_block7_encoding():
from coldwallet.encoding import block7_encode
assert block7_encode(0) == '111111N'
maxvalue = 2 ** 36 - 1 # 68719476735
assert block7_encode(maxvalue) == 'zmM9z3t'
def test_block7_decoding():
from coldwallet.encoding import block7_decode
assert block7_decode('1111111') == { 'value': 0, 'valid': False }
assert block7_decode('111111M') == { 'value': 0, 'valid': False }
assert block7_decode('111111N') == { 'value': 0, 'valid': True }
assert block7_decode('111111P') == { 'value': 0, 'valid': False }
assert block7_decode('111111o') == { 'value': 1, 'valid': False }
# Checksum should detect most - 31 out of 32 - typos:
assert block7_decode('X11111N') == { 'value': mock.ANY, 'valid': False }
assert block7_decode('1X1111N') == { 'value': mock.ANY, 'valid': False }
# assert block7_decode('11X111N') == { 'value': mock.ANY, 'valid': True } # undetected typo
assert block7_decode('111X11N') == { 'value': mock.ANY, 'valid': False }
assert block7_decode('1111X1N') == { 'value': mock.ANY, 'valid': False }
assert block7_decode('11111XN') == { 'value': mock.ANY, 'valid': False }
assert block7_decode('111111X') == { 'value': mock.ANY, 'valid': False }
assert block7_decode('zmM9z3s') == { 'value': 2**36-1, 'valid': False }
assert block7_decode('zmM9z3t') == { 'value': 2**36-1, 'valid': True }
assert block7_decode('zmM9z3u') == { 'value': 2**36-1, 'valid': False }
assert block7_decode('XmM9z3t') == { 'value': mock.ANY, 'valid': False }
# assert block7_decode('zXM9z3t') == { 'value': mock.ANY, 'valid': True } # undetected typo
assert block7_decode('zmX9z3t') == { 'value': mock.ANY, 'valid': False }
assert block7_decode('zmMXz3t') == { 'value': mock.ANY, 'valid': False }
# assert block7_decode('zmM9X3t') == { 'value': mock.ANY, 'valid': True } # undetected typo
assert block7_decode('zmM9zXt') == { 'value': mock.ANY, 'valid': False }
assert block7_decode('zmM9z3X') == { 'value': mock.ANY, 'valid': False }
def test_block7_encoding_roundtrip():
from coldwallet.encoding import block7_decode, block7_encode
for test in range(100):
number = random.randrange(2 ** 36)
retval = block7_decode(block7_encode(number))
assert retval['value'] == number, 'value %d was altered to %d in block7 encoding!' % (number, retval['value'])
assert retval['valid'], 'value %d could not be decoded successfully' % number
def test_splitting_data_into_block7s():
from coldwallet.encoding import block7_split, block7_decode
doubledata = b"\x14\x23\x99\xc1\xff\x1d\x31\x0a\x8b" # 9 bytes = 72 bit = 2x36 bit
block7s = block7_split(doubledata)
assert len(block7s) == 2
assert block7_decode(block7s[0]) == { 'value': 0x142399c1f, 'valid': True }
assert block7_decode(block7s[1]) == { 'value': 0xf1d310a8b, 'valid': True }
def test_merging_data_into_block7s():
from coldwallet.encoding import block7_merge
block7s = ['5YZpdK6', 'wZq33nn']
assert block7_merge(block7s) == { 'key': b"\x14\x23\x99\xc1\xff\x1d\x31\x0a\x8b",
'valid': True }
def test_crc8_returns_correct_values():
from coldwallet.encoding import crc8
assert crc8('1234567') == '9f'
assert crc8('1234568') == '0e'
assert crc8('1azZza1') == '95'
|
py
|
1a5cba0d5157456981322de4396882559485a35a
|
from os import environ
import constructs
from aws_cdk import aws_iam
from aws_cdk.core import Environment, Stack
from backend.environment import environment_name
from .constructs.api import API
from .constructs.lambda_layers import LambdaLayers
from .constructs.lds import LDS
from .constructs.notify import Notify
from .constructs.opentopo import OpenTopography
from .constructs.processing import Processing
from .constructs.staging import Staging
from .constructs.storage import Storage
class Application(Stack):
def __init__(self, scope: constructs.Construct, stack_id: str) -> None:
environment = Environment(
account=environ["CDK_DEFAULT_ACCOUNT"], region=environ["CDK_DEFAULT_REGION"]
)
super().__init__(scope, stack_id, env=environment)
env_name = environment_name()
principal: aws_iam.PrincipalBase
if saml_provider_arn := environ.get("GEOSTORE_SAML_IDENTITY_PROVIDER_ARN"):
principal = aws_iam.FederatedPrincipal(
federated=saml_provider_arn,
assume_role_action="sts:AssumeRoleWithSAML",
conditions={"StringEquals": {"SAML:aud": "https://signin.aws.amazon.com/saml"}},
)
else:
principal = aws_iam.AccountPrincipal(
account_id=aws_iam.AccountRootPrincipal().account_id
)
storage = Storage(self, "storage", env_name=env_name)
lambda_layers = LambdaLayers(self, "lambda-layers", env_name=env_name)
processing = Processing(
self,
"processing",
botocore_lambda_layer=lambda_layers.botocore,
env_name=env_name,
principal=principal,
storage_bucket=storage.storage_bucket,
validation_results_table=storage.validation_results_table,
)
Staging(self, "staging", users_role=processing.staging_users_role)
API(
self,
"api",
botocore_lambda_layer=lambda_layers.botocore,
datasets_table=storage.datasets_table,
env_name=env_name,
principal=principal,
state_machine=processing.state_machine,
state_machine_parameter=processing.state_machine_parameter,
sqs_queue=processing.message_queue,
sqs_queue_parameter=processing.message_queue_name_parameter,
storage_bucket=storage.storage_bucket,
validation_results_table=storage.validation_results_table,
)
Notify(
self,
"notify",
botocore_lambda_layer=lambda_layers.botocore,
env_name=env_name,
state_machine=processing.state_machine,
validation_results_table=storage.validation_results_table,
)
if self.node.try_get_context("enableLDSAccess"):
LDS(self, "lds", env_name=env_name, storage_bucket=storage.storage_bucket)
if self.node.try_get_context("enableOpenTopographyAccess"):
OpenTopography(
self, "opentopography", env_name=env_name, storage_bucket=storage.storage_bucket
)
|
py
|
1a5cba9bd4c7ed3c0ba1e2e98f3cdf29d3197770
|
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2011, 2012, 2013, 2014, 2015 E-Democracy.org and
# Contributors.
#
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
import codecs
import os
from setuptools import setup, find_packages
from version import get_version
name = 'gs.auth.oauth.client'
version = get_version()
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
with codecs.open(os.path.join("docs", "HISTORY.rst"),
encoding='utf-8') as f:
long_description += '\n' + f.read()
setup(
name=name,
version=version,
description="outh2 client registration methods",
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
"Environment :: Web Environment",
"Framework :: Zope2",
"Intended Audience :: Developers",
'License :: OSI Approved :: Zope Public License',
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
    keywords='oauth2, client, registration',
author='Richard Waid',
author_email='[email protected]',
maintainer='Michael JasonSmith',
maintainer_email='[email protected]',
url='https://github.com/groupserver/{0}'.format(name),
license='ZPL 2.1',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['.'.join(name.split('.')[:i])
for i in range(1, len(name.split('.')))],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'simplejson',
],
test_suite="{0}.tests.test_all".format(name),
entry_points="""
# -*- Entry points: -*-
""",
)
|
py
|
1a5cbb0807e99ae458c68ff073d4dfb9f65648b7
|
#%%
# read full assignment
# think algo before implementing
# don't use a dict when you need a list
# assignment is still = and not ==
# don't use itertools when you can use np.roll
# check mathematical functions if the parentheses are ok
# networkx is awesome
# sometimes while True is better than a for loop that turns out too small
# networkx adds nodes when adding an edge to a nonexistent node
# bitwise comparison is a nice trick
# fiddling with regex can take a lot of time
# %%
import os
import re
import numpy as np
try:
os.chdir(os.path.join(os.getcwd(), 'day 17'))
print(os.getcwd())
except:
pass
# %%
step = 369
buffer = [0]
pos = 0
for p in range(1,2018):
if p%1000000==0: print(p)
pos = (pos+step)%len(buffer)+1
buffer.insert(pos,p)
buffer[buffer.index(2017)+1]
# %%
# part 2
step = 369
buffer = 1
pos = 0
res = []
for p in range(1,50000000):
if p%1000000==0: print(p)
pos = (pos+step)%buffer+1
if pos == 1 :
print(p)
res.append(p)
buffer+=1
res
# %%
# found this one from the megathread on reddit
from collections import deque
step = 369
spinlock = deque([0])
for i in range(1, 2018):
spinlock.rotate(-step)
spinlock.append(i)
print(spinlock[0])
|
py
|
1a5cbbfaf722d976c83555f12c57e3baad170c33
|
from collections import defaultdict
class Graph():
def __init__(self,vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self,u,v):
self.graph[u].append(v)
def isCyclicUtil(self,v,visited,recStack):
visited[v] = True
recStack[v] = True
        for neighbour in self.graph[v]:
            if not visited[neighbour]:
                if self.isCyclicUtil(neighbour, visited, recStack):
                    return True
            elif recStack[neighbour]:
                return True
#remove from recStack
recStack[v]= False
return False
def isCyclic(self):
visited = [False]*self.V
recStack = [False]*self.V
# loop through all nodes.
        for node in range(self.V):
            if not visited[node]:
                if self.isCyclicUtil(node, visited, recStack):
                    return True
# no cycles found
return False
g = Graph(4)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
if g.isCyclic():
print("Graph has a cycle")
else:
print("Graph has no cycle")
|
py
|
1a5cbc1a1e730ab53a69b66adffc19b38975b8ba
|
from django.forms import ModelForm
from api.models import Person
from django.forms.fields import TextInput
from django.forms.widgets import Textarea, EmailInput, DateInput
class PersonForm(ModelForm):
class Meta:
model = Person
exclude = ('id',)
widgets = {
'firstname':TextInput(attrs={'class':'form-control'}),
'lastname':TextInput(attrs={'class':'form-control'}),
'country':TextInput(attrs={'class':'form-control'}),
'email':EmailInput(attrs={'class':'form-control'}),
'phone':TextInput(attrs={'class':'form-control', 'type':'tel'}),
'occupation_field':TextInput(attrs={'class':'form-control'}),
'occupation':TextInput(attrs={'class':'form-control'}),
'birthdate':DateInput(attrs={'class':'form-control', 'type':'date'}),
'description':Textarea(attrs={'class':'form-control', 'rows':'5'}),
}
|
py
|
1a5cbde59b7870753a427fc0add53feb097f3647
|
from dtc.enums.message_types import MessageTypes
from lib.base_message_type import BaseMessageType
class JournalEntryResponse(BaseMessageType):
def __init__(self,
journal_entry=None,
date_time=None,
is_final_response=None):
self.Type = MessageTypes.JOURNAL_ENTRY_RESPONSE
self.JournalEntry = journal_entry
self.DateTime = date_time
self.IsFinalResponse = is_final_response
@staticmethod
def from_message_short(message_obj):
packet = message_obj.get('F')
return JournalEntryResponse(
journal_entry=packet[0],
date_time=packet[1],
is_final_response=packet[2]
)
@staticmethod
def from_message_long(message_obj):
return JournalEntryResponse(
journal_entry=message_obj.get('JournalEntry'),
date_time=message_obj.get('DateTime'),
is_final_response=message_obj.get('IsFinalResponse')
)
@staticmethod
def from_message(message_obj):
if 'F' in message_obj:
return JournalEntryResponse.from_message_short(message_obj)
else:
return JournalEntryResponse.from_message_long(message_obj)
@staticmethod
def get_message_type_name():
return "JournalEntryResponse"
|
py
|
1a5cc002d7334aac81ca4e5612bc1308a3ed8add
|
from setuptools import setup
setup(
name='SanAntonioScientist-CLI',
version='1.0',
packages=['cli', 'cli.commands'],
include_package_data=True,
install_requires=[
'click',
],
entry_points="""
[console_scripts]
sanantonioscientist=cli.cli:cli
""",
)
|
py
|
1a5cc14ba7bb895f2b947acf60a5e13714260a0c
|
import numpy as np
def Mat4(A):
r"""
Matrix representation of a 4th order tensor
with minor symmety using the :math:`(\phi,\phi)` bases
Args:
A (ndarray of shape (3, 3, 3, 3)): 4th-order tensor
"""
    assert A.ndim == 4, "Only supports 4th-order tensors"
M = np.array(
[
[
A[0, 0, 0, 0],
A[0, 0, 1, 1],
A[0, 0, 2, 2],
2 * A[0, 0, 0, 1],
2 * A[0, 0, 1, 2],
2 * A[0, 0, 0, 2],
],
[
A[1, 1, 0, 0],
A[1, 1, 1, 1],
A[1, 1, 2, 2],
2 * A[1, 1, 0, 1],
2 * A[1, 1, 1, 2],
2 * A[1, 1, 0, 2],
],
[
A[2, 2, 0, 0],
A[2, 2, 1, 1],
A[2, 2, 2, 2],
2 * A[2, 2, 0, 1],
2 * A[2, 2, 1, 2],
2 * A[2, 2, 0, 2],
],
[
A[0, 1, 0, 0],
A[0, 1, 1, 1],
A[0, 1, 2, 2],
2 * A[0, 1, 0, 1],
2 * A[0, 1, 1, 2],
2 * A[0, 1, 0, 2],
],
[
A[1, 2, 0, 0],
A[1, 2, 1, 1],
A[1, 2, 2, 2],
2 * A[1, 2, 0, 1],
2 * A[1, 2, 1, 2],
2 * A[1, 2, 0, 2],
],
[
A[0, 2, 0, 0],
A[0, 2, 1, 1],
A[0, 2, 2, 2],
2 * A[0, 2, 0, 1],
2 * A[0, 2, 1, 2],
2 * A[0, 2, 0, 2],
],
]
)
return M
def Mat2(sig):
r"""
Bijection between a symmetric 2nd order tensor space
and 6-dim vector space using the :math:`\phi` basis
.. math::
\begin{bmatrix}
s_{11} & s_{12} & s_{13} \\
s_{12} & s_{22} & s_{23} \\
s_{13} & s_{23} & s_{33} \\
\end{bmatrix}\iff\begin{bmatrix}
s_{11} \\ s_{22} \\ s_{33} \\ s_{12} \\ s_{23} \\ s_{13}
\end{bmatrix}
Args:
sig (ndarray of dim 1 or 2): 2nd-order tensor
"""
if sig.ndim == 1: # vector to matrix
return np.array(
[
[sig[0], sig[3], sig[5]],
[sig[3], sig[1], sig[4]],
[sig[5], sig[4], sig[2]],
]
)
elif sig.ndim == 2: # matrix to vector
return np.array(
[sig[0, 0], sig[1, 1], sig[2, 2], sig[0, 1], sig[1, 2], sig[0, 2]]
)
else:
        raise NotImplementedError("Only supports vectors or 2nd-order tensors")
def Mat22(eps):
r"""
Bijection between a symmetric 2nd order tensor space
and 6-dim vector space using the :math:`\phi_2` basis
.. math::
\begin{bmatrix}
e_{11} & e_{12} & e_{13} \\
e_{12} & e_{22} & e_{23} \\
e_{13} & e_{23} & e_{33} \\
\end{bmatrix}\iff\begin{bmatrix}
e_{11} \\ e_{22} \\ e_{33} \\ 2e_{12} \\ 2e_{23} \\ 2e_{13}
\end{bmatrix}
Args:
eps (ndarray of dim 1 or 2): 2nd-order tensor
"""
if eps.ndim == 1: # vector to matrix
return np.array(
[
[eps[0], eps[3] / 2, eps[5] / 2],
[eps[3] / 2, eps[1], eps[4] / 2],
[eps[5] / 2, eps[4] / 2, eps[2]],
]
)
elif eps.ndim == 2: # matrix to vector
return np.array(
[
eps[0, 0],
eps[1, 1],
eps[2, 2],
2 * eps[0, 1],
2 * eps[1, 2],
2 * eps[0, 2],
]
)
else:
        raise Exception("Only supports vectors or 2nd-order tensors")
def Mat2S(eps):
r"""
Bijection between a symmetric 2nd order tensor space
and 6-dim vector space using the :math:`\phi_\mathrm{S}` basis
.. math::
\begin{bmatrix}
e_{11} & e_{12} & e_{13} \\
e_{12} & e_{22} & e_{23} \\
e_{13} & e_{23} & e_{33} \\
\end{bmatrix}\iff\begin{bmatrix}
e_{11} \\ e_{22} \\ e_{33} \\ \sqrt{2}e_{12} \\ \sqrt{2}e_{23} \\ \sqrt{2}e_{13}
\end{bmatrix}
Args:
eps (ndarray of dim 1 or 2): 2nd-order tensor
"""
sq2 = np.sqrt(2)
if eps.ndim == 1: # vector to matrix
return np.array(
[
[eps[0], eps[3] / sq2, eps[5] / sq2],
[eps[3] / sq2, eps[1], eps[4] / sq2],
[eps[5] / sq2, eps[4] / sq2, eps[2]],
]
)
elif eps.ndim == 2: # matrix to vector
return np.array(
[
eps[0, 0],
eps[1, 1],
eps[2, 2],
sq2 * eps[0, 1],
sq2 * eps[1, 2],
sq2 * eps[0, 2],
]
)
else:
        raise Exception("Only supports vectors or 2nd-order tensors")
def ij2M(ij):
"""
Convert (i, j) indices of a symmetric
2nd-order tensor to its vector index
"""
if ij == "11":
return 0
elif ij == "22":
return 1
elif ij == "33":
return 2
elif ij == "12" or ij == "21":
return 3
elif ij == "23" or ij == "32":
return 4
elif ij == "13" or ij == "31":
return 5
def ijkl2MN(ijkl):
"""
Convert (i, j, k, l) indices of a symmetric
    4th-order tensor to its matrix index
"""
ij = ijkl[:2]
kl = ijkl[2:]
M = ij2M(ij)
N = ij2M(kl)
return M, N
def MatPG(v):
r"""
Matrix that converts a 2nd-order tensor
from the principal frame (:math:`\phi` basis) to the global frame (:math:`\phi` basis)
Args:
v (ndarray of shape (3, 3)): Principal directions along its columns
"""
return np.array(
[
[
v[0, 0] ** 2,
v[0, 1] ** 2,
v[0, 2] ** 2,
2 * v[0, 0] * v[0, 1],
2 * v[0, 1] * v[0, 2],
2 * v[0, 0] * v[0, 2],
],
[
v[1, 0] ** 2,
v[1, 1] ** 2,
v[1, 2] ** 2,
2 * v[1, 0] * v[1, 1],
2 * v[1, 1] * v[1, 2],
2 * v[1, 0] * v[1, 2],
],
[
v[2, 0] ** 2,
v[2, 1] ** 2,
v[2, 2] ** 2,
2 * v[2, 0] * v[2, 1],
2 * v[2, 1] * v[2, 2],
2 * v[2, 0] * v[2, 2],
],
[
v[0, 0] * v[1, 0],
v[0, 1] * v[1, 1],
v[0, 2] * v[1, 2],
v[0, 0] * v[1, 1] + v[0, 1] * v[1, 0],
v[0, 1] * v[1, 2] + v[0, 2] * v[1, 1],
v[0, 0] * v[1, 2] + v[0, 2] * v[1, 0],
],
[
v[1, 0] * v[2, 0],
v[1, 1] * v[2, 1],
v[1, 2] * v[2, 2],
v[1, 0] * v[2, 1] + v[1, 1] * v[2, 0],
v[1, 1] * v[2, 2] + v[1, 2] * v[2, 1],
v[1, 0] * v[2, 2] + v[1, 2] * v[2, 0],
],
[
v[0, 0] * v[2, 0],
v[0, 1] * v[2, 1],
v[0, 2] * v[2, 2],
v[0, 0] * v[2, 1] + v[0, 1] * v[2, 0],
v[0, 1] * v[2, 2] + v[0, 2] * v[2, 1],
v[0, 0] * v[2, 2] + v[0, 2] * v[2, 0],
],
]
)
def MatGP(v):
r"""
Matrix that converts a 2nd-order tensor
from the global frame (:math:`\phi` basis) to the principal frame (:math:`\phi` basis)
Args:
v (ndarray of shape (3, 3)): Principal directions along its columns
"""
return np.array(
[
[
v[0, 0] ** 2,
v[1, 0] ** 2,
v[2, 0] ** 2,
2 * v[0, 0] * v[1, 0],
2 * v[1, 0] * v[2, 0],
2 * v[0, 0] * v[2, 0],
],
[
v[0, 1] ** 2,
v[1, 1] ** 2,
v[2, 1] ** 2,
2 * v[0, 1] * v[1, 1],
2 * v[1, 1] * v[2, 1],
2 * v[0, 1] * v[2, 1],
],
[
v[0, 2] ** 2,
v[1, 2] ** 2,
v[2, 2] ** 2,
2 * v[0, 2] * v[1, 2],
2 * v[1, 2] * v[2, 2],
2 * v[0, 2] * v[2, 2],
],
[
v[0, 0] * v[0, 1],
v[1, 0] * v[1, 1],
v[2, 0] * v[2, 1],
v[0, 0] * v[1, 1] + v[0, 1] * v[1, 0],
v[1, 0] * v[2, 1] + v[1, 1] * v[2, 0],
v[0, 0] * v[2, 1] + v[0, 1] * v[2, 0],
],
[
v[0, 1] * v[0, 2],
v[1, 1] * v[1, 2],
v[2, 1] * v[2, 2],
v[0, 1] * v[1, 2] + v[0, 2] * v[1, 1],
v[1, 1] * v[2, 2] + v[1, 2] * v[2, 1],
v[0, 1] * v[2, 2] + v[0, 2] * v[2, 1],
],
[
v[0, 0] * v[0, 2],
v[1, 0] * v[1, 2],
v[2, 0] * v[2, 2],
v[0, 0] * v[1, 2] + v[0, 2] * v[1, 0],
v[1, 0] * v[2, 2] + v[1, 2] * v[2, 0],
v[0, 0] * v[2, 2] + v[0, 2] * v[2, 0],
],
]
)
def MatGP2(v):
r"""
Matrix that converts a 2nd-order tensor
from the global frame (:math:`\phi_2` basis) to the principal frame (:math:`\phi` basis)
Args:
v (ndarray of shape (3, 3)): Principal directions along its columns
"""
return np.array(
[
[
v[0, 0] ** 2,
v[1, 0] ** 2,
v[2, 0] ** 2,
v[0, 0] * v[1, 0],
v[1, 0] * v[2, 0],
v[0, 0] * v[2, 0],
],
[
v[0, 1] ** 2,
v[1, 1] ** 2,
v[2, 1] ** 2,
v[0, 1] * v[1, 1],
v[1, 1] * v[2, 1],
v[0, 1] * v[2, 1],
],
[
v[0, 2] ** 2,
v[1, 2] ** 2,
v[2, 2] ** 2,
v[0, 2] * v[1, 2],
v[1, 2] * v[2, 2],
v[0, 2] * v[2, 2],
],
[
v[0, 0] * v[0, 1],
v[1, 0] * v[1, 1],
v[2, 0] * v[2, 1],
(v[0, 0] * v[1, 1] + v[0, 1] * v[1, 0]) / 2,
(v[1, 0] * v[2, 1] + v[1, 1] * v[2, 0]) / 2,
(v[0, 0] * v[2, 1] + v[0, 1] * v[2, 0]) / 2,
],
[
v[0, 1] * v[0, 2],
v[1, 1] * v[1, 2],
v[2, 1] * v[2, 2],
(v[0, 1] * v[1, 2] + v[0, 2] * v[1, 1]) / 2,
(v[1, 1] * v[2, 2] + v[1, 2] * v[2, 1]) / 2,
(v[0, 1] * v[2, 2] + v[0, 2] * v[2, 1]) / 2,
],
[
v[0, 0] * v[0, 2],
v[1, 0] * v[1, 2],
v[2, 0] * v[2, 2],
(v[0, 0] * v[1, 2] + v[0, 2] * v[1, 0]) / 2,
(v[1, 0] * v[2, 2] + v[1, 2] * v[2, 0]) / 2,
(v[0, 0] * v[2, 2] + v[0, 2] * v[2, 0]) / 2,
],
]
)
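# Editorial usage sketch (not part of the original module): a round-trip check for
# Mat2 and the matrix form of the symmetric 4th-order identity tensor.
if __name__ == "__main__":
    sig = np.array([[1.0, 4.0, 6.0],
                    [4.0, 2.0, 5.0],
                    [6.0, 5.0, 3.0]])
    v = Mat2(sig)                      # [s11, s22, s33, s12, s23, s13]
    assert np.allclose(Mat2(v), sig)   # vector -> matrix recovers the tensor
    # I_ijkl = (d_ik d_jl + d_il d_jk) / 2, the identity on symmetric 2nd-order tensors
    d = np.eye(3)
    I4 = 0.5 * (np.einsum("ik,jl->ijkl", d, d) + np.einsum("il,jk->ijkl", d, d))
    assert np.allclose(Mat4(I4), np.eye(6))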
|
py
|
1a5cc1715a6b28aa0ab88a021624d0bc92f90fe4
|
"""oss_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from django.views.generic import RedirectView
import info.views
import info.apis
import portal.views
urlpatterns = [
#path('admin/', admin.site.urls),
url(r'^oss/update_info/$', info.views.UpdateInfoView.as_view()),
url(r'^oss/update_info/api/', include(info.apis.router.urls)),
url(r'^portal/$', portal.views.PortalView.as_view()),
url(r'^portal/about/$', portal.views.PortalAboutView.as_view()),
url(r'^$', RedirectView.as_view(url='/portal/')),
]
|
py
|
1a5cc2e1a3ea39514e8478659e3cac4a70981282
|
from sys import platform
import sys
import os
import numpy as np
import shutil
import json
from FunctionalPackage import State
import random
from queue import Queue  # FIFO queue
import re
import copy
if platform == "linux" or platform == "linux2":# this is linux
os.environ['SUMO_HOME'] = '/usr/share/sumo'
try:
import traci
import traci.constants as tc
except ImportError:
if "SUMO_HOME" in os.environ:
print(os.path.join(os.environ["SUMO_HOME"], "tools"))
sys.path.append(
os.path.join(os.environ["SUMO_HOME"], "tools")
)
try:
import traci
import traci.constants as tc
except ImportError:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
else:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
elif platform == "win32":
os.environ['SUMO_HOME'] = 'C:\\Program Files (x86)\\DLR\\Sumo'
try:
import traci
import traci.constants as tc
except ImportError:
if "SUMO_HOME" in os.environ:
print(os.path.join(os.environ["SUMO_HOME"], "tools"))
sys.path.append(
os.path.join(os.environ["SUMO_HOME"], "tools")
)
try:
import traci
import traci.constants as tc
except ImportError:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
else:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
elif platform =='darwin':
os.environ['SUMO_HOME'] = "/Users/{0}/sumo/sumo-git".format(os.getlogin())
try:
import traci
import traci.constants as tc
except ImportError:
if "SUMO_HOME" in os.environ:
print(os.path.join(os.environ["SUMO_HOME"], "tools"))
sys.path.append(
os.path.join(os.environ["SUMO_HOME"], "tools")
)
try:
import traci
import traci.constants as tc
except ImportError:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
else:
raise EnvironmentError("Please set SUMO_HOME environment variable or install traci as python module!")
else:
sys.exit("platform error")
CAV_rate = 0.5 # smart rate
carWidth = 3
grid_width = 4
area_length = 1000
travel_time_scale = 300
listLanes = ['edge1-0_1', 'edge1-0_2', 'edge2-0_1', 'edge2-0_2',
'edge3-0_1', 'edge3-0_2', 'edge4-0_1', 'edge4-0_2']
intelli_edges = ['edge1-0', 'edge2-0', 'edge3-0', 'edge4-0']
center_edges = ['-gneE10', '-gneE11', '-gneE12', '-gneE13']
routeID_list = ['routewe', 'routeew', 'routesn', 'routens']
route_lane_id_list = [
[[['-gneE10_1', '-gneE10_2'], ['edge1-0_1', 'edge1-0_2'], ['edge0-2_1', 'edge0-2_2']],
[['-gneE10_0'], ['gneE3_2'], ['gneE4_1'], ['gneE5_2'], ['gneE6_0']]],
[[['-gneE11_1', '-gneE11_2'], ['edge2-0_1', 'edge2-0_2'], ['edge0-1_1', 'edge0-1_2']],
[['-gneE11_0'], ['gneE7_2'], ['-gneE9_1'], ['-gneE2_2'], ['-gneE1_0']]],
[[['-gneE12_1', '-gneE12_2'], ['edge3-0_1', 'edge3-0_2'], ['edge0-4_1', 'edge0-4_2']],
[['-gneE12_0'], ['gneE5_2'], ['gneE6_1'], ['gneE7_2'], ['-gneE9_0']]],
[[['-gneE13_1', '-gneE13_2'], ['edge4-0_1', 'edge4-0_2'], ['edge0-3_1', 'edge0-3_2']],
[['-gneE13_0'], ['-gneE2_2'], ['-gneE1_1'], ['gneE3_2'], ['gneE4_0']]]
]
# route_lane_id_list[one of the four approach directions][one of the two routes][each edge on the route][the lanes on each edge]
def get_list(my_list, new_list):
for item in my_list:
if isinstance(item, list):
new_list = get_list(item, new_list)
else:
# print(item)
new_list.append(item)
return new_list
intelli_lanes = [[['edge1-0_1','edge1-0_2'],['edge1-0_3']],
[['edge2-0_1','edge2-0_2'],['edge2-0_3']],
[['edge3-0_1','edge3-0_2'],['edge3-0_3']],
[['edge4-0_1','edge4-0_2'],['edge4-0_3']]]
intelli_loops = [[['e1Detector_edge1-0_1_3','e1Detector_edge1-0_2_4'],['e1Detector_edge1-0_3_5']],
[['e1Detector_edge2-0_1_15','e1Detector_edge2-0_2_16'],['e1Detector_edge2-0_3_17']],
[['e1Detector_edge3-0_1_28','e1Detector_edge3-0_2_27'],['e1Detector_edge3-0_3_29']],
[['e1Detector_edge4-0_1_37','e1Detector_edge4-0_2_38'],['e1Detector_edge4-0_3_39']]]
intelli_loops_outgoing = [[['e1Detector_edge1-0_1_3_outgoing','e1Detector_edge1-0_2_4_outgoing'],['e1Detector_edge1-0_3_5_outgoing']],
[['e1Detector_edge2-0_1_15_outgoing','e1Detector_edge2-0_2_16_outgoing'],['e1Detector_edge2-0_3_17_outgoing']],
[['e1Detector_edge3-0_1_28_outgoing','e1Detector_edge3-0_2_27_outgoing'],['e1Detector_edge3-0_3_29_outgoing']],
[['e1Detector_edge4-0_1_37_outgoing','e1Detector_edge4-0_2_38_outgoing'],['e1Detector_edge4-0_3_39_outgoing']]]
intelli_lanes_list=[]
intelli_lanes_list = get_list(intelli_lanes,intelli_lanes_list)
intelli_eff_lane_num = 8
through_lane_id_list = []
through_lane_id_list = get_list(route_lane_id_list, through_lane_id_list)
through_lane_id_list += ['edge1-0_3','edge2-0_3','edge3-0_3','edge4-0_3']
base_travel_time = 500 / 15 # base travel time length/ speed
timeslot_factor=1
reward_weight = 1.0
C = 30*timeslot_factor # cycle length (sec)
s = 2*1800 / 3600 # vehicles per sec
Step_len = C # seconds
node_light_7 = "node0"
normal_speed = 13
varrho = 0.7
class SUMO_Agent(object):
def __init__(self, sumo_cmd_str, path_set, action_dim):
self.path_set = path_set
self.start_sumo(sumo_cmd_str)
self.induction_loop_ID_list = traci.inductionloop.getIDList()
self.model_based_TT = {'0':[],'1':[],'2':[],'3':[]}
self.speed_based_TT = {'0':[],'1':[],'2':[],'3':[]}
self.induction_loop_num = dict()
for loop_id in self.induction_loop_ID_list:
self.induction_loop_num[loop_id] = Queue()
for i in range(Step_len*4):
self.induction_loop_num[loop_id].put(0)
self.induction_loop_arrival_rate = dict(zip(intelli_lanes_list, list(np.zeros(len(intelli_lanes_list)))))
self.dic_vehicles = {}
self.current_phase = 0
self.current_phase_duration = 0
self.select_space_length = 50 # 50 grids per lane
self.advise1 = 0 # straight lane
if action_dim <= 2:
self.advise2 = 0
else:
self.advise2 = list(np.zeros(4))
self.advise3 = 0 # turn left lane
# vehicles information
self.all_vehs_info = dict()
# dictionary for record the information of all vehicles in whole simulation
# there are four elements for each vehicle
#[accu. wait time, enter time, travel time, type_index(0: in other road; 1: straight in main road; 2: shunt in main road)]
self.new_vehs = set()
self.current_all_vehs = set()
self.main_vehs = set()
self.main_new_vehs = set()
self.main_new_vehs_4decision = set()
self.main_new_turn_vehs = set()
self.last_step_all_vehs = set()
self.last_step_main_vehs = set()
self.over_sau_time = list(np.zeros(8))
self.straight_num = np.zeros(4)
self.shunt_num = np.zeros(4)
self.smart_num = np.zeros(4)
self.lanes_travel_time_dict = dict()
self.lanes_veh_Num_time_dict = dict()
self.lanes_MeanSpeed_dict = dict()
# self.travel_time_update_lanes = []
self.MeanSpeed_update_lanes = []
for lane in through_lane_id_list:
# self.travel_time_update_lanes.append(lane)
self.MeanSpeed_update_lanes.append(lane)
self.lanes_travel_time_dict[lane] = 500/normal_speed
self.lanes_veh_Num_time_dict[lane]=0
self.lanes_MeanSpeed_dict[lane] = [normal_speed]
self.update_state()
self.share_straight_travel_time = np.zeros(4)
self.share_reroute_travel_time = np.zeros(4)
self.real_g_ratio = 1/2 *np.ones(4)
def start_sumo(self, sumo_cmd_str):
traci.start(sumo_cmd_str)
def status_calculator(self):
# vehs_num,
# queue_len,
# current_phase,
# est_arrival_rate,
# ave_traval_time:
for lane in self.MeanSpeed_update_lanes:
# self.lanes_travel_time_dict[lane].append(np.clip(traci.lane.getTraveltime(lane),0,300))
Lane_veh_Num = self.lanes_veh_Num_time_dict[lane]
MeanSpeed = np.mean(self.lanes_MeanSpeed_dict[lane])
if MeanSpeed ==0:
est_traval_time =(Lane_veh_Num/70) *300 + 500/normal_speed
else:
est_traval_time = 500/MeanSpeed
self.lanes_travel_time_dict[lane]=np.clip(est_traval_time,0,300)
edge_NumVehiclesTracker = []
edge_QueueTracker = []
edge_arrival_rateTracker = []
edge_shunt_ave_traval_timeTracker = []
current_phaseTracker = traci.trafficlight.getPhase(node_light_7)
edge_straight_ave_traval_timeTracker = []
edge_straight_intelli_ave_traval_timeTracker = []
edge_outgoing_rateTracker = []
# ================ count vehicles in edge
for eff_lane_idx in range(len(intelli_lanes)):
straight_double_lanes = intelli_lanes[eff_lane_idx][0]
lane1_veh_num = traci.lane.getLastStepVehicleNumber(straight_double_lanes[0])/100
lane2_veh_num = traci.lane.getLastStepVehicleNumber(straight_double_lanes[1])/100
edge_NumVehiclesTracker.append(lane1_veh_num+lane2_veh_num)
for eff_lane_idx in range(len(intelli_lanes)):
leftTurn_single_lanes = intelli_lanes[eff_lane_idx][1]
lane3_veh_num = traci.lane.getLastStepVehicleNumber(leftTurn_single_lanes[0])/100
edge_NumVehiclesTracker.append(lane3_veh_num)
# ================= COUNT HALTED VEHICLES (I.E. QUEUE SIZE)
for eff_lane_idx in range(len(intelli_lanes)):
straight_double_lanes = intelli_lanes[eff_lane_idx][0]
lane1_veh_num = traci.lane.getLastStepHaltingNumber(straight_double_lanes[0])/100
lane2_veh_num = traci.lane.getLastStepHaltingNumber(straight_double_lanes[1])/100
edge_QueueTracker.append(lane1_veh_num+lane2_veh_num)
for eff_lane_idx in range(len(intelli_lanes)):
leftTurn_single_lanes = intelli_lanes[eff_lane_idx][1]
lane3_veh_num = traci.lane.getLastStepHaltingNumber(leftTurn_single_lanes[0])/100
edge_QueueTracker.append(lane3_veh_num)
# ================= Arrive Rate
for eff_loop_idx in range(len(intelli_loops)):
straight_double_lanes = intelli_lanes[eff_loop_idx][0]
straight_double_loops = intelli_loops[eff_loop_idx][0]
lane_arrive = np.zeros(2)
for loop_idx in range(len(straight_double_loops)):
loop_id = straight_double_loops[loop_idx]
lane_id = straight_double_lanes[loop_idx]
last_step_mean_speed = traci.inductionloop.getLastStepMeanSpeed(loop_id)
last_step_vehs_num = traci.lane.getLastStepVehicleNumber(lane_id)
if (last_step_mean_speed < 5) and (last_step_vehs_num > 70):
lane_arrive[loop_idx] = s/2
else:
lane_arrive[loop_idx]= np.mean(np.array(self.induction_loop_num[loop_id].queue))
edge_arrival_rateTracker.append(np.sum(lane_arrive))
for eff_loop_idx in range(len(intelli_loops)):
leftTurn_single_lanes = intelli_lanes[eff_loop_idx][1]
leftTurn_single_loops = intelli_loops[eff_loop_idx][1]
loop_id = leftTurn_single_loops[0]
lane_id = leftTurn_single_lanes[0]
last_step_mean_speed = traci.inductionloop.getLastStepMeanSpeed(loop_id)
last_step_vehs_num = traci.lane.getLastStepVehicleNumber(lane_id)
if (last_step_mean_speed < 5) and (last_step_vehs_num > 70):
lane_arrive = s/2
else:
lane_arrive= np.mean(np.array(self.induction_loop_num[loop_id].queue))
edge_arrival_rateTracker.append(lane_arrive)
# ================= Outgoing Rate
for eff_loop_idx in range(len(intelli_loops_outgoing)):
straight_double_lanes = intelli_lanes[eff_loop_idx][0]
straight_double_loops = intelli_loops_outgoing[eff_loop_idx][0]
lane_arrive = np.zeros(2)
for loop_idx in range(len(straight_double_loops)):
loop_id = straight_double_loops[loop_idx]
lane_id = straight_double_lanes[loop_idx]
last_step_mean_speed = traci.inductionloop.getLastStepMeanSpeed(loop_id)
last_step_vehs_num = traci.lane.getLastStepVehicleNumber(lane_id)
lane_arrive[loop_idx]= np.mean(np.array(self.induction_loop_num[loop_id].queue))
edge_outgoing_rateTracker.append(np.sum(lane_arrive))
for eff_loop_idx in range(len(intelli_loops_outgoing)):
leftTurn_single_lanes = intelli_lanes[eff_loop_idx][1]
leftTurn_single_loops = intelli_loops_outgoing[eff_loop_idx][1]
loop_id = leftTurn_single_loops[0]
lane_id = leftTurn_single_lanes[0]
last_step_mean_speed = traci.inductionloop.getLastStepMeanSpeed(loop_id)
last_step_vehs_num = traci.lane.getLastStepVehicleNumber(lane_id)
lane_arrive= np.mean(np.array(self.induction_loop_num[loop_id].queue))
edge_outgoing_rateTracker.append(lane_arrive)
for route_index in range(len(route_lane_id_list)):
shunt_route = route_lane_id_list[route_index][1]
route_travel_time = 0
for lanes_list in shunt_route:
lanes_travel = 0
for lane in lanes_list:
lane_travel = self.lanes_travel_time_dict[lane]
lanes_travel += lane_travel
lanes_travel = lanes_travel/len(lanes_list)
route_travel_time += lanes_travel
route_travel_time += 500/15
edge_shunt_ave_traval_timeTracker.append(route_travel_time/travel_time_scale)
for route_index in range(len(route_lane_id_list)):
straight_route = route_lane_id_list[route_index][0]
straight_route_stat_based = [straight_route[0], straight_route[2]]
route_travel_time = 0
for lanes_list in straight_route_stat_based:
lanes_travel = 0
for lane in lanes_list:
lane_travel = self.lanes_travel_time_dict[lane]
lanes_travel += lane_travel
lanes_travel = lanes_travel/len(lanes_list)
route_travel_time += lanes_travel
route_travel_time += 500/15
edge_straight_ave_traval_timeTracker.append(route_travel_time/travel_time_scale)
for route_index in range(len(route_lane_id_list)):
straight_route = route_lane_id_list[route_index][0]
straight_route_intelli = [straight_route[1]]
route_travel_time = 0
for lanes_list in straight_route_intelli:
lanes_travel = 0
for lane in lanes_list:
lanes_travel += self.lanes_travel_time_dict[lane]
lanes_travel = lanes_travel / len(lanes_list)
route_travel_time += lanes_travel
edge_straight_intelli_ave_traval_timeTracker.append(route_travel_time/travel_time_scale)
for eff_lane_idx in range(len(intelli_lanes)):
leftTurn_single_lanes = intelli_lanes[eff_lane_idx][1]
lane_id = leftTurn_single_lanes[0]
lane3_travel = self.lanes_travel_time_dict[lane_id]
edge_straight_intelli_ave_traval_timeTracker.append(lane3_travel/travel_time_scale)
return [edge_NumVehiclesTracker, edge_QueueTracker, current_phaseTracker, edge_arrival_rateTracker,
edge_shunt_ave_traval_timeTracker, edge_straight_ave_traval_timeTracker, edge_straight_intelli_ave_traval_timeTracker,
edge_outgoing_rateTracker]
def update_state(self):
self.status_tracker = self.status_calculator()
max_we = get_max_queue_length(['edge1-0_1', 'edge1-0_2', 'edge2-0_1', 'edge2-0_2'])
max_sn = get_max_queue_length(['edge3-0_1', 'edge3-0_2', 'edge4-0_1', 'edge4-0_2'])
if max_we > 50:
self.advise1 = 1 *(random.random()>0.5)
elif max_sn > 50:
self.advise1 = -1*(random.random()>0.5)
max_we_turn_left = get_max_queue_length(['edge1-0_3', 'edge2-0_3'])
max_sn_turn_left = get_max_queue_length(['edge3-0_3', 'edge4-0_3'])
if max_we_turn_left > 50:
self.advise3 = 1*(random.random()>0.5)
elif max_sn_turn_left > 50:
self.advise3 = -1*(random.random()>0.5)
self.state = State(vehs_num=np.reshape(np.array(self.status_tracker[0]), newshape=(1, 8)),
queue_len=np.reshape(np.array(self.status_tracker[1]), newshape=(1, 8)),
est_arrival_rate=np.reshape(np.array(self.status_tracker[3]), newshape=(1, 8)),
over_sau_time=np.reshape(np.array(self.over_sau_time)/300, newshape=(1, 8)),
ave_shunt_traval_time=np.reshape(np.array(self.status_tracker[4]), newshape=(1, 4)),
ave_straight_traval_time=np.reshape(np.array(self.status_tracker[5]), newshape=(1, 4)),
ave_itelli_traval_time=np.reshape(np.array(self.status_tracker[6]), newshape=(1, 8)),
current_phase=np.reshape(np.array(self.status_tracker[2]), newshape=(1, 1)),)
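    # Editorial note (not from the original author): the estimator below resembles a
    # Webster/Akcelik-style delay model: a uniform-delay term for the undersaturated
    # case plus an incremental term driven by the degree of saturation X = v / c and
    # the accumulated oversaturation time, with the result capped at 300 s.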
def travel_time_model_based(self, g_ratio, v, route_index):
if route_index>3:
s_tem = (s/2)
que_thre = 15
else:
s_tem = s
que_thre = 30
if self.status_tracker[1][route_index]*100 >que_thre:
c = self.status_tracker[7][route_index]
else:
c = s_tem * g_ratio
if c==0:
X = 2
else:
X = v/c
if g_ratio == 1:
uniform_delay = 0
else:
uniform_delay = (C/2)*((1-g_ratio)**2/(1-min(X, 1)*g_ratio))
if (X < varrho) :
if X == 0:
add_delay = 0
else:
add_delay = X**2/(2*v*(1-X))
else:
X0 = 0.67 + s_tem * g_ratio * C / 600
if c == 0:
add_delay = ((2 * self.over_sau_time[route_index] + Step_len) * 1 / 4) * (
(X - 1) + np.sqrt((X - 1) ** 2 + (12 * (X - X0) / (1 * (2 * self.over_sau_time[route_index] + Step_len)))))
else:
add_delay = ((2 * self.over_sau_time[route_index] + Step_len) * 1 / 4) * (
(X - 1) + np.sqrt((X - 1) ** 2 + (12 * (X - X0) / (c * (2 * self.over_sau_time[route_index] + Step_len)))))
total_travel_time = min(base_travel_time + uniform_delay + add_delay, 300)
return total_travel_time
def return_reward(self, g_ratio):
vehs_num = 0
travel_time = 0
travel_time_existing = 0
vehs_num_existing = 0
for route_index in range(4):
vehs_num += self.shunt_num[route_index] + self.straight_num[route_index]
travel_time += (
self.shunt_num[route_index] * (self.share_reroute_travel_time[route_index]/travel_time_scale) + self.straight_num[route_index] *
(self.share_straight_travel_time[route_index]/travel_time_scale))
travel_time_existing += (self.travel_time_model_based(
g_ratio[0] if route_index < 2 else g_ratio[2], self.status_tracker[3][route_index], route_index)/travel_time_scale * self.status_tracker[0][route_index]*100 + self.travel_time_model_based(
g_ratio[1] if route_index < 2 else g_ratio[3], self.status_tracker[3][4+route_index], 4+route_index)/travel_time_scale * self.status_tracker[0][4+route_index]*100)
if self.status_tracker[1][route_index]*100 >30:
c = self.status_tracker[7][route_index]
else:
c = (s * (g_ratio[0] if route_index < 2 else g_ratio[2]))
if self.status_tracker[3][route_index]> (c):
self.over_sau_time[route_index] = min(self.over_sau_time[route_index] + Step_len, 300*timeslot_factor)
else:
self.over_sau_time[route_index] = max(self.over_sau_time[route_index]-Step_len, 0)
if (self.status_tracker[1][route_index]*100 <15) and (self.status_tracker[0][route_index]*100 <15) and (self.over_sau_time[route_index]>60):
self.over_sau_time[route_index] = 0
if self.status_tracker[1][route_index+4]*100 >15:
c = self.status_tracker[7][route_index+4]
else:
c = ((s/2) * (g_ratio[1] if route_index < 2 else g_ratio[3]))
if self.status_tracker[3][4+route_index]>(c):
self.over_sau_time[4+route_index] = min(self.over_sau_time[4+route_index] + Step_len, 300*timeslot_factor)
else:
self.over_sau_time[4+route_index] = max(self.over_sau_time[4+route_index]-Step_len, 0)
if (self.status_tracker[1][route_index+4]*100 <7) and (self.status_tracker[0][route_index+4]*100 <7) and (self.over_sau_time[route_index+4]>60):
self.over_sau_time[route_index+4] = 0
vehs_num_existing += (self.status_tracker[0][route_index]+self.status_tracker[0][4+route_index])*100
if vehs_num > 0:
new_vehs_reward = 200/travel_time_scale - travel_time/vehs_num
else:
new_vehs_reward = 0
if vehs_num_existing > 0:
existing_vehs_reward = 50/travel_time_scale - travel_time_existing/vehs_num_existing
else:
existing_vehs_reward = 0
reward = (reward_weight*existing_vehs_reward + new_vehs_reward)/2
reward = max(min(reward, 1), -1)
return reward
def turn_right_ratio_based(self, ratio):
# ratio: including the turn ratio for four edges(1*4)
for veh_id in self.main_new_vehs_4decision:
edge_id = traci.vehicle.getRoadID(veh_id)
route_index = center_edges.index(edge_id)
            # center_edges = ['-gneE10', '-gneE11', '-gneE12', '-gneE13'] (defined above)
target_ratio = ratio[route_index]
current_total = self.shunt_num[route_index]+self.straight_num[route_index]
if self.shunt_num[route_index] == 0:
current_ratio = 0
else:
current_ratio = self.shunt_num[route_index] / (current_total)
rnd = np.random.rand(1)
self.all_vehs_info[veh_id][6]= route_index
if rnd < CAV_rate:
self.smart_num[route_index] += 1
if current_ratio < target_ratio:
self.shunt_num[route_index] += 1
self.all_vehs_info[veh_id][3] = 2
traci.vehicle.setRouteID(veh_id, routeID_list[route_index])
traci.vehicle.setColor(veh_id, (255, 0, 0))
self.all_vehs_info[veh_id][5] = self.share_reroute_travel_time[route_index]
else:
self.straight_num[route_index] += 1
self.all_vehs_info[veh_id][5] = self.share_straight_travel_time[route_index]
else:
self.straight_num[route_index] += 1
self.all_vehs_info[veh_id][5] = self.share_straight_travel_time[route_index]
def update_vehs_set(self):
self.main_vehs = set()
self.main_new_vehs_4decision = set()
self.current_all_vehs = set(traci.vehicle.getIDList())
self.new_vehs = self.current_all_vehs.symmetric_difference(
self.last_step_all_vehs.intersection(self.current_all_vehs)) # new vehicles
for veh_id in (self.current_all_vehs - self.new_vehs): # update accu. wait and travel time of existing vehicles
self.all_vehs_info[veh_id][0] = traci.vehicle.getAccumulatedWaitingTime(veh_id)
self.all_vehs_info[veh_id][2] = traci.simulation.getCurrentTime() - self.all_vehs_info[veh_id][1]
self.all_vehs_info[veh_id][4] += traci.vehicle.getFuelConsumption(veh_id)
for veh_id in self.current_all_vehs:
edge_id = traci.vehicle.getRoadID(veh_id)
if edge_id in center_edges:
self.main_vehs = self.main_vehs.union(set([veh_id])) # vehicles in main edge
# new vehicles in main edge
self.main_new_vehs = self.main_vehs.symmetric_difference(self.last_step_main_vehs.intersection(self.main_vehs))
# record the set for finding the new vehicle in next duration
self.last_step_all_vehs = self.current_all_vehs
self.last_step_main_vehs = self.main_vehs
# define the information about new vehicles
#Frame form[AccumulatedWaitingTime, EnteringTime, TravelTime, Flag(0:Not in Main Road, 1:Straight in Main Road, 2:Rerouted in Main Road 3: turn left), FuelConsumption, EstimatedTravelTime, EnterDirection(1:west,2,east,3:south,4:north)]
for veh_id in (self.new_vehs - self.main_new_vehs):
self.all_vehs_info[veh_id] = [traci.vehicle.getAccumulatedWaitingTime(veh_id),
traci.simulation.getCurrentTime(), traci.simulation.getCurrentTime(), 0, 0,-1,-1]
for veh_id in self.main_new_vehs:
type_id = traci.vehicle.getTypeID(veh_id)
if type_id == "Car":
self.main_new_vehs_4decision.add(veh_id)
self.all_vehs_info[veh_id] = [traci.vehicle.getAccumulatedWaitingTime(veh_id),
traci.simulation.getCurrentTime(), traci.simulation.getCurrentTime(), 1, 0,-1,-1]
elif type_id == "Car2": #left turn
self.all_vehs_info[veh_id] = [traci.vehicle.getAccumulatedWaitingTime(veh_id),
traci.simulation.getCurrentTime(), traci.simulation.getCurrentTime(), 3, 0,-1,-1]
else:
print("Car type error")
def induction_loop_count(self):
for loop_id in self.induction_loop_ID_list:
self.induction_loop_num[loop_id].put(traci.inductionloop.getLastStepVehicleNumber(loop_id))
            self.induction_loop_num[loop_id].get()  # pop (return and remove) the element at the head of the queue
def sim_step(self, action_change_ratio):
traci.simulationStep()
self.current_phase_duration += 1
self.update_vehs_set()
self.turn_right_ratio_based(action_change_ratio) # part of vehicles turns right
self.induction_loop_count()
for lane in self.MeanSpeed_update_lanes:
Lane_veh_Num = traci.lane.getLastStepVehicleNumber(lane)
self.lanes_veh_Num_time_dict[lane]=Lane_veh_Num
if Lane_veh_Num<1:
MeanSpeed = normal_speed
else:
MeanSpeed = min(traci.lane.getLastStepMeanSpeed(lane),normal_speed)
if len(self.lanes_MeanSpeed_dict[lane])>=30:
del self.lanes_MeanSpeed_dict[lane][0]
self.lanes_MeanSpeed_dict[lane].append(MeanSpeed)
def take_action(self, action, dynamic_flag):
self.advise1 = 0
self.advise3 = 0
for lane in self.MeanSpeed_update_lanes:
self.lanes_MeanSpeed_dict[lane] = []
if len(action) == 8:
self.advise2 = list(np.zeros(4))
action_change_phase, action_change_ratio = 2*action[0:4], action[4:]
step = 0
last_dur_end_phase = traci.trafficlight.getPhase(node_light_7)
self.current_phase_duration = 0
action_change_phase_revise = action_change_phase*(action_change_phase>(6/Step_len))
selected_phase_list = []
action_selected_phase_revise = []
for phase_idx in range(action_change_phase_revise.size):
if action_change_phase_revise[phase_idx]>0:
selected_phase_list.append(phase_idx*2)
action_selected_phase_revise.append(action_change_phase_revise[phase_idx])
self.pre_g_ratio=copy.deepcopy(self.real_g_ratio)
self.real_g_ratio = np.round((action_change_phase_revise/np.sum(action_change_phase_revise))*Step_len)/Step_len
g_ratio = self.real_g_ratio
action_selected_phase_revise = np.array(action_selected_phase_revise)
action_selected_phase_revise = np.round((action_selected_phase_revise/np.sum(action_selected_phase_revise))*Step_len)
for route_index in range(4):
if len(self.model_based_TT[str(route_index)])>3:
del self.model_based_TT[str(route_index)][0]
self.model_based_TT[str(route_index)].append(self.travel_time_model_based(
g_ratio[0] if route_index < 2 else g_ratio[2], self.status_tracker[3][route_index], route_index))
if len(self.speed_based_TT[str(route_index)])>3:
del self.speed_based_TT[str(route_index)][0]
self.speed_based_TT[str(route_index)].append(self.status_tracker[6][route_index]*travel_time_scale)
self.share_straight_travel_time[route_index] = self.status_tracker[5][route_index]*travel_time_scale + (np.mean(self.model_based_TT[str(route_index)])+np.mean(self.speed_based_TT[str(route_index)]) )/2
self.share_reroute_travel_time[route_index] = self.status_tracker[4][route_index]*travel_time_scale
for phase_idx in range(len(selected_phase_list)):
if phase_idx ==0:
if last_dur_end_phase == selected_phase_list[phase_idx]:
for _ in range(int(action_selected_phase_revise[phase_idx]-3)):
self.sim_step(action_change_ratio)
step += 1
else:
                    traci.trafficlight.setPhase(node_light_7, last_dur_end_phase+1) # 3 s yellow phase
for _ in range(3):
self.sim_step(action_change_ratio)
step += 1
self.current_phase_duration = selected_phase_list[phase_idx]
traci.trafficlight.setPhase(node_light_7, selected_phase_list[phase_idx])
for _ in range(int(action_selected_phase_revise[phase_idx]-6)):
self.sim_step(action_change_ratio)
step += 1
else:
self.current_phase_duration = selected_phase_list[phase_idx]
traci.trafficlight.setPhase(node_light_7, selected_phase_list[phase_idx])
for _ in range(int(action_selected_phase_revise[phase_idx]-3)):
self.sim_step(action_change_ratio)
step += 1
if phase_idx ==(len(selected_phase_list)-1):
for _ in range(Step_len-step):
self.sim_step(action_change_ratio)
step += 1
else:
traci.trafficlight.setPhase(node_light_7, selected_phase_list[phase_idx]+1)
for _ in range(3):
self.sim_step(action_change_ratio)
step += 1
if step != Step_len:
print(f"step is {step} which is not equal to StepLength {Step_len}")
reward = self.return_reward(g_ratio)
self.straight_num = np.zeros(4)
self.shunt_num = np.zeros(4)
self.smart_num = np.zeros(4)
self.update_state()
if len(action) <= 2:
if np.mean(self.over_sau_time) > 280*timeslot_factor:
self.advise2 = 1*(random.random()>0.5)
else:
for index in range(4):
if self.over_sau_time[index] > 280*timeslot_factor:
self.advise2[index] = 1*(random.random()>0.5)
return reward
def close_sumo():
traci.close()
def get_max_queue_length(listLanes):
max_queue_length = 0
for lane in listLanes:
queue_length = traci.lane.getLastStepHaltingNumber(lane)
if max_queue_length < queue_length:
max_queue_length = queue_length
return max_queue_length
|
py
|
1a5cc39e481006deb5f3be3ef5f02b382f3649c9
|
import sys, os
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.realpath(this_dir + '/../magphase/src'))
import numpy as np
from matplotlib import pyplot as plt
import libutils as lu
import libaudio as la
import magphase as mp
from scikits.talkbox import lpc
from scipy.signal import lfilter
from scipy import interpolate
def lpc_to_mag(v_lpc, fft_len=4096):
'''
    Computes the magnitude spectrum from LPC coefficients using an FFT-based approximation.
'''
v_imp = np.r_[1, np.zeros(fft_len-1)]
v_imp_filt = lfilter(np.array([1.0]), v_lpc, v_imp)
v_mag = np.absolute(np.fft.fft(v_imp_filt))
v_mag = la.remove_hermitian_half(v_mag[None,:])[0]
return v_mag
def get_formant_locations_from_spec_env(v_sp_env):
'''
v_sp_env could be in db, log, or absolute value.
'''
v_mag_diff = np.diff(v_sp_env)
v_mag_diff[v_mag_diff>=0.0] = 1.0
v_mag_diff[v_mag_diff<0.0] = -1.0
v_mag_diff_diff = np.diff(v_mag_diff)
v_frmnts_bins = np.where(v_mag_diff_diff<0.0)[0] + 1
v_frmnts_gains = v_sp_env[v_frmnts_bins]
return v_frmnts_bins, v_frmnts_gains
def get_formant_locations_from_raw_long_frame(v_sig, v_pm, nx, fft_len):
'''
nx: frame index
'''
#v_sig, fs = la.read_audio_file(wavfile)
# Epoch detection:
#v_pm_sec, v_voi = la.reaper_epoch_detection(wavfile)
#v_pm = lu.round_to_int(v_pm_sec * fs)
# Raw-long Frame extraction:
v_frm_long = v_sig[v_pm[nx-2]:v_pm[nx+2]+1]
# Win:
left_len = v_pm[nx] - v_pm[nx-2]
right_len = v_pm[nx+2] - v_pm[nx]
v_win = la.gen_non_symmetric_win(left_len, right_len, np.hanning, b_norm=False)
v_frm_long_win = v_frm_long * v_win
# Spectrum:
v_mag = np.absolute(np.fft.fft(v_frm_long_win, n=fft_len))
v_mag_db = la.db(la.remove_hermitian_half(v_mag[None,:])[0])
# Formant extraction -LPC method:--------------------------------------------------
v_lpc, v_e, v_refl = lpc(v_frm_long_win, 120)
b_use_lpc_roots = False
if b_use_lpc_roots:
v_lpc_roots = np.roots(v_lpc)
v_lpc_angles = np.angle(v_lpc_roots)
v_lpc_angles = v_lpc_angles[v_lpc_angles>=0]
v_lpc_angles = np.sort(v_lpc_angles)
fft_len_half = 1 + fft_len / 2
v_lpc_roots_bins = v_lpc_angles * fft_len_half / np.pi
v_lpc_mag = lpc_to_mag(v_lpc, fft_len=fft_len)
v_lpc_mag_db = la.db(v_lpc_mag)
v_lpc_mag_db = v_lpc_mag_db - np.mean(v_lpc_mag_db) + np.mean(v_mag_db)
v_frmnts_bins, v_frmnts_gains_db = get_formant_locations_from_spec_env(v_lpc_mag_db)
# Getting bandwidth:
fft_len_half = 1 + fft_len / 2
v_vall_bins = get_formant_locations_from_spec_env(-v_lpc_mag_db)[0]
v_vall_bins = np.r_[0, v_vall_bins, fft_len_half-1]
nfrmnts = v_frmnts_bins.size
v_frmnts_bw = np.zeros(nfrmnts) - 1.0
for nx_f in xrange(nfrmnts):
#Left slope:
curr_frmnt_bin = v_frmnts_bins[nx_f]
curr_vall_l_bin = v_vall_bins[nx_f]
curr_vall_r_bin = v_vall_bins[nx_f+1]
curr_midp_l = int((curr_frmnt_bin + curr_vall_l_bin) / 2.0)
curr_midp_r = int((curr_frmnt_bin + curr_vall_r_bin) / 2.0)
# Protection:
if curr_midp_l==curr_frmnt_bin:
curr_midp_l = curr_vall_l_bin
if curr_midp_r==curr_frmnt_bin:
curr_midp_r = curr_vall_r_bin
#print(nx_f)
        # 27 and 32
slope_l = (v_frmnts_gains_db[nx_f] - v_lpc_mag_db[curr_midp_l]) / (curr_frmnt_bin - curr_midp_l).astype(float)
slope_r = (v_frmnts_gains_db[nx_f] - v_lpc_mag_db[curr_midp_r]) / (curr_frmnt_bin - curr_midp_r).astype(float)
slope_ave = (slope_l - slope_r) / 2.0
v_frmnts_bw[nx_f] = 1.0 / slope_ave
# Filtering by bandwidth:
bw_thress = 7.0
v_frmnts_bins = v_frmnts_bins[v_frmnts_bw<bw_thress]
v_frmnts_gains_db = v_frmnts_gains_db[v_frmnts_bw<bw_thress]
v_frmnts_bw = v_frmnts_bw[v_frmnts_bw<bw_thress]
# Computing frame short:--------------------------------
# Win:
left_len_short = v_pm[nx] - v_pm[nx-1]
right_len_short = v_pm[nx+1] - v_pm[nx]
v_win_short = la.gen_non_symmetric_win(left_len_short, right_len_short, np.hanning, b_norm=False)
v_frm_short = v_sig[v_pm[nx-1]:v_pm[nx+1]+1]
v_frm_short_win = v_frm_short * v_win_short
shift = v_pm[nx] - v_pm[nx-1]
# Formant extraction - True envelope method:----------------------------------------
# Not finished.
#v_true_env_db = la.true_envelope(v_mag_db[None,:], in_type='db', ncoeffs=400, thres_db=0.1)[0]
if False:
plt.figure(); plt.plot(v_mag_db); plt.plot(v_lpc_mag_db); plt.grid(); plt.show()
return v_mag_db, v_lpc_mag_db, v_frmnts_bins, v_frmnts_gains_db, v_frmnts_bw, v_frm_short_win, shift
def formant_mapping(v_frmnts_bins_a, v_frmnts_gains_db_a, v_frmnts_bins_b, v_frmnts_gains_db_b, fft_len):
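    # Editorial note (not original documentation): formants are paired greedily in
    # order of decreasing gain, each formant of spectrum `a` taking the nearest
    # still-unused formant of spectrum `b`; crossing pairs are then pruned so that
    # only the pair with the smaller frequency jump survives.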
# Order according to gain:
v_order_gain_a = np.argsort(-v_frmnts_gains_db_a)
v_order_gain_b = np.argsort(-v_frmnts_gains_db_b)
v_frmnts_bins_ord_gain_a = v_frmnts_bins_a[v_order_gain_a]
v_frmnts_gains_db_ord_gain_a = v_frmnts_gains_db_a[v_order_gain_a]
v_frmnts_bins_ord_gain_b = v_frmnts_bins_b[v_order_gain_b]
v_frmnts_gains_db_ord_gain_b = v_frmnts_gains_db_b[v_order_gain_b]
nfrmnts = np.minimum(v_frmnts_bins_a.size, v_frmnts_bins_b.size)
v_frmnts_bins_ord_gain_b_dyn = v_frmnts_bins_ord_gain_b.copy()
v_map_a_to_b_ord_gain = np.zeros(nfrmnts, dtype='int') - 1
for nx_f in xrange(nfrmnts):
v_diffs = np.abs(v_frmnts_bins_ord_gain_a[nx_f] - v_frmnts_bins_ord_gain_b_dyn)
nx_chosen_b_frmnt = np.argmin(v_diffs)
v_map_a_to_b_ord_gain[nx_f] = nx_chosen_b_frmnt
        v_frmnts_bins_ord_gain_b_dyn[nx_chosen_b_frmnt] = -fft_len  # sentinel far outside the bin range so this formant cannot be matched again
    # Cut down unnecessary elements:
v_frmnts_bins_ord_gain_a = v_frmnts_bins_ord_gain_a[:nfrmnts]
v_frmnts_gains_db_ord_gain_a = v_frmnts_gains_db_ord_gain_a[:nfrmnts]
# Reorder b vectors to match a:
v_frmnts_bins_b_match_a_ord_gain = v_frmnts_bins_ord_gain_b[v_map_a_to_b_ord_gain]
v_frmnts_gains_db_b_match_a_ord_gain = v_frmnts_gains_db_ord_gain_b[v_map_a_to_b_ord_gain]
# Order by frequency bins:
v_order_bins_a = np.argsort(v_frmnts_bins_ord_gain_a)
v_frmnts_bins_a = v_frmnts_bins_ord_gain_a[v_order_bins_a]
v_frmnts_bins_b_match_a = v_frmnts_bins_b_match_a_ord_gain[v_order_bins_a]
v_dists_bins = np.abs(v_frmnts_bins_a - v_frmnts_bins_b_match_a)
    # Remove crossing mappings (iterating in frequency-bin order):
for nx_f in xrange(nfrmnts):
curr_bin_a = v_frmnts_bins_a[nx_f]
curr_bin_b = v_frmnts_bins_b_match_a[nx_f]
if curr_bin_a==-1:
continue
# Iteration per next locations:
for nx_f2 in xrange(nx_f+1, nfrmnts):
curr_bin_b2 = v_frmnts_bins_b_match_a[nx_f2]
            # If the two mapped pairs cross each other:
            if curr_bin_b2 < curr_bin_b:
                # If pair 2 spans the longer distance, drop pair 2:
                if v_dists_bins[nx_f2] > v_dists_bins[nx_f]:
                    v_frmnts_bins_a[nx_f2] = -1
                # Otherwise pair 1 is longer, drop pair 1:
                else:
                    v_frmnts_bins_a[nx_f] = -1
continue
v_nx_frmnts_filt = np.where(v_frmnts_bins_a >= 0)[0]
v_frmnts_bins_a_filt = v_frmnts_bins_a[v_nx_frmnts_filt]
v_frmnts_bins_b_match_a_filt = v_frmnts_bins_b_match_a[v_nx_frmnts_filt]
# 7,9,10,19,20,27
# Debug:
if False:
plt.figure()
plt.plot(v_lpc_mag_db_a)
plt.plot(v_lpc_mag_db_b)
for nx_f in xrange(v_map_a_to_b_ord_gain.size):
plt.plot(np.r_[v_frmnts_bins_ord_gain_a[nx_f], v_frmnts_bins_ord_gain_b[v_map_a_to_b_ord_gain[nx_f]]], np.r_[v_frmnts_gains_db_ord_gain_a[nx_f], v_frmnts_gains_db_ord_gain_b[v_map_a_to_b_ord_gain[nx_f]]], 'k')
plt.grid()
plt.show()
plt.figure()
plt.plot(v_lpc_mag_db_a)
plt.plot(v_lpc_mag_db_b)
for nx_f in xrange(v_frmnts_bins_a_filt.size):
v_curr_x = np.r_[v_frmnts_bins_a_filt[nx_f], v_frmnts_bins_b_match_a_filt[nx_f]]
v_curr_y = np.r_[ v_lpc_mag_db_a[v_frmnts_bins_a_filt[nx_f]], v_lpc_mag_db_b[v_frmnts_bins_b_match_a_filt[nx_f]]]
plt.plot(v_curr_x, v_curr_y, 'k')
plt.grid()
plt.show()
return v_frmnts_bins_a_filt, v_frmnts_bins_b_match_a_filt
def warp_mag_spec(v_lpc_mag_db_a, v_frmnts_bins_a_filt, v_frmnts_bins_b_filt, fft_len, sp_weight):
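    # Editorial note (not original documentation): builds a piecewise-linear warp of
    # the frequency axis that moves each formant of spectrum `a` toward the position
    # weighted between the matched formants of `a` and `b` (sp_weight = 0 keeps `a`,
    # sp_weight = 1 lands on `b`), with the DC and Nyquist bins pinned in place.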
# Weighting:
v_targ_frmnts_bins = v_frmnts_bins_a_filt * (1.0 - sp_weight) + v_frmnts_bins_b_filt * sp_weight
# Generate warping function:
fft_len_half = 1 + fft_len / 2
func_intrp = interpolate.interp1d(np.r_[0, v_frmnts_bins_a_filt, fft_len_half-1], np.r_[0, v_targ_frmnts_bins, fft_len_half-1], bounds_error=True, axis=0, kind='slinear')
v_lin_bins = np.arange(fft_len_half)
v_warp = func_intrp(v_lin_bins)
# Protection:
v_warp[-1] = v_lin_bins[-1]
# Do the warping:
func_intrp = interpolate.interp1d(v_warp, v_lpc_mag_db_a, bounds_error=True, axis=0, kind='slinear')
v_lpc_mag_db_a_warp = func_intrp(v_lin_bins)
return v_lpc_mag_db_a_warp
def fft_filter(v_frm_short_a, shift_a, v_spec_diff_db_a, fft_len):
# dB to absolute:
v_spec_diff_a = la.db(v_spec_diff_db_a, b_inv=True)
right_a = v_frm_short_a.size - shift_a
v_frm_short_a_ext = np.r_[np.zeros(fft_len/2 - shift_a) , v_frm_short_a , np.zeros(fft_len/2 - right_a)]
v_fft_frm_short_a_ext = np.fft.fft(v_frm_short_a_ext) * la.add_hermitian_half(v_spec_diff_a[None,:], data_type='mag')[0]
# To time domain:
v_frm_short_a_ext_filt = np.fft.ifft(v_fft_frm_short_a_ext).real
return v_frm_short_a_ext_filt
def compute_lossless_spec_feats(m_fft):
m_mag = np.absolute(m_fft)
# Protection against division by zero:
mb_mag_zeros = (m_mag==0.0)
m_div = m_mag.copy()
m_div[mb_mag_zeros] = 1.0
m_real = m_fft.real / m_div
m_imag = m_fft.imag / m_div
# Protection against division by zero (may be not necessary):
m_real[mb_mag_zeros] = 0.0
m_imag[mb_mag_zeros] = 0.0
return m_mag, m_real, m_imag
def synthesis_from_lossless(m_mag, m_real, m_imag, v_shift):
m_ph_cmpx = m_real + m_imag * 1j
### with protection against divide-by-zero:
m_ph_cmpx_mag = np.absolute(m_ph_cmpx)
m_ph_cmpx_mag[m_ph_cmpx_mag==0.0] = 1.0
m_fft = m_mag * m_ph_cmpx / m_ph_cmpx_mag
m_fft = la.add_hermitian_half(m_fft, data_type='complex')
m_frm = np.fft.ifft(m_fft).real
m_frm = np.fft.fftshift(m_frm, axes=1)
v_pm = la.shift_to_pm(v_shift)
v_syn_sig = mp.ola(m_frm,v_pm)
return v_syn_sig
def speech_interp(wavfile_a, wavfile_b, nx_strt_a, nx_strt_b, nframes, fft_len):
# MagPhase analysis:
m_mag_a, m_real_a, m_imag_a, v_f0_a, fs, v_shift_a = mp.analysis_lossless(wavfile_a)
m_mag_b, m_real_b, m_imag_b, v_f0_b, fs, v_shift_b = mp.analysis_lossless(wavfile_b)
v_pm_a = la.shift_to_pm(v_shift_a)
v_pm_b = la.shift_to_pm(v_shift_b)
v_sig_a, fs = la.read_audio_file(wavfile_a)
v_sig_b, fs = la.read_audio_file(wavfile_b)
fft_len_half = 1 + fft_len / 2
m_mag_interp = np.zeros((nframes, fft_len_half))
m_real_interp = np.zeros((nframes, fft_len_half))
m_imag_interp = np.zeros((nframes, fft_len_half))
v_shifts_interp = np.zeros(nframes, dtype='int')
for nx_frm in xrange(nframes):
sp_weight = nx_frm / (nframes-1.0)
nx_a = nx_strt_a + nx_frm
nx_b = nx_strt_b + nx_frm
# Get formants:
v_mag_db_a, v_lpc_mag_db_a, v_frmnts_bins_a, v_frmnts_gains_db_a, v_frmnts_bw_a, v_frm_short_a, shift_a = get_formant_locations_from_raw_long_frame(v_sig_a, v_pm_a, nx_a, fft_len)
v_mag_db_b, v_lpc_mag_db_b, v_frmnts_bins_b, v_frmnts_gains_db_b, v_frmnts_bw_b, v_frm_short_b, shift_b = get_formant_locations_from_raw_long_frame(v_sig_b, v_pm_b, nx_b, fft_len)
# Formant mapping:----------------------------------------------------------------
v_frmnts_bins_a_filt, v_frmnts_bins_b_filt = formant_mapping(v_frmnts_bins_a, v_frmnts_gains_db_a, v_frmnts_bins_b, v_frmnts_gains_db_b, fft_len)
# Warping:---------------------------------------------------------------------
# True envelope:
v_true_env_db_a = la.true_envelope(v_mag_db_a[None,:], in_type='db', ncoeffs=400, thres_db=0.1)[0]
v_true_env_db_b = la.true_envelope(v_mag_db_b[None,:], in_type='db', ncoeffs=400, thres_db=0.1)[0]
# Spectral Warping:
v_sp_env_db_a_warp = warp_mag_spec(v_true_env_db_a, v_frmnts_bins_a_filt, v_frmnts_bins_b_filt, fft_len, sp_weight)
v_sp_env_db_b_warp = warp_mag_spec(v_true_env_db_b, v_frmnts_bins_b_filt, v_frmnts_bins_a_filt, fft_len, (1-sp_weight))
#v_sp_env_db_a_warp = warp_mag_spec(v_lpc_mag_db_a, v_frmnts_bins_a_filt, v_frmnts_bins_b_filt, fft_len, sp_weight)
#v_sp_env_db_b_warp = warp_mag_spec(v_lpc_mag_db_b, v_frmnts_bins_b_filt, v_frmnts_bins_a_filt, fft_len, (1-sp_weight))
# Spectral envelope mix:-------------------------------------------------------
v_sp_env_db_targ = v_sp_env_db_a_warp * (1.0-sp_weight) + v_sp_env_db_b_warp * sp_weight
# Whitening:-----------------------------------------------------------------------------
v_mag_white_a = m_mag_a[nx_a,:] / la.db(v_true_env_db_a, b_inv=True)
v_mag_white_b = m_mag_b[nx_b,:] / la.db(v_true_env_db_b, b_inv=True)
# Impose spectral Env:------------------------------------------------------------------
v_sp_env_targ = la.db(v_sp_env_db_targ, b_inv=True)
v_mag_filt_a = v_mag_white_a * v_sp_env_targ
v_mag_filt_b = v_mag_white_b * v_sp_env_targ
# Mix Sources:------------------------------------------------------------------
v_mag_mix = v_mag_filt_a * (1.0-sp_weight) + v_mag_filt_b * sp_weight
v_real_mix = m_real_a[nx_a,:] * (1.0-sp_weight) + m_real_b[nx_b,:] * sp_weight
v_imag_mix = m_imag_a[nx_a,:] * (1.0-sp_weight) + m_imag_b[nx_b,:] * sp_weight
# Mix shifts:
shift_mix = lu.round_to_int(shift_a * (1.0-sp_weight) + shift_b * sp_weight)
# Save:
v_shifts_interp[nx_frm] = shift_mix
m_mag_interp[nx_frm, :] = v_mag_mix
m_real_interp[nx_frm, :] = v_real_mix
m_imag_interp[nx_frm, :] = v_imag_mix
if False:
plt.figure(); plt.plot(v_frm_short_a_ext_filt); plt.plot(v_frm_short_b_ext_filt); plt.grid(); plt.show()
plt.figure(); plt.plot(v_frm_short_a_ext_filt); plt.plot(v_frm_short_b_ext_filt); plt.plot(v_frm_short_ext_filt); plt.grid(); plt.show()
# Merge:
m_mag_merged = np.vstack((m_mag_a[:nx_strt_a,:] , m_mag_interp , m_mag_b[(nx_strt_b+nframes):,:]))
m_real_merged = np.vstack((m_real_a[:nx_strt_a,:] , m_real_interp , m_real_b[(nx_strt_b+nframes):,:]))
m_imag_merged = np.vstack((m_imag_a[:nx_strt_a,:] , m_imag_interp, m_imag_b[(nx_strt_b+nframes):,:]))
v_shift_merged = np.r_[ v_shift_a[:nx_strt_a] , v_shifts_interp, v_shift_b[(nx_strt_b+nframes):] ]
v_sig_merged = synthesis_from_lossless(m_mag_merged, m_real_merged, m_imag_merged, v_shift_merged)
return v_sig_merged, fs
def speech_interp_with_anchors(wavfile_a, wavfile_b, nx_strt_a, nx_strt_b, nframes, fft_len):
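    """Variant of speech_interp that maps formants only once, using the first
    interpolation frame of signal A and the last one of signal B as fixed
    anchors for the spectral-envelope warping."""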
# MagPhase analysis:
m_mag_a, m_real_a, m_imag_a, v_f0_a, fs, v_shift_a = mp.analysis_lossless(wavfile_a)
m_mag_b, m_real_b, m_imag_b, v_f0_b, fs, v_shift_b = mp.analysis_lossless(wavfile_b)
v_pm_a = la.shift_to_pm(v_shift_a)
v_pm_b = la.shift_to_pm(v_shift_b)
v_sig_a, fs = la.read_audio_file(wavfile_a)
v_sig_b, fs = la.read_audio_file(wavfile_b)
# Get formants:
v_mag_db_a, v_lpc_mag_db_a, v_frmnts_bins_a, v_frmnts_gains_db_a, v_frmnts_bw_a, v_frm_short_a, shift_a = get_formant_locations_from_raw_long_frame(v_sig_a, v_pm_a, nx_strt_a, fft_len)
v_mag_db_b, v_lpc_mag_db_b, v_frmnts_bins_b, v_frmnts_gains_db_b, v_frmnts_bw_b, v_frm_short_b, shift_b = get_formant_locations_from_raw_long_frame(v_sig_b, v_pm_b, nx_strt_b+nframes-1, fft_len)
# Formant mapping:----------------------------------------------------------------
v_frmnts_bins_a_filt, v_frmnts_bins_b_filt = formant_mapping(v_frmnts_bins_a, v_frmnts_gains_db_a, v_frmnts_bins_b, v_frmnts_gains_db_b, fft_len)
# spec envelope anchors:---------------------------------------------------------------------
v_true_env_db_a = la.true_envelope(v_mag_db_a[None,:], in_type='db', ncoeffs=400, thres_db=0.1)[0]
v_true_env_db_b = la.true_envelope(v_mag_db_b[None,:], in_type='db', ncoeffs=400, thres_db=0.1)[0]
if False:
plt.figure(); plt.plot(v_mag_db_a); plt.plot(v_true_env_db_a); plt.grid(); plt.show()
plt.figure(); plt.plot(v_mag_db_b); plt.plot(v_true_env_db_b); plt.grid(); plt.show()
    fft_len_half = 1 + fft_len // 2
m_mag_interp = np.zeros((nframes, fft_len_half))
m_real_interp = np.zeros((nframes, fft_len_half))
m_imag_interp = np.zeros((nframes, fft_len_half))
v_shifts_interp = np.zeros(nframes, dtype='int')
for nx_frm in xrange(nframes):
sp_weight = nx_frm / (nframes-1.0)
nx_a = nx_strt_a + nx_frm
nx_b = nx_strt_b + nx_frm
# Spectral Warping:
v_sp_env_db_curr_a_warp = warp_mag_spec(v_true_env_db_a, v_frmnts_bins_a_filt, v_frmnts_bins_b_filt, fft_len, sp_weight)
v_sp_env_db_curr_b_warp = warp_mag_spec(v_true_env_db_b, v_frmnts_bins_b_filt, v_frmnts_bins_a_filt, fft_len, (1-sp_weight))
#v_sp_env_db_a_warp = warp_mag_spec(v_lpc_mag_db_a, v_frmnts_bins_a_filt, v_frmnts_bins_b_filt, fft_len, sp_weight)
#v_sp_env_db_b_warp = warp_mag_spec(v_lpc_mag_db_b, v_frmnts_bins_b_filt, v_frmnts_bins_a_filt, fft_len, (1-sp_weight))
# Spectral envelope mix:-------------------------------------------------------
v_sp_env_db_curr_targ = v_sp_env_db_curr_a_warp * (1.0-sp_weight) + v_sp_env_db_curr_b_warp * sp_weight
# Whitening:-----------------------------------------------------------------------------
# Spectral envelope estimation:
# v_mag_db_a, v_lpc_mag_db_a, v_frmnts_bins_a, v_frmnts_gains_db_a, v_frmnts_bw_a, v_frm_short_a, shift_a = get_formant_locations_from_raw_long_frame(v_sig_a, v_pm_a, nx_a, fft_len)
v_mag_db_curr_a = get_formant_locations_from_raw_long_frame(v_sig_a, v_pm_a, nx_a, fft_len)[0]
v_mag_db_curr_b = get_formant_locations_from_raw_long_frame(v_sig_b, v_pm_b, nx_b, fft_len)[0]
v_true_env_db_curr_a = la.true_envelope(v_mag_db_curr_a[None,:], in_type='db', ncoeffs=400, thres_db=0.1)[0]
v_true_env_db_curr_b = la.true_envelope(v_mag_db_curr_b[None,:], in_type='db', ncoeffs=400, thres_db=0.1)[0]
v_mag_white_a = m_mag_a[nx_a,:] / la.db(v_true_env_db_curr_a, b_inv=True)
v_mag_white_b = m_mag_b[nx_b,:] / la.db(v_true_env_db_curr_b, b_inv=True)
#if sp_weight>=0.4: import ipdb; ipdb.set_trace(context=8) # breakpoint 6b3a7d8b //
if False:
plt.figure(); plt.plot(v_mag_db_curr_a); plt.plot(v_true_env_db_curr_a); plt.grid(); plt.show()
plt.figure(); plt.plot(v_true_env_db_curr_a); plt.plot(v_true_env_db_curr_b); plt.plot(v_sp_env_db_curr_targ); plt.grid(); plt.show()
plt.figure(); plt.plot(v_mag_db_curr_a); plt.plot(la.db(m_mag_a[nx_a,:])); plt.plot(la.db(v_mag_white_a)); plt.plot(v_true_env_db_curr_a); plt.grid(); plt.show()
# Impose spectral Env:------------------------------------------------------------------
v_sp_env_targ = la.db(v_sp_env_db_curr_targ, b_inv=True)
v_mag_filt_a = v_mag_white_a * v_sp_env_targ
v_mag_filt_b = v_mag_white_b * v_sp_env_targ
# Mix Sources:------------------------------------------------------------------
v_mag_mix = v_mag_filt_a * (1.0-sp_weight) + v_mag_filt_b * sp_weight
v_real_mix = m_real_a[nx_a,:] * (1.0-sp_weight) + m_real_b[nx_b,:] * sp_weight
v_imag_mix = m_imag_a[nx_a,:] * (1.0-sp_weight) + m_imag_b[nx_b,:] * sp_weight
# Mix shifts:
shift_mix = lu.round_to_int(shift_a * (1.0-sp_weight) + shift_b * sp_weight)
# Save:
v_shifts_interp[nx_frm] = shift_mix
m_mag_interp[nx_frm, :] = v_mag_mix
m_real_interp[nx_frm, :] = v_real_mix
m_imag_interp[nx_frm, :] = v_imag_mix
if False:
plt.figure(); plt.plot(v_frm_short_a_ext_filt); plt.plot(v_frm_short_b_ext_filt); plt.grid(); plt.show()
plt.figure(); plt.plot(v_frm_short_a_ext_filt); plt.plot(v_frm_short_b_ext_filt); plt.plot(v_frm_short_ext_filt); plt.grid(); plt.show()
# Merge:
m_mag_merged = np.vstack((m_mag_a[:nx_strt_a,:] , m_mag_interp , m_mag_b[(nx_strt_b+nframes):,:]))
m_real_merged = np.vstack((m_real_a[:nx_strt_a,:] , m_real_interp , m_real_b[(nx_strt_b+nframes):,:]))
m_imag_merged = np.vstack((m_imag_a[:nx_strt_a,:] , m_imag_interp, m_imag_b[(nx_strt_b+nframes):,:]))
v_shift_merged = np.r_[ v_shift_a[:nx_strt_a] , v_shifts_interp, v_shift_b[(nx_strt_b+nframes):] ]
v_sig_merged = synthesis_from_lossless(m_mag_merged, m_real_merged, m_imag_merged, v_shift_merged)
return v_sig_merged, fs
if __name__ == '__main__':
# INPUT:=====================================================================================================
fft_len = 4096
wavfile_a = '/home/felipe/Cloud/Education/UoE/Projects/speech_interp/data/wav/sim_48k/a_p0_1_shorter_48k.wav'
#wavfile_b = '/home/felipe/Cloud/Education/UoE/Projects/speech_interp/data/wav/sim_48k/a_p0_1_shorter_48k.wav'
wavfile_b = '/home/felipe/Cloud/Education/UoE/Projects/speech_interp/data/wav/sim_48k/o_p0_1_shorter_48k.wav'
wavfile_out = '/home/felipe/Cloud/Education/UoE/Projects/speech_interp/data/wav/sim_48k/interp7_anchor_ao.wav'
#nx_a = 30
#nx_b = 30
#sp_weight = 0.5
nx_strt_a = 50 # length: 338
nx_strt_b = 50 # length: 289
nframes = 200
# PROCESS:====================================================================================================
#v_sig_interp, fs = speech_interp(wavfile_a, wavfile_b, nx_strt_a, nx_strt_b, nframes, fft_len)
v_sig_interp, fs = speech_interp_with_anchors(wavfile_a, wavfile_b, nx_strt_a, nx_strt_b, nframes, fft_len)
# Write wavfile:
la.write_audio_file(wavfile_out, v_sig_interp, fs, norm=0.98)
if False:
plt.figure(); plt.plot(v_mag_db_a); plt.plot(5 + v_lpc_mag_db_a); plt.plot(v_sp_env_db_targ); plt.grid(); plt.show()
plt.figure(); plt.plot(v_mag_db_a); plt.plot(6.0 + v_lpc_mag_db_a); plt.grid(); plt.show()
plt.figure(); plt.plot(v_mag_db_a); plt.plot(v_mag_db_a - v_lpc_mag_db_a); plt.grid(); plt.show()
plt.figure(); plt.plot(v_mag_db_a); plt.plot(v_true_env_db_a); plt.grid(); plt.show()
plt.figure(); plt.plot(v_mag_db_a); plt.plot(v_mag_db_a - v_true_env_db_a); plt.grid(); plt.show()
plt.figure(); plt.plot(v_mag_db_a); plt.plot(v_mag_db_a - v_true_env_db_a); plt.plot(v_mag_db_a - v_lpc_mag_db_a); plt.grid(); plt.show()
plt.figure(); plt.plot(v_frm_short_a_ext); plt.plot(v_frm_short_a_ext_filt); plt.grid(); plt.show()
if False:
plt.figure()
plt.plot(v_lpc_mag_db_a)
plt.plot(v_lpc_mag_db_b)
#plt.plot(v_warp, v_lpc_mag_db_a)
plt.plot(v_sp_env_db_a_warp)
plt.plot(v_sp_env_db_b_warp)
plt.grid()
plt.show()
plt.figure(); plt.plot(v_sp_env_db_a_warp); plt.plot(v_sp_env_db_b_warp); plt.plot(v_sp_env_db_targ); plt.grid(); plt.show()
plt.figure(); plt.plot(v_lpc_mag_db_a); plt.plot(v_sp_env_db_targ); plt.plot(v_spec_diff_a); plt.grid(); plt.show()
|
py
|
1a5cc3af92fad0d93f01b516c2af5020725b6feb
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class P2SVpnServerConfiguration(SubResource):
"""P2SVpnServerConfiguration Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param p2_svpn_server_configuration_properties_name: The name of the
P2SVpnServerConfiguration that is unique within a VirtualWan in a resource
    group. This name can be used to access the resource along with Parent
VirtualWan resource name.
:type p2_svpn_server_configuration_properties_name: str
:param vpn_protocols: VPN protocols for the P2SVpnServerConfiguration.
:type vpn_protocols: list[str or
~azure.mgmt.network.v2019_02_01.models.VpnGatewayTunnelingProtocol]
:param p2_svpn_server_config_vpn_client_root_certificates: VPN client root
certificate of P2SVpnServerConfiguration.
:type p2_svpn_server_config_vpn_client_root_certificates:
list[~azure.mgmt.network.v2019_02_01.models.P2SVpnServerConfigVpnClientRootCertificate]
:param p2_svpn_server_config_vpn_client_revoked_certificates: VPN client
revoked certificate of P2SVpnServerConfiguration.
:type p2_svpn_server_config_vpn_client_revoked_certificates:
list[~azure.mgmt.network.v2019_02_01.models.P2SVpnServerConfigVpnClientRevokedCertificate]
:param p2_svpn_server_config_radius_server_root_certificates: Radius
Server root certificate of P2SVpnServerConfiguration.
:type p2_svpn_server_config_radius_server_root_certificates:
list[~azure.mgmt.network.v2019_02_01.models.P2SVpnServerConfigRadiusServerRootCertificate]
:param p2_svpn_server_config_radius_client_root_certificates: Radius
client root certificate of P2SVpnServerConfiguration.
:type p2_svpn_server_config_radius_client_root_certificates:
list[~azure.mgmt.network.v2019_02_01.models.P2SVpnServerConfigRadiusClientRootCertificate]
:param vpn_client_ipsec_policies: VpnClientIpsecPolicies for
P2SVpnServerConfiguration.
:type vpn_client_ipsec_policies:
list[~azure.mgmt.network.v2019_02_01.models.IpsecPolicy]
:param radius_server_address: The radius server address property of the
P2SVpnServerConfiguration resource for point to site client connection.
:type radius_server_address: str
:param radius_server_secret: The radius secret property of the
P2SVpnServerConfiguration resource for point to site client connection.
:type radius_server_secret: str
:ivar provisioning_state: The provisioning state of the
P2SVpnServerConfiguration resource. Possible values are: 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:ivar p2_svpn_gateways: List of references to P2SVpnGateways.
:vartype p2_svpn_gateways:
list[~azure.mgmt.network.v2019_02_01.models.SubResource]
:param p2_svpn_server_configuration_properties_etag: A unique read-only
string that changes whenever the resource is updated.
:type p2_svpn_server_configuration_properties_etag: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'p2_svpn_gateways': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'p2_svpn_server_configuration_properties_name': {'key': 'properties.name', 'type': 'str'},
'vpn_protocols': {'key': 'properties.vpnProtocols', 'type': '[str]'},
'p2_svpn_server_config_vpn_client_root_certificates': {'key': 'properties.p2SVpnServerConfigVpnClientRootCertificates', 'type': '[P2SVpnServerConfigVpnClientRootCertificate]'},
'p2_svpn_server_config_vpn_client_revoked_certificates': {'key': 'properties.p2SVpnServerConfigVpnClientRevokedCertificates', 'type': '[P2SVpnServerConfigVpnClientRevokedCertificate]'},
'p2_svpn_server_config_radius_server_root_certificates': {'key': 'properties.p2SVpnServerConfigRadiusServerRootCertificates', 'type': '[P2SVpnServerConfigRadiusServerRootCertificate]'},
'p2_svpn_server_config_radius_client_root_certificates': {'key': 'properties.p2SVpnServerConfigRadiusClientRootCertificates', 'type': '[P2SVpnServerConfigRadiusClientRootCertificate]'},
'vpn_client_ipsec_policies': {'key': 'properties.vpnClientIpsecPolicies', 'type': '[IpsecPolicy]'},
'radius_server_address': {'key': 'properties.radiusServerAddress', 'type': 'str'},
'radius_server_secret': {'key': 'properties.radiusServerSecret', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'p2_svpn_gateways': {'key': 'properties.p2SVpnGateways', 'type': '[SubResource]'},
'p2_svpn_server_configuration_properties_etag': {'key': 'properties.etag', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, p2_svpn_server_configuration_properties_name: str=None, vpn_protocols=None, p2_svpn_server_config_vpn_client_root_certificates=None, p2_svpn_server_config_vpn_client_revoked_certificates=None, p2_svpn_server_config_radius_server_root_certificates=None, p2_svpn_server_config_radius_client_root_certificates=None, vpn_client_ipsec_policies=None, radius_server_address: str=None, radius_server_secret: str=None, p2_svpn_server_configuration_properties_etag: str=None, name: str=None, **kwargs) -> None:
super(P2SVpnServerConfiguration, self).__init__(id=id, **kwargs)
self.p2_svpn_server_configuration_properties_name = p2_svpn_server_configuration_properties_name
self.vpn_protocols = vpn_protocols
self.p2_svpn_server_config_vpn_client_root_certificates = p2_svpn_server_config_vpn_client_root_certificates
self.p2_svpn_server_config_vpn_client_revoked_certificates = p2_svpn_server_config_vpn_client_revoked_certificates
self.p2_svpn_server_config_radius_server_root_certificates = p2_svpn_server_config_radius_server_root_certificates
self.p2_svpn_server_config_radius_client_root_certificates = p2_svpn_server_config_radius_client_root_certificates
self.vpn_client_ipsec_policies = vpn_client_ipsec_policies
self.radius_server_address = radius_server_address
self.radius_server_secret = radius_server_secret
self.provisioning_state = None
self.p2_svpn_gateways = None
self.p2_svpn_server_configuration_properties_etag = p2_svpn_server_configuration_properties_etag
self.name = name
self.etag = None
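
# Illustrative usage only (not part of the generated SDK code); argument values
# below are placeholders chosen for the example:
#
#   config = P2SVpnServerConfiguration(
#       p2_svpn_server_configuration_properties_name='p2s-config-1',
#       vpn_protocols=['OpenVPN'],
#       radius_server_address='10.0.0.4',
#       radius_server_secret='example-secret')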
|
py
|
1a5cc3be5513d1c326630ab9a1eb83ea5485134a
|
# -*- coding: utf-8 -*-
#
# FauxhibaDetector documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FauxhibaDetector'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FauxhibaDetectordoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'FauxhibaDetector.tex',
u'FauxhibaDetector Documentation',
u"Sonny Suciawan", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'FauxhibaDetector', u'FauxhibaDetector Documentation',
[u"Sonny Suciawan"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FauxhibaDetector', u'FauxhibaDetector Documentation',
u"Sonny Suciawan", 'FauxhibaDetector',
'Detects Counterfeit Cuban Cigars', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
py
|
1a5cc438ea2c37107b398d04b6d99d58baec3a78
|
import re
from typing import Any, Dict, List, Text
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.tokenizers import Token, Tokenizer
from rasa.nlu.training_data import Message, TrainingData
from rasa.nlu.constants import (
MESSAGE_RESPONSE_ATTRIBUTE,
MESSAGE_INTENT_ATTRIBUTE,
MESSAGE_TEXT_ATTRIBUTE,
MESSAGE_TOKENS_NAMES,
MESSAGE_ATTRIBUTES,
MESSAGE_SPACY_FEATURES_NAMES,
MESSAGE_VECTOR_FEATURE_NAMES,
)
class WhitespaceTokenizer(Tokenizer, Component):
provides = [MESSAGE_TOKENS_NAMES[attribute] for attribute in MESSAGE_ATTRIBUTES]
defaults = {
# Flag to check whether to split intents
"intent_tokenization_flag": False,
# Symbol on which intent should be split
"intent_split_symbol": "_",
# text will be tokenized with case sensitive as default
"case_sensitive": True,
}
def __init__(self, component_config: Dict[Text, Any] = None) -> None:
"""Construct a new tokenizer using the WhitespaceTokenizer framework."""
super().__init__(component_config)
# flag to check whether to split intents
self.intent_tokenization_flag = self.component_config.get(
"intent_tokenization_flag"
)
# split symbol for intents
self.intent_split_symbol = self.component_config["intent_split_symbol"]
self.case_sensitive = self.component_config["case_sensitive"]
def train(
self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any
) -> None:
for example in training_data.training_examples:
for attribute in MESSAGE_ATTRIBUTES:
if example.get(attribute) is not None:
example.set(
MESSAGE_TOKENS_NAMES[attribute],
self.tokenize(example.get(attribute), attribute),
)
def process(self, message: Message, **kwargs: Any) -> None:
message.set(
MESSAGE_TOKENS_NAMES[MESSAGE_TEXT_ATTRIBUTE], self.tokenize(message.text)
)
def tokenize(
self, text: Text, attribute: Text = MESSAGE_TEXT_ATTRIBUTE
) -> List[Token]:
if not self.case_sensitive:
text = text.lower()
        # for non-intent attributes, remove 'not a word character' sequences
        # when one of the conditions in the regex below holds
if attribute != MESSAGE_INTENT_ATTRIBUTE:
words = re.sub(
# there is a space or an end of a string after it
r"[^\w#@&]+(?=\s|$)|"
# there is a space or beginning of a string before it
# not followed by a number
r"(\s|^)[^\w#@&]+(?=[^0-9\s])|"
# not in between numbers and not . or @ or & or - or #
# e.g. 10'000.00 or [email protected]
# and not url characters
r"(?<=[^0-9\s])[^\w._~:/?#\[\]()@!$&*+,;=-]+(?=[^0-9\s])",
" ",
text,
).split()
else:
words = (
text.split(self.intent_split_symbol)
if self.intent_tokenization_flag
else [text]
)
running_offset = 0
tokens = []
for word in words:
word_offset = text.index(word, running_offset)
word_len = len(word)
running_offset = word_offset + word_len
tokens.append(Token(word, word_offset))
return tokens
|
py
|
1a5cc4d46774bebec3d082dec8d26a261b7d91a4
|
########################################
# GlslTerminator #######################
########################################
class GlslTerminator:
"""Terminator class."""
def __init__(self, source):
"""Constructor."""
self.__terminator = source
def format(self, force):
"""Return formatted output."""
return self.__terminator
def getTerminator(self):
"""Access terminating character."""
return self.__terminator
def __eq__(self, other):
"""Equals operator."""
if is_glsl_terminator(other):
return self.__terminator == other.getTerminator()
return self.getTerminator() == other
def __ne__(self, other):
"""Not equals operator."""
return not (self == other)
def __str__(self):
"""String representation."""
return "GlslTerminator('%s')" % (self.__terminator)
########################################
# Functions ############################
########################################
def interpret_terminator(source):
"""Try to interpret a terminator."""
if source == ";":
return GlslTerminator(source)
return None
def is_glsl_terminator(op):
"""Tell if token is operator."""
return isinstance(op, GlslTerminator)
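
# Illustrative example (not part of the original module):
#   interpret_terminator(";") returns a GlslTerminator that compares equal to
#   both GlslTerminator(";") and the plain string ";"; any other input yields
#   None.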
|
py
|
1a5cc5f02938f7238dff69eb7f5dfbde2839f285
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Stack and ParallelStack Ops."""
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
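  """Split `array` along `axis` and squeeze that axis away, returning a list
  of sub-arrays."""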
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
class StackOpTest(test.TestCase):
def randn(self, shape, dtype):
data = np.random.randn(*shape)
if dtype == np.bool_:
return data < 0 # Naive casting yields True with P(1)!
else:
return data.astype(dtype)
def testSimple(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
rank = len(shape)
for axis in range(-rank, rank):
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
data = self.randn(shape, dtype)
xs = np_split_squeeze(data, axis)
# Stack back into a single tensorflow tensor
with self.subTest(shape=shape, axis=axis, dtype=dtype):
c = array_ops.stack(xs, axis=axis)
self.assertAllEqual(c, data)
def testSimpleParallelCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
def testSimpleParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
def testConst(self):
np.random.seed(7)
with test_util.use_gpu():
# Verify that shape induction works with shapes produced via const stack
a = constant_op.constant([1, 2, 3, 4, 5, 6])
b = array_ops.reshape(a, array_ops.stack([2, 3]))
self.assertAllEqual(b.get_shape(), [2, 3])
# Check on a variety of shapes and types
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
for dtype in [np.bool_, np.float32, np.int16, np.int32, np.int64]:
with self.subTest(shape=shape, dtype=dtype):
data = self.randn(shape, dtype)
# Stack back into a single tensorflow tensor directly using np array
c = array_ops.stack(data)
if not context.executing_eagerly():
# This is implemented via a Const:
self.assertEqual(c.op.type, "Const")
self.assertAllEqual(c, data)
# Python lists also work for 1-D case:
if len(shape) == 1:
data_list = list(data)
cl = array_ops.stack(data_list)
if not context.executing_eagerly():
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl, data)
def testConstParallelCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
def testConstParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
def testGradientsAxis0(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
data = np.random.randn(*shape)
with self.subTest(shape=shape):
with self.cached_session():
def func(*xs):
return array_ops.stack(xs)
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
theoretical, numerical = gradient_checker_v2.compute_gradient(
func, xs)
self.assertAllClose(theoretical, numerical)
def testGradientsAxis1(self):
np.random.seed(7)
for shape in (2, 3), (3, 2), (8, 2, 10):
data = np.random.randn(*shape)
out_shape = list(shape[1:])
out_shape.insert(1, shape[0])
with self.subTest(shape=shape):
with self.cached_session():
def func(*inp):
return array_ops.stack(inp, axis=1)
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
theoretical, numerical = gradient_checker_v2.compute_gradient(
func, xs)
self.assertAllClose(theoretical, numerical)
def testZeroSizeCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=False):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
def testZeroSizeGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=True):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
def testAxis0DefaultCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=False):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAxis0DefaultGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for shape in (3,), (2, 2, 3), (4, 1, 2, 2), (8, 2, 10):
rank = len(shape)
expected = self.randn(shape, np.float32)
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
# For all the possible axis to split it, including negative indices.
for axis in range(-rank, rank):
test_arrays = np_split_squeeze(expected, axis)
with self.cached_session():
with self.subTest(shape=shape, dtype=dtype, axis=axis):
actual_pack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_pack.get_shape())
actual_pack = self.evaluate(actual_pack)
actual_stack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_stack.get_shape())
actual_stack = self.evaluate(actual_stack)
self.assertNDArrayNear(expected, actual_stack, 1e-6)
def testDimOutOfRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegex(ValueError,
r"Argument `axis` = 2 not in range \[-2, 2\)"):
array_ops.stack(t, axis=2)
def testDimOutOfNegativeRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegex(ValueError,
r"Argument `axis` = -3 not in range \[-2, 2\)"):
array_ops.stack(t, axis=-3)
def testComplex(self):
np.random.seed(7)
with self.session():
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
for dtype in [np.complex64, np.complex128]:
with self.subTest(shape=shape, dtype=dtype):
data = self.randn(shape, dtype)
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs)
self.assertAllEqual(self.evaluate(c), data)
class AutomaticStackingTest(test.TestCase):
def testSimple(self):
self.assertAllEqual([1, 0, 2],
ops.convert_to_tensor([1, constant_op.constant(0), 2]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([[0, 0, 0],
[0,
constant_op.constant(1), 0],
[0, 0, 0]]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([[0, 0, 0],
constant_op.constant([0, 1, 0]),
[0, 0, 0]]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([
constant_op.constant([0, 0, 0]),
constant_op.constant([0, 1, 0]),
constant_op.constant([0, 0, 0])
]))
def testWithNDArray(self):
with self.session():
result = ops.convert_to_tensor([[[0., 0.],
constant_op.constant([1., 1.])],
np.array(
[[2., 2.], [3., 3.]],
dtype=np.float32)])
self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]],
self.evaluate(result))
def testDtype(self):
t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]])
self.assertEqual(dtypes.float64, t_1.dtype)
t_2 = ops.convert_to_tensor(
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
t_3 = ops.convert_to_tensor(
[[0., 0., 0.],
constant_op.constant([0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_3.dtype)
t_4 = ops.convert_to_tensor(
[constant_op.constant([0., 0., 0.], dtype=dtypes.float64)],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_4.dtype)
with self.assertRaises(TypeError):
ops.convert_to_tensor([
constant_op.constant(
[0., 0., 0.], dtype=dtypes.float32), constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
])
def testDtypeConversionWhenTensorDtypeMismatch(self):
t_0 = ops.convert_to_tensor([0., 0., 0.])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([0, 0, 0])
self.assertEqual(dtypes.int32, t_1.dtype)
t_2 = ops.convert_to_tensor([t_0, t_0, t_1], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
if __name__ == "__main__":
test.main()
|
py
|
1a5cc60a8e9fefda3b0e3dbeeda50940c7d58ec9
|
#!/usr/bin/env python
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import cv2
import numpy as np
import os,sys,timeit,json
from os import listdir
from os.path import isfile, join
import scipy.misc
import logging as log
import argparse
from vai.dpuv1.rt import xdnn, xdnn_io
from vai.dpuv1.utils.postproc import yolo
from yolo_utils import bias_selector, saveDetectionDarknetStyle, yolo_parser_args
from yolo_utils import draw_boxes, generate_colors
from get_mAP_darknet import calc_detector_mAP
def extant_file(x):
"""
'Type' for argparse - checks that file exists but does not open.
"""
if x == "-":
# skip file check and allow empty string
return ""
if not os.path.exists(x):
# Argparse uses the ArgumentTypeError to give a rejection message like:
# error: argument input: x does not exist
raise argparse.ArgumentTypeError("{0} does not exist".format(x))
return x
def prep_image(image_file, net_width, net_height, pix_scale, pad_val, img_transpose, ch_swp):
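    """Read an image, resize it preserving aspect ratio, pad it to a square of
    the network input size with pad_val, scale pixel values by pix_scale, and
    reorder axes and channels as requested. Returns the prepared array and the
    original image shape."""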
img = cv2.imread(image_file)
orig_shape = img.shape
height, width, __ = img.shape
newdim = max(height, width)
scalew = float(width) / newdim
scaleh = float(height) / newdim
maxdim = max(net_width, net_height)
neww = int(maxdim * scalew)
newh = int(maxdim * scaleh)
img = cv2.resize(img, (neww, newh))
if img.dtype != np.float32:
img = img.astype(np.float32, order='C')
img = img * pix_scale
height, width, channels = img.shape
newdim = max(height, width)
letter_image = np.zeros((newdim, newdim, channels))
letter_image[:, :, :] = pad_val
if newdim == width:
        letter_image[(newdim-height)//2:((newdim-height)//2+height),0:width] = img
else:
        letter_image[0:height,(newdim-width)//2:((newdim-width)//2+width)] = img
img = letter_image
img = np.transpose(img, (img_transpose[0], img_transpose[1], img_transpose[2]))
ch = 3*[None]
ch[0] = img[0,:,:]
ch[1] = img[1,:,:]
ch[2] = img[2,:,:]
img = np.stack((ch[ch_swp[0]],ch[ch_swp[1]],ch[ch_swp[2]]))
return img, orig_shape
def yolo_gpu_inference(backend_path,
image_dir,
deploy_model,
weights,
out_labels,
IOU_threshold,
scorethresh,
mean_value,
pxscale,
transpose,
channel_swap,
yolo_model,
num_classes, args):
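    """Run YOLO inference through Caffe (CPU or GPU) over the input images,
    post-process the raw network outputs into boxes, and optionally save the
    detections and/or rendered images. Returns the number of images processed.
    """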
# Setup the environment
images = xdnn_io.getFilePaths(args['images'])
if(args['golden'] or args['visualize']):
assert args['labels'], "Provide --labels to compute mAP."
assert args['results_dir'], "For accuracy measurements, provide --results_dir to save the detections."
labels = xdnn_io.get_labels(args['labels'])
colors = generate_colors(len(labels))
# Select postproc and biases
if args['yolo_version'] == 'v2': yolo_postproc = yolo.yolov2_postproc
elif args['yolo_version'] == 'v3': yolo_postproc = yolo.yolov3_postproc
biases = bias_selector(args)
import caffe
caffe.set_mode_cpu()
print(args)
if(args['gpu'] is not None):
caffe.set_mode_gpu()
caffe.set_device(args['gpu'])
net = caffe.Net(deploy_model, weights, caffe.TEST)
net_h, net_w = net.blobs['data'].data.shape[-2:]
args['net_h'] = net_h
args['net_w'] = net_w
for i,img in enumerate(images):
if((i+1)%100 == 0): print(i+1, "images processed")
raw_img, img_shape = xdnn_io.loadYoloImageBlobFromFile(img, net_h, net_w)
net.blobs['data'].data[...] = raw_img
out = net.forward()
caffeOutput = sorted(out.values(), key=lambda item: item.shape[-1])
boxes = yolo_postproc(caffeOutput, args, [img_shape], biases=biases)
print("{}. Detected {} boxes in {}".format(i, len(boxes[0]), img))
# Save the result
boxes = boxes[0]
if(args['results_dir']):
filename = os.path.splitext(os.path.basename(img))[0]
out_file_txt = os.path.join(args['results_dir'], filename + '.txt')
print("Saving {} boxes to {}".format(len(boxes), out_file_txt)); sys.stdout.flush()
saveDetectionDarknetStyle(out_file_txt, boxes, img_shape)
if(args['visualize']):
out_file_png = os.path.join(args['results_dir'], filename + '.png')
print("Saving result to {}".format(out_file_png)); sys.stdout.flush()
draw_boxes(img, boxes, labels, colors, out_file_png)
# draw_boxes(images[i],bboxes,class_names,colors=[(0,0,0)]*num_classes)
return len(images)
def main():
parser = argparse.ArgumentParser()
parser = yolo_parser_args(parser)
parser.add_argument('--deploymodel', help="network definition prototxt file in case of caffe",
required=True, type=extant_file, metavar="FILE")
parser.add_argument('--caffemodel', help="network weights caffe model file in case of caffe",
required=True, type=extant_file, metavar="FILE")
parser.add_argument('--images', nargs='*',
help='directory or raw image files to use as input', required=True, type=extant_file, metavar="FILE")
parser.add_argument('--labels', help='label ID', type=extant_file, metavar="FILE")
parser.add_argument('--golden', help='Ground truth directory', type=extant_file, metavar="FILE")
parser.add_argument('--mean_value', type=int, nargs=3, default=[0,0,0], # BGR for Caffe
help='image mean values ')
    parser.add_argument('--pxscale', type=float, default=(1.0/255.0), help='pixel scale value')
parser.add_argument('--transpose', type=int, default=[2,0,1], nargs=3, help="Passed to caffe.io.Transformer function set_transpose, default 2,0,1" )
parser.add_argument('--channel_swap', type=int, default=[2,1,0], nargs=3, help="Passed to caffe.io.Transformer function set_channel_swap, default 2,1,0")
parser.add_argument('--caffe_backend_path', help='caffe backend')
parser.add_argument('--gpu', type=int, default=None, help='GPU-ID to run Caffe inference on GPU')
args = parser.parse_args()
args = xdnn_io.make_dict_args(args)
num_images_processed = yolo_gpu_inference(args['caffe_backend_path'],
args['images'],
args['deploymodel'],
args['caffemodel'],
args['results_dir'],
args['iouthresh'],
args['scorethresh'],
args['mean_value'],
args['pxscale'],
args['transpose'],
args['channel_swap'],
args['yolo_model'],
args['classes'], args)
print('num images processed : ', num_images_processed)
# mAP calculation
if(args['golden']):
labels = xdnn_io.get_labels(args['labels'])
print()
print("Computing mAP score : ")
print("Class names are : {} ".format(labels))
mAP = calc_detector_mAP(args['results_dir'], args['golden'], len(labels), labels, args['prob_threshold'], args['mapiouthresh'], args['points'])
sys.stdout.flush()
if __name__ == '__main__':
main()
|
py
|
1a5cc694e4284f3e132220aaf910f23081ac07d0
|
# Generated by Django 2.2.10 on 2020-03-12 08:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("django_activiti", "0002_activiticonfig_tenant"),
]
operations = [
migrations.AddField(
model_name="activiticonfig",
name="enabled",
field=models.BooleanField(default=False, verbose_name="enabled"),
),
]
|
py
|
1a5cc76cfa1b468303dc67081ffa5950112f0482
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_block_lower_triangular as block_lower_triangular
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
def _block_lower_triangular_dense(expected_shape, blocks):
"""Convert a list of blocks into a dense blockwise lower-triangular matrix."""
rows = []
num_cols = 0
for row_blocks in blocks:
# Get the batch shape for the block.
batch_row_shape = array_ops.shape(row_blocks[0])[:-1]
num_cols += array_ops.shape(row_blocks[-1])[-1]
zeros_to_pad_after_shape = array_ops.concat(
[batch_row_shape, [expected_shape[-2] - num_cols]], axis=-1)
zeros_to_pad_after = array_ops.zeros(
zeros_to_pad_after_shape, dtype=row_blocks[-1].dtype)
row_blocks.append(zeros_to_pad_after)
rows.append(array_ops.concat(row_blocks, axis=-1))
return array_ops.concat(rows, axis=-2)
@test_util.run_all_in_graph_and_eager_modes
class SquareLinearOperatorBlockLowerTriangularTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def tearDown(self):
config.enable_tensor_float_32_execution(self.tf32_keep_)
def setUp(self):
self.tf32_keep_ = config.tensor_float_32_execution_enabled()
config.enable_tensor_float_32_execution(False)
# Increase from 1e-6 to 1e-5
self._atol[dtypes.float32] = 1e-5
self._atol[dtypes.complex64] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._rtol[dtypes.complex64] = 1e-5
super(SquareLinearOperatorBlockLowerTriangularTest, self).setUp()
@staticmethod
def use_blockwise_arg():
return True
@staticmethod
def skip_these_tests():
# Skipping since `LinearOperatorBlockLowerTriangular` is in general not
# self-adjoint.
return ["cholesky", "eigvalsh"]
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
return [
shape_info((0, 0)),
shape_info((1, 1)),
shape_info((1, 3, 3)),
shape_info((5, 5), blocks=[[(2, 2)], [(3, 2), (3, 3)]]),
shape_info((3, 7, 7),
blocks=[[(1, 2, 2)], [(1, 3, 2), (3, 3, 3)],
[(1, 2, 2), (1, 2, 3), (1, 2, 2)]]),
shape_info((2, 4, 6, 6),
blocks=[[(2, 1, 2, 2)], [(1, 4, 2), (4, 4, 4)]]),
]
def operator_and_matrix(
self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
expected_blocks = (
shape_info.__dict__["blocks"] if "blocks" in shape_info.__dict__
else [[list(shape_info.shape)]])
matrices = []
for i, row_shapes in enumerate(expected_blocks):
row = []
for j, block_shape in enumerate(row_shapes):
if i == j: # operator is on the diagonal
row.append(
linear_operator_test_util.random_positive_definite_matrix(
block_shape, dtype, force_well_conditioned=True))
else:
row.append(
linear_operator_test_util.random_normal(block_shape, dtype=dtype))
matrices.append(row)
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [[
array_ops.placeholder_with_default(
matrix, shape=None) for matrix in row] for row in matrices]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix( # pylint:disable=g-complex-comprehension
l,
is_square=True,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
for l in row] for row in lin_op_matrices])
# Should be auto-set.
self.assertTrue(operator.is_square)
# Broadcast the shapes.
expected_shape = list(shape_info.shape)
broadcasted_matrices = linear_operator_util.broadcast_matrix_batch_dims(
[op for row in matrices for op in row]) # pylint: disable=g-complex-comprehension
matrices = [broadcasted_matrices[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]
for i in range(len(matrices))]
block_lower_triangular_dense = _block_lower_triangular_dense(
expected_shape, matrices)
if not use_placeholder:
block_lower_triangular_dense.set_shape(expected_shape)
return operator, block_lower_triangular_dense
def test_is_x_flags(self):
# Matrix with two positive eigenvalues, 1, and 1.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix(matrix)]],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_block_lower_triangular_inverse_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)],
[linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True),
linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)]],
is_non_singular=True,
)
inverse = operator.inverse()
self.assertIsInstance(
inverse,
block_lower_triangular.LinearOperatorBlockLowerTriangular)
self.assertEqual(2, len(inverse.operators))
self.assertEqual(1, len(inverse.operators[0]))
self.assertEqual(2, len(inverse.operators[1]))
def test_tape_safe(self):
operator_1 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[1., 0.], [0., 1.]]),
is_self_adjoint=True,
is_positive_definite=True)
operator_2 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[2., 0.], [1., 0.]]))
operator_3 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[3., 1.], [1., 3.]]),
is_self_adjoint=True,
is_positive_definite=True)
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]],
is_self_adjoint=False,
is_positive_definite=True)
diagonal_grads_only = ["diag_part", "trace", "determinant",
"log_abs_determinant"]
self.check_tape_safe(operator, skip_options=diagonal_grads_only)
for y in diagonal_grads_only:
for diag_block in [operator_1, operator_3]:
with backprop.GradientTape() as tape:
grads = tape.gradient(getattr(operator, y)(), diag_block.variables)
for item in grads:
self.assertIsNotNone(item)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_3 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegex(ValueError, "always non-singular"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]], is_non_singular=False)
operator_4 = linalg.LinearOperatorFullMatrix(
[[1., 0.], [2., 0.]], is_non_singular=False)
# A singular operator off of the main diagonal shouldn't raise
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_4, operator_2]], is_non_singular=True)
with self.assertRaisesRegex(ValueError, "always singular"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_4]], is_non_singular=True)
def test_different_dtypes_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3))],
[linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))]
]
with self.assertRaisesRegex(TypeError, "same dtype"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_non_square_operator_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(3, 4), is_square=False)],
[linalg.LinearOperatorFullMatrix(rng.rand(4, 4)),
linalg.LinearOperatorFullMatrix(rng.rand(4, 4))]
]
with self.assertRaisesRegex(ValueError, "must be square"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_empty_operators_raises(self):
with self.assertRaisesRegex(ValueError, "must be a list of >=1"):
block_lower_triangular.LinearOperatorBlockLowerTriangular([])
def test_operators_wrong_length_raises(self):
with self.assertRaisesRegex(ValueError, "must contain `2` blocks"):
block_lower_triangular.LinearOperatorBlockLowerTriangular([
[linalg.LinearOperatorFullMatrix(rng.rand(2, 2))],
[linalg.LinearOperatorFullMatrix(rng.rand(2, 2))
for _ in range(3)]])
def test_operators_mismatched_dimension_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(3, 3))],
[linalg.LinearOperatorFullMatrix(rng.rand(3, 4)),
linalg.LinearOperatorFullMatrix(rng.rand(3, 3))]
]
with self.assertRaisesRegex(ValueError, "must be the same as"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_incompatible_input_blocks_raises(self):
matrix_1 = array_ops.placeholder_with_default(rng.rand(4, 4), shape=None)
matrix_2 = array_ops.placeholder_with_default(rng.rand(3, 4), shape=None)
matrix_3 = array_ops.placeholder_with_default(rng.rand(3, 3), shape=None)
operators = [
[linalg.LinearOperatorFullMatrix(matrix_1, is_square=True)],
[linalg.LinearOperatorFullMatrix(matrix_2),
linalg.LinearOperatorFullMatrix(matrix_3, is_square=True)]
]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
operators)
x = np.random.rand(2, 4, 5).tolist()
msg = ("dimension does not match" if context.executing_eagerly()
else "input structure is ambiguous")
with self.assertRaisesRegex(ValueError, msg):
operator.matmul(x)
if __name__ == "__main__":
linear_operator_test_util.add_tests(
SquareLinearOperatorBlockLowerTriangularTest)
test.main()
|
py
|
1a5cc8e8ec4ae5a48241515ddcaf464e3ceea9bd
|
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import abc
import os
import uuid
from typing import Dict
import six
from datadog_checks.dev.tooling.constants import get_root
from datadog_checks.dev.tooling.git import content_changed
from datadog_checks.dev.tooling.manifest_validator.schema import get_manifest_schema
from datadog_checks.dev.tooling.utils import (
get_metadata_file,
has_logs,
is_metric_in_metadata_file,
is_package,
parse_version_parts,
read_metadata_rows,
)
FIELDS_NOT_ALLOWED_TO_CHANGE = ["integration_id", "display_name", "guid"]
METRIC_TO_CHECK_EXCLUDE_LIST = {
'openstack.controller', # "Artificial" metric, shouldn't be listed in metadata file.
'riakcs.bucket_list_pool.workers', # RiakCS 2.1 metric, but metadata.csv lists RiakCS 2.0 metrics only.
}
class ValidationResult(object):
def __init__(self):
self.failed = False
self.fixed = False
self.messages = {'success': [], 'warning': [], 'failure': [], 'info': []}
def __str__(self):
return '\n'.join(['\n'.join(messages) for messages in self.messages.values()])
def __repr__(self):
return str(self)
@six.add_metaclass(abc.ABCMeta)
class ManifestValidator(object):
def __init__(self, is_extras=False, is_marketplace=False, check_in_extras=True, check_in_marketplace=True):
self.result = ValidationResult()
self.is_extras = is_extras
self.is_marketplace = is_marketplace
self.check_in_extras = check_in_extras
        self.check_in_marketplace = check_in_marketplace
def should_validate(self):
if not self.is_extras and not self.is_marketplace:
return True
if self.is_extras and self.check_in_extras:
return True
        if self.is_marketplace and self.check_in_marketplace:
return True
return False
def validate(self, check_name, manifest, should_fix):
# type: (str, Dict, bool) -> None
"""Validates the decoded manifest. Will perform inline changes if fix is true"""
raise NotImplementedError
def fail(self, error_message):
self.result.failed = True
self.result.messages['failure'].append(error_message)
def fix(self, problem, solution):
self.result.warning_msg = problem
self.result.success_msg = solution
self.result.fixed = True
self.result.failed = False
def __repr__(self):
return str(self.result)
class AttributesValidator(ManifestValidator):
""" attributes are valid"""
def validate(self, check_name, decoded, fix):
errors = sorted(get_manifest_schema().iter_errors(decoded), key=lambda e: e.path)
if errors:
for error in errors:
self.fail(f' {"->".join(map(str, error.absolute_path))} Error: {error.message}')
class GUIDValidator(ManifestValidator):
all_guids = {}
def validate(self, check_name, decoded, fix):
guid = decoded.get('guid')
if guid in self.all_guids:
output = f' duplicate `guid`: `{guid}` from `{self.all_guids[guid]}`'
if fix:
new_guid = uuid.uuid4()
self.all_guids[new_guid] = check_name
decoded['guid'] = new_guid
self.fix(output, f' new `guid`: {new_guid}')
else:
self.fail(output)
elif not guid or not isinstance(guid, str):
output = ' required non-null string: guid'
if fix:
new_guid = uuid.uuid4()
self.all_guids[new_guid] = check_name
decoded['guid'] = new_guid
self.fix(output, f' new `guid`: {new_guid}')
else:
self.fail(output)
else:
self.all_guids[guid] = check_name
return self.result
class ManifestVersionValidator(ManifestValidator):
def __init__(self, *args, **kwargs):
super(ManifestVersionValidator, self).__init__(*args, **kwargs)
self.root = get_root()
def validate(self, check_name, decoded, fix):
# manifest_version
correct_manifest_version = '1.0.0'
manifest_version = decoded.get('manifest_version')
version_parts = parse_version_parts(manifest_version)
if len(version_parts) != 3:
if not manifest_version:
output = ' required non-null string: manifest_version'
else:
output = f' invalid `manifest_version`: {manifest_version}'
if fix:
version_parts = parse_version_parts(correct_manifest_version)
decoded['manifest_version'] = correct_manifest_version
self.fix(output, f' new `manifest_version`: {correct_manifest_version}')
else:
self.fail(output)
if len(version_parts) == 3:
about_exists = os.path.isfile(
os.path.join(self.root, check_name, 'datadog_checks', check_name, '__about__.py')
)
if version_parts >= [1, 0, 0]:
if 'version' in decoded and about_exists:
output = ' outdated field: version'
if fix:
del decoded['version']
self.fix(output, ' removed field: version')
else:
self.fail(output)
elif about_exists:
output = f' outdated `manifest_version`: {manifest_version}'
if fix:
decoded['manifest_version'] = correct_manifest_version
self.fix(output, f' new `manifest_version`: {correct_manifest_version}')
if 'version' in decoded:
del decoded['version']
self.result.messages['success'].append(' removed field: version')
else:
self.fail(output)
else:
version = decoded.get('version')
version_parts = parse_version_parts(version)
if len(version_parts) != 3:
if not version:
output = ' required non-null string: version'
else:
output = f' invalid `version`: {version}'
self.fail(output)
class MaintainerValidator(ManifestValidator):
def validate(self, check_name, decoded, fix):
if not self.should_validate():
return
correct_maintainer = '[email protected]'
maintainer = decoded.get('maintainer')
if not maintainer.isascii():
self.fail(f' `maintainer` contains non-ascii character: {maintainer}')
return
if maintainer != correct_maintainer:
output = f' incorrect `maintainer`: {maintainer}'
if fix:
decoded['maintainer'] = correct_maintainer
self.fix(output, f' new `maintainer`: {correct_maintainer}')
else:
self.fail(output)
class NameValidator(ManifestValidator):
def validate(self, check_name, decoded, fix):
correct_name = check_name
name = decoded.get('name')
if not isinstance(name, str) or name.lower() != correct_name.lower():
output = f' incorrect `name`: {name}'
if fix:
decoded['name'] = correct_name
self.fix(output, f' new `name`: {correct_name}')
else:
self.fail(output)
class MetricsMetadataValidator(ManifestValidator):
def validate(self, check_name, decoded, fix):
# metrics_metadata
metadata_in_manifest = decoded.get('assets', {}).get('metrics_metadata')
metadata_file = get_metadata_file(check_name)
metadata_file_exists = os.path.isfile(metadata_file)
if not metadata_in_manifest and metadata_file_exists:
# There is a metadata.csv file but no entry in the manifest.json
            self.fail(' metadata.csv exists but is not defined in the manifest.json of {}'.format(check_name))
elif metadata_in_manifest and not metadata_file_exists:
# There is an entry in the manifest.json file but the referenced csv file does not exist.
self.fail(' metrics_metadata in manifest.json references a non-existing file: {}.'.format(metadata_file))
class MetricToCheckValidator(ManifestValidator):
def validate(self, check_name, decoded, _):
if not self.should_validate() or check_name == 'snmp' or check_name == 'moogsoft':
return
metadata_in_manifest = decoded.get('assets', {}).get('metrics_metadata')
# metric_to_check
metric_to_check = decoded.get('metric_to_check')
pricing = decoded.get('pricing', [])
if metric_to_check:
metrics_to_check = metric_to_check if isinstance(metric_to_check, list) else [metric_to_check]
if any(p.get('metric') in metrics_to_check for p in pricing):
return
for metric in metrics_to_check:
metric_integration_check_name = check_name
# snmp vendor specific integrations define metric_to_check
# with metrics from `snmp` integration
if check_name.startswith('snmp_') and not metadata_in_manifest:
metric_integration_check_name = 'snmp'
if (
not is_metric_in_metadata_file(metric, metric_integration_check_name)
and metric not in METRIC_TO_CHECK_EXCLUDE_LIST
):
self.fail(f' metric_to_check not in metadata.csv: {metric!r}')
elif metadata_in_manifest:
# if we have a metadata.csv file but no `metric_to_check` raise an error
metadata_file = get_metadata_file(check_name)
if os.path.isfile(metadata_file):
for _, row in read_metadata_rows(metadata_file):
# there are cases of metadata.csv files with just a header but no metrics
if row:
self.fail(' metric_to_check not included in manifest.json')
class SupportValidator(ManifestValidator):
def validate(self, check_name, decoded, fix):
if self.is_extras:
correct_support = 'contrib'
elif self.is_marketplace:
correct_support = 'partner'
else:
correct_support = 'core'
support = decoded.get('support')
if support != correct_support:
output = f' incorrect `support`: {support}'
if fix:
decoded['support'] = correct_support
self.fix(output, f' new `support`: {correct_support}')
else:
self.fail(output)
class IsPublicValidator(ManifestValidator):
def validate(self, check_name, decoded, fix):
correct_is_public = True
is_public = decoded.get('is_public')
if not isinstance(is_public, bool):
output = ' required boolean: is_public'
if fix:
decoded['is_public'] = correct_is_public
self.fix(output, f' new `is_public`: {correct_is_public}')
else:
self.fail(output)
class ImmutableAttributesValidator(ManifestValidator):
"""Ensure attributes haven't changed
Skip if the manifest is a new file (i.e. new integration)
"""
def validate(self, check_name, decoded, fix):
manifest_fields_changed = content_changed(file_glob=f"{check_name}/manifest.json")
if 'new file' not in manifest_fields_changed:
for field in FIELDS_NOT_ALLOWED_TO_CHANGE:
if field in manifest_fields_changed:
output = f'Attribute `{field}` is not allowed to be modified. Please revert to original value'
self.fail(output)
else:
self.result.messages['info'].append(
" skipping check for changed fields: integration not on default branch"
)
class LogsCategoryValidator(ManifestValidator):
"""If an integration defines logs it should have the log collection category"""
LOG_COLLECTION_CATEGORY = "log collection"
IGNORE_LIST = {
'docker_daemon',
'ecs_fargate', # Logs are provided by FireLens or awslogs
'cassandra_nodetool', # Logs are provided by cassandra
'jmeter',
'kafka_consumer', # Logs are provided by kafka
'kubernetes',
'pan_firewall',
'altostra',
'hasura_cloud',
'sqreen',
}
def validate(self, check_name, decoded, fix):
categories = decoded.get('categories')
check_has_logs = has_logs(check_name)
check_has_logs_category = self.LOG_COLLECTION_CATEGORY in categories
if check_has_logs == check_has_logs_category or check_name in self.IGNORE_LIST:
return
if check_has_logs:
output = ' required category: ' + self.LOG_COLLECTION_CATEGORY
if fix:
correct_categories = categories + [self.LOG_COLLECTION_CATEGORY]
decoded['categories'] = correct_categories
self.fix(output, f' new `categories`: {correct_categories}')
else:
self.fail(output)
else:
output = (
' This integration does not have logs, please remove the category: '
+ self.LOG_COLLECTION_CATEGORY
+ ' or define the logs properly'
)
self.fail(output)
class SupportedOSValidator(ManifestValidator):
"""If an integration contains python or logs configuration, the supported_os field should not be empty."""
def validate(self, check_name, decoded, _):
supported_os = decoded.get('supported_os')
check_has_logs = has_logs(check_name)
check_has_python = is_package(check_name)
if not supported_os and (check_has_logs or check_has_python):
output = f'Attribute `supported_os` in {check_name}/manifest.json should not be empty.'
self.fail(output)
def get_all_validators(is_extras, is_marketplace):
return [
AttributesValidator(),
GUIDValidator(),
ManifestVersionValidator(),
MaintainerValidator(is_extras, is_marketplace, check_in_extras=False, check_in_marketplace=False),
NameValidator(),
MetricsMetadataValidator(),
MetricToCheckValidator(),
SupportValidator(is_extras, is_marketplace),
IsPublicValidator(),
ImmutableAttributesValidator(),
LogsCategoryValidator(),
SupportedOSValidator(),
]
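

# Illustrative usage sketch: ``check_name`` and ``decoded_manifest`` are
# hypothetical inputs (the integration name and its decoded manifest.json);
# the ``validate(check_name, decoded, fix)`` signature comes from the
# validator classes above.
def _example_run_all_validators(check_name, decoded_manifest):
    failures = []
    for validator in get_all_validators(is_extras=False, is_marketplace=False):
        validator.validate(check_name, decoded_manifest, False)
        if validator.result.failed:
            failures.append(str(validator))
    return failures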
|
py
|
1a5cc90d5c66b90c35e79845240465d98d85bfb5
|
"""data-describe text.
This subpackage provides functionality to process and analyze unstructured, free-form text using text analytics & Natural Language Processing (NLP).
"""
from data_describe.text.text_preprocessing import ( # noqa: F401
tokenize,
to_lower,
remove_punct,
remove_digits,
remove_single_char_and_spaces,
remove_stopwords,
lemmatize,
stem,
bag_of_words_to_docs,
create_tfidf_matrix,
create_doc_term_matrix,
preprocess_texts,
ngram_freq,
filter_dictionary,
)
|
py
|
1a5cc91aa859e8fd84cd5a4d4b8e9ab0b0d4a55c
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_axis38.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [45642496, 45644416]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_y_axis({'line': {'color': 'yellow'},
'fill': {'color': 'red'}})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
py
|
1a5cc9514eaa057487ad0e11f1897fdf0fa65b4c
|
# -*- coding: utf-8 -*-
"""
.. _tut-bad-channels:
=====================
Handling bad channels
=====================
This tutorial covers manual marking of bad channels and reconstructing bad
channels based on good signals at other sensors.
As usual we'll start by importing the modules we need, and loading some example
data:
"""
# %%
import os
from copy import deepcopy
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
# %%
# Marking bad channels
# ^^^^^^^^^^^^^^^^^^^^
#
# Sometimes individual channels malfunction and provide data that is too noisy
# to be usable. MNE-Python makes it easy to ignore those channels in the
# analysis stream without actually deleting the data in those channels. It does
# this by keeping track of the bad channel names in a list and looking at
# that list
# when doing analysis or plotting tasks. The list of bad channels is stored in
# the ``'bads'`` field of the :class:`~mne.Info` object that is attached to
# :class:`~mne.io.Raw`, :class:`~mne.Epochs`, and :class:`~mne.Evoked` objects.
print(raw.info['bads'])
# %%
# Here you can see that the :file:`.fif` file we loaded from disk must have
# been keeping track of channels marked as "bad" — which is good news, because
# it means any changes we make to the list of bad channels will be preserved if
# we save our data at intermediate stages and re-load it later. Since we saw
# above that ``EEG 053`` is one of the bad channels, let's look at it alongside
# some other EEG channels to see what's bad about it. We can do this using the
# standard :meth:`~mne.io.Raw.plot` method, and instead of listing the channel
# names one by one (``['EEG 050', 'EEG 051', ...]``) we'll use a `regular
# expression`_ to pick all the EEG channels between 050 and 059 with the
# :func:`~mne.pick_channels_regexp` function (the ``.`` is a wildcard
# character):
picks = mne.pick_channels_regexp(raw.ch_names, regexp='EEG 05.')
raw.plot(order=picks, n_channels=len(picks))
# %%
# We can do the same thing for the bad MEG channel (``MEG 2443``). Since we
# know that Neuromag systems (like the one used to record the example data) use
# the last digit of the MEG channel number to indicate sensor type, here our
# `regular expression`_ will pick all the channels that start with 2 and end
# with 3:
picks = mne.pick_channels_regexp(raw.ch_names, regexp='MEG 2..3')
raw.plot(order=picks, n_channels=len(picks))
# %%
# Notice first of all that the channels marked as "bad" are plotted in a light
# gray color in a layer behind the other channels, to make it easy to
# distinguish them from "good" channels. The plots make it clear that ``EEG
# 053`` is not picking up scalp potentials at all, and ``MEG 2443`` looks like
# it's got a lot more internal noise than its neighbors — its signal is a few
# orders of magnitude greater than the other MEG channels, making it a clear
# candidate for exclusion.
#
# If you want to change which channels are marked as bad, you can edit
# ``raw.info['bads']`` directly; it's an ordinary Python :class:`list` so the
# usual list methods will work:
original_bads = deepcopy(raw.info['bads'])
raw.info['bads'].append('EEG 050') # add a single channel
raw.info['bads'].extend(['EEG 051', 'EEG 052']) # add a list of channels
bad_chan = raw.info['bads'].pop(-1) # remove the last entry in the list
raw.info['bads'] = original_bads # change the whole list at once
# %%
# .. sidebar:: Blocking execution
#
# If you want to build an interactive bad-channel-marking step into an
# analysis script, be sure to include the parameter ``block=True`` in your
# call to ``raw.plot()`` or ``epochs.plot()``. This will pause the script
# while the plot is open, giving you time to mark bad channels before
# subsequent analysis or plotting steps are executed. This can be
# especially helpful if your script loops over multiple subjects.
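#
#     For example::
#
#         raw.plot(block=True)  # execution pauses until the plot window is closed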
#
# You can also interactively toggle whether a channel is marked "bad" in the
# plot windows of ``raw.plot()`` or ``epochs.plot()`` by clicking on the
# channel name along the vertical axis (in ``raw.plot()`` windows you can also
# do this by clicking the channel's trace in the plot area). The ``bads`` field
# gets updated immediately each time you toggle a channel, and will retain its
# modified state after the plot window is closed.
#
# The list of bad channels in the :class:`mne.Info` object's ``bads`` field is
# automatically taken into account in dozens of functions and methods across
# the MNE-Python codebase. This is done consistently with a parameter
# ``exclude='bads'`` in the function or method signature. Typically this
# ``exclude`` parameter also accepts a list of channel names or indices, so if
# you want to *include* the bad channels you can do so by passing
# ``exclude=[]`` (or some other list of channels to exclude). For example:
# default is exclude='bads':
good_eeg = mne.pick_types(raw.info, meg=False, eeg=True)
all_eeg = mne.pick_types(raw.info, meg=False, eeg=True, exclude=[])
print(np.setdiff1d(all_eeg, good_eeg))
print(np.array(raw.ch_names)[np.setdiff1d(all_eeg, good_eeg)])
# %%
# When to look for bad channels
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# You can start looking for bad channels during the experiment session when the
# data is being acquired. If you notice any flat or excessively noisy channels,
# you can note them in your experiment log or protocol sheet. If your system
# computes online averages, these can be a good way to spot bad channels as
# well. After the data has been collected, you can do a more thorough check for
# bad channels by browsing the raw data using :meth:`mne.io.Raw.plot`, without
# any projectors or ICA applied. Finally, you can compute offline averages
# (again with projectors, ICA, and EEG referencing disabled) to look for
# channels with unusual properties. Here's an example of ERP/F plots where the
# bad channels were not properly marked:
raw2 = raw.copy()
raw2.info['bads'] = []
events = mne.find_events(raw2, stim_channel='STI 014')
epochs = mne.Epochs(raw2, events=events)['2'].average().plot()
# %%
# The bad EEG channel is not so obvious, but the bad gradiometer is easy to
# see.
#
# Remember, marking bad channels should be done as early as possible in the
# analysis pipeline. When bad channels are marked in a :class:`~mne.io.Raw`
# object, the markings will be automatically transferred through the chain of
# derived object types: including :class:`~mne.Epochs` and :class:`~mne.Evoked`
# objects, but also :class:`noise covariance <mne.Covariance>` objects,
# :class:`forward solution computations <mne.Forward>`, :class:`inverse
# operators <mne.minimum_norm.InverseOperator>`, etc. If you don't notice the
# badness until later stages of your analysis pipeline, you'll probably need to
# go back and re-run the pipeline, so it's a good investment of time to
# carefully explore the data for bad channels early on.
#
#
# Why mark bad channels at all?
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Many analysis computations can be strongly affected by the presence of bad
# channels. For example, a malfunctioning channel with completely flat signal
# will have zero channel variance, which will cause noise estimates to be
# unrealistically low. This low noise estimate will lead to a strong channel
# weight in the estimate of cortical current, and because the channel is flat,
# the magnitude of cortical current estimates will shrink dramatically.
#
# Conversely, very noisy channels can also cause problems. For example, they
# can lead to too many epochs being discarded based on signal amplitude
# rejection thresholds, which in turn can lead to less robust estimation of the
# noise covariance across sensors. Noisy channels can also interfere with
# :term:`SSP` computations, because the projectors will be
# spatially biased in the direction of the noisy channel, which can cause
# adjacent good channels to be suppressed. ICA is corrupted by noisy channels
# for similar reasons. On the other hand, when performing machine learning
# analyses, bad channels may have limited, if any, impact (i.e., bad channels
# will be uninformative and therefore ignored / deweighted by the algorithm).
#
#
# Interpolating bad channels
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In some cases simply excluding bad channels is sufficient (for example, if
# you plan only to analyze a specific sensor ROI, and the bad channel is
# outside that ROI). However, in cross-subject analyses it is often helpful to
# maintain the same data dimensionality for all subjects, and there is no
# guarantee that the same channels will be bad for all subjects. It is possible
# in such cases to remove each channel that is bad for even a single subject,
# but that can lead to a dramatic drop in data rank (and ends up discarding a
# fair amount of clean data in the process). In such cases it is desirable to
# reconstruct bad channels by interpolating their signals based on the signals
# of the good sensors around them.
#
#
# How interpolation works
# ~~~~~~~~~~~~~~~~~~~~~~~
#
# Interpolation of EEG channels in MNE-Python is done using the spherical
# spline method :footcite:`PerrinEtAl1989`, which projects the sensor
# locations onto a unit sphere
# and interpolates the signal at the bad sensor locations based on the signals
# at the good locations. Mathematical details are presented in
# :ref:`channel-interpolation`. Interpolation of MEG channels uses the field
# mapping algorithms used in computing the :ref:`forward solution
# <tut-forward>`.
#
#
# Interpolation in MNE-Python
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Interpolating bad channels in :class:`~mne.io.Raw` objects is done with the
# :meth:`~mne.io.Raw.interpolate_bads` method, which automatically applies the
# correct method (spherical splines or field interpolation) to EEG and MEG
# channels, respectively (there is a corresponding method
# :meth:`mne.Epochs.interpolate_bads` that works for :class:`~mne.Epochs`
# objects). To illustrate how it works, we'll start by cropping the raw object
# to just three seconds for easier plotting:
raw.crop(tmin=0, tmax=3).load_data()
# %%
# By default, :meth:`~mne.io.Raw.interpolate_bads` will clear out
# ``raw.info['bads']`` after interpolation, so that the interpolated channels
# are no longer excluded from subsequent computations. Here, for illustration
# purposes, we'll prevent that by specifying ``reset_bads=False`` so that when
# we plot the data before and after interpolation, the affected channels will
# still plot in red:
eeg_data = raw.copy().pick_types(meg=False, eeg=True, exclude=[])
eeg_data_interp = eeg_data.copy().interpolate_bads(reset_bads=False)
for title, data in zip(['orig.', 'interp.'], [eeg_data, eeg_data_interp]):
with mne.viz.use_browser_backend('matplotlib'):
fig = data.plot(butterfly=True, color='#00000022', bad_color='r')
fig.subplots_adjust(top=0.9)
fig.suptitle(title, size='xx-large', weight='bold')
# %%
# Note that we used the ``exclude=[]`` trick in the call to
# :meth:`~mne.io.Raw.pick_types` to make sure the bad channels were not
# automatically dropped from the selection. Here is the corresponding example
# with the interpolated gradiometer channel; since there are more channels
# we'll use a more transparent gray color this time:
grad_data = raw.copy().pick_types(meg='grad', exclude=[])
grad_data_interp = grad_data.copy().interpolate_bads(reset_bads=False)
for data in (grad_data, grad_data_interp):
data.plot(butterfly=True, color='#00000009', bad_color='r')
# %%
# Summary
# ^^^^^^^
#
# Bad channel exclusion or interpolation is an important step in EEG/MEG
# preprocessing. MNE-Python provides tools for marking and interpolating bad
# channels; the list of which channels are marked as "bad" is propagated
# automatically through later stages of processing. For an even more automated
# approach to bad channel detection and interpolation, consider using the
# `autoreject package`_, which interfaces well with MNE-Python-based pipelines.
#
#
# References
# ^^^^^^^^^^
#
# .. footbibliography::
#
#
# .. LINKS
#
# .. _`regular expression`: https://www.regular-expressions.info/
# .. _`autoreject package`: http://autoreject.github.io/
|
py
|
1a5ccaf449027f04b69f09a0c11a55ceda4d2536
|
from time import sleep
from typing import Iterable, Optional
from .shared.networking import ConnectionSettings, NetworkConnection
from .shared.configuration import Configuration
from .shared.logs import get_logger, initialize
from .messaging.broker import Broker, BrokerSettings
from .messaging.logging_broker import LoggingBroker
from .computing.facade import get_computational_problem
from .computing.base import Subproblem, SubproblemResult, SubproblemPool
from .computing.domain_commands import DomainCommand, PruneCommand
from .app import ApplicationSettings, ComputationManager, EmptySubproblemPoolError
from .messaging.commands import CommandMapper
from .messaging.command_handler import CommandHandler, CommandNotRegisteredException
from time import time
ResultCommand: type = None
RegisterCommand: type = None
def main(computation_manager: ComputationManager):
config = Configuration(__package__) \
.add_json_file('config.json')
app_settings = config.get('Application').bind_as(ApplicationSettings)
broker_settings = config.get('Broker').bind_as(BrokerSettings)
logger = get_logger(__package__)
mode_name = 'active' if app_settings.active_mode else 'passive'
logger.info(f'Codeine started in {mode_name} mode.')
handler = create_command_handler(computation_manager.pool)
broker = create_broker(broker_settings, create_command_mapper())
broker.start()
broker.discover_network()
subproblem: Optional[Subproblem] = None
active_mode = app_settings.active_mode
any_free_subproblems = True
ttt = time()
try:
while True:
if computation_manager.pool.not_started_pool:
any_free_subproblems = True
if time() - ttt > 5:
display_pool(computation_manager.pool, logger)
broker.discover_network()
broker.broadcast(ProgressCommand(*computation_manager.get_progress()))
ttt = time()
if active_mode and any_free_subproblems:
if requested_subproblem_drop(subproblem, computation_manager):
subproblem.stop()
logger.info(f'Subproblem drop requested.')
if subproblem is None:
try:
subproblem = computation_manager.create_random()
subproblem.start()
identifier = subproblem.identifier
broker.broadcast(RegisterCommand(identifier))
logger.info(f'Subproblem #{identifier} has started.')
except EmptySubproblemPoolError:
logger.warning('No more subproblems to take.')
any_free_subproblems = False
elif not subproblem.is_alive():
identifier = subproblem.identifier
if computation_manager.pool.get_id_in_progress_locally() is None:
logger.info(f'Subproblem #{identifier} has been dropped.')
else:
result = subproblem.result
computation_manager.handle_completed(subproblem)
broadcast_result(subproblem, broker)
logger.info(f'Subproblem #{identifier} has ended (result: {result}).')
subproblem = None
results = computation_manager.pool.results
if computation_manager.stop_condition_is_met():
active_mode = False
logger.info(f'Stop condition is met: {results}')
elif computation_manager.all_subproblems_finished():
any_free_subproblems = False
logger.info(f'All subproblems finished: {results}')
for payload in broker.get_payloads():
try:
logger.info(f'Received command from {payload.address}: {payload.command}')
responses = handler.handle(payload)
for response in responses:
broker.send(response)
except CommandNotRegisteredException as exc:
logger.error(f'Unregistered command received from {payload.address}: {exc}')
logger.info(computation_manager.pool.results)
if not broker.is_alive():
break
sleep(0.01)
except KeyboardInterrupt:
pass
except BaseException as exc:
logger.exception(f'An unexpected exception has occurred: {exc}')
logger.info('Gracefully stopping Codeine...')
broker.stop()
broker.join()
if subproblem:
subproblem.stop()
subproblem.join()
logger.info('Gracefully stopped.')
def requested_subproblem_drop(subproblem, computation_manager) -> bool:
return (computation_manager.pool.get_id_in_progress_locally() is None
and subproblem is not None)
def create_broker(broker_settings: BrokerSettings, mapper: CommandMapper) -> Broker:
logger = get_logger('broker')
connection = NetworkConnection(broker_settings.connection)
broker = LoggingBroker(connection, logger, mapper, broker_settings)
broker.on_prune(lambda addr: PruneCommand(addr))
return broker
def create_command_mapper() -> CommandMapper:
return CommandMapper() \
.register(ResultCommand) \
.register(RegisterCommand) \
.register(DropCommand) \
.register(ProgressCommand)
def create_command_handler(pool: SubproblemPool) -> CommandHandler:
return CommandHandler() \
.register(DomainCommand, pool)
def broadcast_result(subproblem: Subproblem, broker: Broker):
command = ResultCommand(subproblem.identifier, subproblem.result)
broker.broadcast(command)
def display_pool(pool: SubproblemPool, logger):
logger.info(f'[Not started] {pool.not_started_pool}')
logger.info(f'[In progress] {pool.in_progress_pool}')
logger.info(f' [Solved] {pool.results}')
if __name__ == '__main__':
initialize()
PROBLEM = get_computational_problem()
ResultCommand = PROBLEM.result_command_type
RegisterCommand = PROBLEM.register_command_type
DropCommand = PROBLEM.drop_command_type
ProgressCommand = PROBLEM.progress_command_type
main(ComputationManager(PROBLEM))
|
py
|
1a5ccb476c5f0b7858bb867eefa02483407c5d76
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
import SimpleXMLRPCServer
import threading
import time
import pickle
import pyalgotrade.logger
class AutoStopThread(threading.Thread):
def __init__(self, server):
threading.Thread.__init__(self)
self.__server = server
def run(self):
while self.__server.jobsPending():
time.sleep(1)
self.__server.stop()
class Results(object):
"""The results of the strategy executions."""
def __init__(self, parameters, result):
self.__parameters = parameters
self.__result = result
def getParameters(self):
"""Returns a sequence of parameter values."""
return self.__parameters
def getResult(self):
"""Returns the result for a given set of parameters."""
return self.__result
class Job(object):
def __init__(self, strategyParameters):
self.__strategyParameters = strategyParameters
self.__bestResult = None
self.__bestParameters = None
        self.__bestWorkerName = None
        self.__id = id(self)
def getId(self):
return self.__id
def getNextParameters(self):
ret = None
if len(self.__strategyParameters):
ret = self.__strategyParameters.pop()
return ret
def getBestParameters(self):
return self.__bestParameters
def getBestResult(self):
return self.__bestResult
def getBestWorkerName(self):
return self.__bestWorkerName
def setBestResult(self, result, parameters, workerName):
self.__bestResult = result
self.__bestParameters = parameters
self.__bestWorkerName = workerName
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
rpc_paths = ('/PyAlgoTradeRPC',)
class Server(SimpleXMLRPCServer.SimpleXMLRPCServer):
defaultBatchSize = 200
def __init__(self, address, port, autoStop=True):
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, (address, port), requestHandler=RequestHandler, logRequests=False, allow_none=True)
self.__instrumentsAndBars = None # Pickle'd instruments and bars for faster retrieval.
self.__barsFreq = None
self.__activeJobs = {}
self.__activeJobsLock = threading.Lock()
self.__parametersLock = threading.Lock()
self.__bestJob = None
self.__parametersIterator = None
self.__logger = pyalgotrade.logger.getLogger("server")
if autoStop:
self.__autoStopThread = AutoStopThread(self)
else:
self.__autoStopThread = None
self.register_introspection_functions()
self.register_function(self.getInstrumentsAndBars, 'getInstrumentsAndBars')
self.register_function(self.getBarsFrequency, 'getBarsFrequency')
self.register_function(self.getNextJob, 'getNextJob')
self.register_function(self.pushJobResults, 'pushJobResults')
self.__forcedStop = False
def __getNextParams(self):
ret = []
# Get the next set of parameters.
with self.__parametersLock:
if self.__parametersIterator is not None:
try:
for i in xrange(Server.defaultBatchSize):
ret.append(self.__parametersIterator.next())
except StopIteration:
self.__parametersIterator = None
return ret
def getLogger(self):
return self.__logger
def getInstrumentsAndBars(self):
return self.__instrumentsAndBars
def getBarsFrequency(self):
return str(self.__barsFreq)
def getBestJob(self):
return self.__bestJob
def getNextJob(self):
ret = None
params = []
# Get the next set of parameters.
params = self.__getNextParams()
# Map the active job
if len(params):
ret = Job(params)
with self.__activeJobsLock:
self.__activeJobs[ret.getId()] = ret
return pickle.dumps(ret)
def jobsPending(self):
if self.__forcedStop:
return False
with self.__parametersLock:
jobsPending = self.__parametersIterator is not None
with self.__activeJobsLock:
activeJobs = len(self.__activeJobs) > 0
return jobsPending or activeJobs
def pushJobResults(self, jobId, result, parameters, workerName):
jobId = pickle.loads(jobId)
result = pickle.loads(result)
parameters = pickle.loads(parameters)
workerName = pickle.loads(workerName)
job = None
# Get the active job and remove the mapping.
with self.__activeJobsLock:
try:
job = self.__activeJobs[jobId]
del self.__activeJobs[jobId]
except KeyError:
# The job's results were already submitted.
return
# Save the job with the best result
if self.__bestJob is None or result > self.__bestJob.getBestResult():
job.setBestResult(result, parameters, workerName)
self.__bestJob = job
self.getLogger().info("Partial result %s with parameters: %s from %s" % (result, parameters, workerName))
def stop(self):
self.shutdown()
def serve(self, barFeed, strategyParameters):
ret = None
try:
# Initialize instruments, bars and parameters.
self.getLogger().info("Loading bars")
loadedBars = []
for dateTime, bars in barFeed:
loadedBars.append(bars)
instruments = barFeed.getRegisteredInstruments()
self.__instrumentsAndBars = pickle.dumps((instruments, loadedBars))
self.__barsFreq = barFeed.getFrequency()
self.__parametersIterator = iter(strategyParameters)
if self.__autoStopThread:
self.__autoStopThread.start()
self.getLogger().info("Waiting for workers")
self.serve_forever()
if self.__autoStopThread:
self.__autoStopThread.join()
# Show the best result.
bestJob = self.getBestJob()
if bestJob:
self.getLogger().info("Best final result %s with parameters: %s from client %s" % (bestJob.getBestResult(), bestJob.getBestParameters(), bestJob.getBestWorkerName()))
ret = Results(bestJob.getBestParameters(), bestJob.getBestResult())
else:
self.getLogger().error("No jobs processed")
finally:
self.__forcedStop = True
return ret
def serve(barFeed, strategyParameters, address, port):
"""Executes a server that will provide bars and strategy parameters for workers to use.
:param barFeed: The bar feed that each worker will use to backtest the strategy.
:type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
:param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
:param address: The address to listen for incoming worker connections.
:type address: string.
:param port: The port to listen for incoming worker connections.
:type port: int.
:rtype: A :class:`Results` instance with the best results found.
"""
s = Server(address, port)
return s.serve(barFeed, strategyParameters)
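

# Illustrative usage sketch (kept as a comment; the bar feed setup follows the
# standard PyAlgoTrade samples and is an assumption, not something this module
# provides):
#
#   import itertools
#   from pyalgotrade.barfeed import yahoofeed
#
#   feed = yahoofeed.Feed()
#   feed.addBarsFromCSV("orcl", "orcl-2000.csv")
#   parameters = itertools.product(range(150, 251), range(5, 16))
#   best = serve(feed, parameters, "localhost", 5000)
#   if best is not None:
#       print "Best parameters:", best.getParameters()
#       print "Best result:", best.getResult()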
|
py
|
1a5ccbbafea0aa7b298219b830e50d9c4a75a196
|
#!c:\users\lgale\pycharmprojects\test\venv\scripts\python.exe
# $Id: rst2odt_prepstyles.py 8346 2019-08-26 12:11:32Z milde $
# Author: Dave Kuhlman <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
# Author: Michael Schutte <[email protected]>
from __future__ import print_function
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print(__doc__, file=sys.stderr)
print("Usage: %s STYLE_FILE.odt\n" % sys.argv[0], file=sys.stderr)
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
|
py
|
1a5ccc0067200a724cb9513b9f9c409cb9c07be5
|
import siliconcompiler
import multiprocessing
import os
import pytest
# unit routine
def run_design(datadir, design, N, job):
chip = siliconcompiler.Chip(loglevel='INFO')
chip.set('design', design)
chip.add('source', os.path.join(datadir, f'{design}.v'))
chip.set('param', 'N', str(N))
chip.set('jobname', job)
chip.set('relax', True)
chip.set('quiet', True)
chip.set('steplist', ['import', 'syn'])
chip.target("asicflow_freepdk45")
chip.run()
@pytest.mark.eda
@pytest.mark.quick
def test_doe(scroot):
'''Test running multiple experiments sweeping different parameters in
parallel using multiprocessing library.'''
datadir = os.path.join(scroot, 'third_party', 'designs', 'oh', 'stdlib', 'hdl')
design = 'oh_add'
N = [4, 8, 16, 32, 64, 128]
    # Define parallel processing
processes = []
for i in range(len(N)):
job = 'job' + str(i)
processes.append(multiprocessing.Process(target=run_design,
args=(datadir,
design,
str(N[i]),
job
)))
    # Boilerplate start and join
for p in processes:
p.start()
for p in processes:
p.join()
# Post-processing data
chip = siliconcompiler.Chip()
prev_area = 0
for i in range(len(N)):
jobname = 'job'+str(i)
chip.read_manifest(f"build/{design}/{jobname}/syn/0/outputs/{design}.pkg.json", job=jobname)
area = chip.get('metric','syn','0','cellarea','real', job=jobname)
# expect to have increasing area as we increase adder width
assert area > prev_area
prev_area = area
if __name__ == "__main__":
from tests.fixtures import scroot
test_doe(scroot())
|
py
|
1a5ccc324788d207e384b0eea435433a8597a4c6
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c
from esphome.const import CONF_FREQUENCY, CONF_ID
DEPENDENCIES = ["i2c"]
MULTI_CONF = True
pca9685_ns = cg.esphome_ns.namespace("pca9685")
PCA9685Output = pca9685_ns.class_("PCA9685Output", cg.Component, i2c.I2CDevice)
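# The frequency bounds below (about 23.84 Hz to 1525.88 Hz) follow from the
# PCA9685 prescale register limits (255 down to 3) with its 25 MHz internal
# oscillator: freq = 25 MHz / (4096 * (prescale + 1)).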
CONFIG_SCHEMA = (
cv.Schema(
{
cv.GenerateID(): cv.declare_id(PCA9685Output),
cv.Required(CONF_FREQUENCY): cv.All(
cv.frequency, cv.Range(min=23.84, max=1525.88)
),
}
)
.extend(cv.COMPONENT_SCHEMA)
.extend(i2c.i2c_device_schema(0x40))
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID], config[CONF_FREQUENCY])
await cg.register_component(var, config)
await i2c.register_i2c_device(var, config)
|
py
|
1a5cccb7b117a9d5d2e4e5d8025af3938f9f19b2
|
from machine import I2C, Pin
from i2c_lcd import I2cLcd
i2c = I2C(scl=Pin(22), sda=Pin(21), freq=400000)
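# 0x27 is the common default address of PCF8574-based I2C LCD backpacks, and the
# trailing arguments are the display's rows and columns (2 x 16); adjust both to
# match your hardware.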
lcd = I2cLcd(i2c, 0x27, 2, 16)
lcd.clear()
lcd.putstr("* Hello World!\n* F*ck you 2020!")
|
py
|
1a5ccde46e02f67a4cf092a7b389f7b30215243a
|
import setuptools
from scrape_songs.__version__ import __version__
with open("README.md", 'r') as f:
long_description = f.read()
setuptools.setup(
name="scrape-songs",
version=__version__,
python_requires=">=3.7",
install_requires=['scrapy', 'wikipedia'],
description="A tool used to collect lists of song" \
" names from albums on wikipedia and format them.",
author="QualityHammer",
author_email="[email protected]",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/QualityHammer/Whats-on-this-Album",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
"console_scripts": ["scrape_songs=scrape_songs.client:run"]
}
)
|
py
|
1a5cce9c590b1a1f6c5d8803e028c2d64b137813
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.tf2onnx - range op conversion
"""
import numpy as np
from onnx.onnx_pb import TensorProto
from tf2onnx import utils
# pylint: disable=unused-argument,missing-docstring
def make_range_const(ctx, start, limit, delta, output, scope_name, shape, dtype):
"""make Range subgraph if all inputs are const."""
# T range = Range(T start, T limit, T delta)
# V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial)
base_name = utils.make_name(scope_name)
start = ctx.get_node_by_output(start).get_tensor_value(as_list=False)
limit = ctx.get_node_by_output(limit).get_tensor_value(as_list=False)
delta = ctx.get_node_by_output(delta).get_tensor_value(as_list=False)
val = np.arange(start, limit, delta, dtype=start.dtype)
const_range = ctx.make_const(base_name, val)
ctx.make_node("Identity", [const_range.output[0]], shapes=[shape], dtypes=[dtype], outputs=[output])
def make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype):
"""make Range subgraph."""
# T range = Range(T start, T limit, T delta)
# V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial)
base_name = utils.make_name(scope_name)
# trip_count
diff_node = ctx.make_node("Sub",
[limit, start],
op_name_scope=base_name,
name=utils.make_name("diff"))
diff_output = diff_node.output[0]
delta_cast = delta
if dtype in [TensorProto.INT32, TensorProto.INT64]:
cast_node = ctx.make_node("Cast", [diff_output], op_name_scope=base_name,
name="cast_diff", attr={"to": TensorProto.FLOAT})
diff_output = cast_node.output[0]
cast_node = ctx.make_node("Cast", [delta], op_name_scope=base_name, name="cast_delta",
attr={"to": TensorProto.FLOAT})
delta_cast = cast_node.output[0]
div_node = ctx.make_node("Div", [diff_output, delta_cast], op_name_scope=base_name, name="div")
ceil_node = ctx.make_node("Ceil", [div_node.output[0]], op_name_scope=base_name, name="ceil")
trip_count_node = ctx.make_node("Cast", [ceil_node.output[0]], op_name_scope=base_name, name="trip_cnt",
attr={"to": TensorProto.INT64})
# cond
# Use initializer here since Constant OP before opset 9 does not support bool type
cond_name = "{}_cond".format(base_name)
ctx.make_const(cond_name, np.ones((), dtype=bool))
# body
g = ctx.create_new_graph_with_same_config()
g.make_node("Identity", ["cond"], outputs=["cond_out"])
g.make_node("Add", ["prev", delta], outputs=["current"], name=utils.make_name("add"))
g.make_node("Identity", ["prev"], outputs=["range"])
g.add_graph_input("i", TensorProto.INT64, [])
g.add_graph_input("cond", TensorProto.BOOL, [])
g.add_graph_input("prev", dtype, [])
g.add_graph_output("cond_out", TensorProto.BOOL, [])
g.add_graph_output("current", dtype, [])
g.add_graph_output("range", dtype, [])
# loop
loop_inputs = [trip_count_node.output[0], cond_name, start]
loop_node = ctx.make_node("Loop", loop_inputs, output_count=2, op_name_scope=base_name, name="loop")
loop_node.set_body_graph_as_attr("body", g)
ctx.make_node("Identity", [loop_node.output[1]], name=base_name, shapes=[shape],
dtypes=[dtype], outputs=[output])
def make_range(ctx, start, limit, delta, output, scope_name, shape, dtype):
    if all(ctx.get_node_by_output(n).is_const() for n in [start, limit, delta]):
make_range_const(ctx, start, limit, delta, output, scope_name, shape, dtype)
else:
make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype)
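# For reference, the Loop subgraph built by make_range_non_const computes,
# roughly, the following (plain-Python sketch):
#
#   trip_count = int(ceil((limit - start) / delta))
#   prev = start
#   scan = []
#   for _ in range(trip_count):
#       scan.append(prev)      # the "range" scan output
#       prev = prev + delta    # the "current" loop-carried value
#
# The concatenated scan output is then wired to the node's output via Identity.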
def range_op7(ctx, node, name, args):
"""Range."""
# T range = Range(T start, T limit, T delta)
# V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial)
dtype = node.get_attr_int("Tidx")
shape = node.output_shapes[0]
utils.make_sure(dtype is not None, "Tidx of %s is None", node.name)
ctx.remove_node(node.name)
make_range(ctx, node.input[0], node.input[1], node.input[2],
node.output[0], name, shape, dtype)
|
py
|
1a5ccef2b09ffdc7d1bf10bd96efd02e6b072203
|
from django.contrib import admin
from . import models
class BonderAdmin(admin.ModelAdmin):
list_display = (
'id',
'semiconductor',
'name',
'slug',
'description',
'product_code',
'model',
'condition',
'warranty',
'seller',
'manufacturer',
'image',
'availability',
'price',
'created',
'updated'
)
admin.site.register(models.Bonder, BonderAdmin)
|
py
|
1a5ccf10dc03cb013839289306d49141c5183b87
|
import os, sys
import lesscpy
from shutil import copyfile, rmtree
from jupyter_core.paths import jupyter_config_dir, jupyter_data_dir
from glob import glob
from tempfile import mkstemp
# path to local site-packages/jupyterthemes
package_dir = os.path.dirname(os.path.realpath(__file__))
# path to user jupyter-themes dir
user_dir = os.path.join(os.path.expanduser('~'), '.jupyter-themes')
# path to save tempfile with style_less before reading/compiling
_, tempfile = mkstemp('.less')
_, vimtemp = mkstemp('.less')
# path to install custom.css file (~/.jupyter/custom/)
jupyter_home = jupyter_config_dir()
jupyter_data = jupyter_data_dir()
jupyter_custom = os.path.join(jupyter_home, 'custom')
jupyter_custom_fonts = os.path.join(jupyter_custom, 'fonts')
jupyter_customcss = os.path.join(jupyter_custom, 'custom.css')
jupyter_customjs = os.path.join(jupyter_custom, 'custom.js')
jupyter_nbext = os.path.join(jupyter_data, 'nbextensions')
# theme colors, layout, and font directories
layouts_dir = os.path.join(package_dir, 'layout')
styles_dir = os.path.join(package_dir, 'styles')
styles_dir_user = os.path.join(user_dir, 'styles')
fonts_dir = os.path.join(package_dir, 'fonts')
defaults_dir = os.path.join(package_dir, 'defaults')
# default custom.css/js files to override JT on reset
defaultCSS = os.path.join(defaults_dir, 'custom.css')
defaultJS = os.path.join(defaults_dir, 'custom.js')
# layout files for notebook, codemirror, cells, mathjax, & vim ext
nb_style = os.path.join(layouts_dir, 'notebook.less')
cm_style = os.path.join(layouts_dir, 'codemirror.less')
cl_style = os.path.join(layouts_dir, 'cells.less')
ex_style = os.path.join(layouts_dir, 'extras.less')
vim_style = os.path.join(layouts_dir, 'vim.less')
comp_style = os.path.join(layouts_dir, 'completer.less')
theme_name_file = os.path.join(jupyter_custom, 'current_theme.txt')
def fileOpen(filename, mode):
if sys.version_info[0]==3:
return open(filename, mode, encoding='utf8', errors='ignore')
else:
return open(filename, mode)
def check_directories():
# Ensure all install dirs exist
if not os.path.isdir(jupyter_home):
os.makedirs(jupyter_home)
if not os.path.isdir(jupyter_custom):
os.makedirs(jupyter_custom)
if not os.path.isdir(jupyter_custom_fonts):
os.makedirs(jupyter_custom_fonts)
if not os.path.isdir(jupyter_data):
os.makedirs(jupyter_data)
if not os.path.isdir(jupyter_nbext):
os.makedirs(jupyter_nbext)
def less_to_css(style_less):
""" write less-compiled css file to jupyter_customcss in jupyter_dir
"""
with fileOpen(tempfile, 'w') as f:
f.write(style_less)
os.chdir(package_dir)
style_css = lesscpy.compile(tempfile)
style_css += '\n\n'
return style_css
def write_final_css(style_css):
# install style_css to .jupyter/custom/custom.css
with fileOpen(jupyter_customcss, 'w') as custom_css:
custom_css.write(style_css)
def install_precompiled_theme(theme):
# for Python 3.5, install selected theme from precompiled defaults
compiled_dir = os.path.join(styles_dir, 'compiled')
compiled_dir_user = os.path.join(styles_dir_user, 'compiled')
if (os.path.isdir(compiled_dir_user) and
'{}.css'.format(theme) in os.listdir(compiled_dir_user)):
theme_src = os.path.join(compiled_dir_user, '{}.css'.format(theme))
else:
theme_src = os.path.join(compiled_dir, '{}.css'.format(theme))
theme_dst = os.path.join(jupyter_custom, 'custom.css')
copyfile(theme_src, theme_dst)
def send_fonts_to_jupyter(font_file_path):
fname = font_file_path.split(os.sep)[-1]
copyfile(font_file_path, os.path.join(jupyter_custom_fonts, fname))
def delete_font_files():
for fontfile in os.listdir(jupyter_custom_fonts):
abspath = os.path.join(jupyter_custom_fonts, fontfile)
os.remove(abspath)
def convert_fontsizes(fontsizes):
# if triple digits, move decimal (105 --> 10.5)
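    # e.g. convert_fontsizes([105, 11, 95]) returns ['10.5', '11', '9.5']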
fontsizes = [str(fs) for fs in fontsizes]
for i, fs in enumerate(fontsizes):
if len(fs) >= 3:
fontsizes[i] = '.'.join([fs[:-1], fs[-1]])
elif int(fs) > 25:
fontsizes[i] = '.'.join([fs[0], fs[-1]])
return fontsizes
def set_font_properties(style_less,
nbfont=None,
tcfont=None,
monofont=None,
monosize=11,
tcfontsize=13,
nbfontsize=13,
prfontsize=95,
dffontsize=93,
outfontsize=85,
mathfontsize=100,
dfonts=False):
"""Parent function for setting notebook, text/md, and
codecell font-properties
"""
fontsizes = [monosize, nbfontsize, tcfontsize, prfontsize, dffontsize, outfontsize]
monosize, nbfontsize, tcfontsize, prfontsize, dffontsize, outfontsize = convert_fontsizes(fontsizes)
if dfonts==True:
monofont, tcfont, nbfont = ['monospace', 'sans-serif', 'sans-serif']
else:
if monofont is not None:
monofont, monofpath = stored_font_dicts(monofont)
style_less = import_fonts(style_less, monofont, monofpath)
else:
monofont='monospace'
if tcfont is not None:
tcfont, tcfontpath = stored_font_dicts(tcfont)
style_less = import_fonts(style_less, tcfont, tcfontpath)
else:
tcfont='sans-serif'
if nbfont is not None:
if nbfont == 'proxima':
nbfont, tcfont = ["'Proxima Nova'"]*2
style_less = proxima_nova_imports(style_less)
else:
nbfont, nbfontpath = stored_font_dicts(nbfont)
style_less = import_fonts(style_less, nbfont, nbfontpath)
else:
nbfont='sans-serif'
style_less += '/* Set Font-Type and Font-Size Variables */\n'
# font names and fontfamily info for codecells, notebook & textcells
style_less += '@monofont: {}; \n'.format(monofont)
style_less += '@notebook-fontfamily: {}; \n'.format(nbfont)
style_less += '@text-cell-fontfamily: {}; \n'.format(tcfont)
# font size for codecells, main notebook, notebook-sub, & textcells
style_less += '@monofontsize: {}pt; \n'.format(monosize)
style_less += '@monofontsize-sub: {}pt; \n'.format(float(monosize) - 1)
style_less += '@nb-fontsize: {}pt; \n'.format(nbfontsize)
style_less += '@nb-fontsize-sub: {}pt; \n'.format(float(nbfontsize) - 1)
style_less += '@text-cell-fontsize: {}pt; \n'.format(tcfontsize)
style_less += '@df-header-fontsize: {}pt; \n'.format(float(dffontsize) + 1)
style_less += '@df-fontsize: {}pt; \n'.format(dffontsize)
style_less += '@output-font-size: {}pt; \n'.format(outfontsize)
style_less += '@prompt-fontsize: {}pt; \n'.format(prfontsize)
style_less += '@mathfontsize: {}%; \n'.format(mathfontsize)
style_less += '\n\n'
style_less += '/* Import Theme Colors and Define Layout Variables */\n'
return style_less
def import_fonts(style_less, fontname, font_subdir):
"""Copy all custom fonts to ~/.jupyter/custom/fonts/ and
write import statements to style_less
"""
ftype_dict = {'woff2': 'woff2',
'woff': 'woff',
'ttf': 'truetype',
'otf': 'opentype',
'svg': 'svg'}
define_font = (
"@font-face {{font-family: {fontname};\n\tfont-weight:"
"{weight};\n\tfont-style: {style};\n\tsrc: local('{fontname}'),"
"\n\turl('fonts{sepp}{fontfile}') format('{ftype}');}}\n")
fontname = fontname.split(',')[0]
fontpath = os.path.join(fonts_dir, font_subdir)
for fontfile in os.listdir(fontpath):
if '.txt' in fontfile or 'DS_' in fontfile:
continue
weight = 'normal'
style = 'normal'
if 'medium' in fontfile:
weight = 'medium'
elif 'ital' in fontfile:
style = 'italic'
ft = ftype_dict[fontfile.split('.')[-1]]
style_less += define_font.format(
fontname=fontname,
weight=weight,
style=style,
sepp='/',
fontfile=fontfile,
ftype=ft)
send_fonts_to_jupyter(os.path.join(fontpath, fontfile))
return style_less
def style_layout(style_less,
theme='grade3',
cursorwidth=2,
cursorcolor='default',
cellwidth='980',
lineheight=170,
margins='auto',
vimext=False,
toolbar=False,
nbname=False,
kernellogo=False,
altprompt=False,
altmd=False,
altout=False,
hideprompt=False):
"""Set general layout and style properties of text and code cells"""
# write theme name to ~/.jupyter/custom/ (referenced by jtplot.py)
with fileOpen(theme_name_file, 'w') as f:
f.write(theme)
if (os.path.isdir(styles_dir_user) and
'{}.less'.format(theme) in os.listdir(styles_dir_user)):
theme_relpath = os.path.relpath(
os.path.join(styles_dir_user, theme), package_dir)
else:
theme_relpath = os.path.relpath(
os.path.join(styles_dir, theme), package_dir)
style_less += '@import "{}";\n'.format(theme_relpath)
textcell_bg = '@cc-input-bg'
promptText = '@input-prompt'
promptBG = '@cc-input-bg'
promptPadding = '.25em'
promptBorder = '2px solid @prompt-line'
tcPromptBorder = '2px solid @tc-prompt-std'
promptMinWidth = 11.5
    outpromptMinWidth = promptMinWidth + .5  # not + 3: a wider output prompt would overlay printed output text
tcPromptWidth = promptMinWidth + .5
tcPromptFontsize = "@prompt-fontsize"
ccOutputBG = '@cc-output-bg-default'
if theme in ['grade3', 'gispo']:
textcell_bg = '@notebook-bg'
if altprompt:
promptPadding = '.1em'
promptMinWidth = 8
outpromptMinWidth = promptMinWidth + .5
tcPromptWidth = promptMinWidth + .5
promptText = 'transparent'
tcPromptBorder = '2px solid transparent'
if altmd:
textcell_bg = '@notebook-bg'
tcPromptBorder = '2px dotted @tc-border-selected'
if altout:
ccOutputBG = '@notebook-bg'
if margins != 'auto':
margins = '{}px'.format(margins)
if '%' not in cellwidth:
cellwidth = str(cellwidth) + 'px'
style_less += '@container-margins: {};\n'.format(margins)
style_less += '@cell-width: {}; \n'.format(cellwidth)
style_less += '@cc-line-height: {}%; \n'.format(lineheight)
style_less += '@text-cell-bg: {}; \n'.format(textcell_bg)
style_less += '@cc-prompt-width: {}ex; \n'.format(promptMinWidth)
style_less += '@cc-prompt-bg: {}; \n'.format(promptBG)
style_less += '@cc-output-bg: {}; \n'.format(ccOutputBG)
style_less += '@prompt-text: {}; \n'.format(promptText)
style_less += '@prompt-padding: {}; \n'.format(promptPadding)
style_less += '@prompt-border: {}; \n'.format(promptBorder)
style_less += '@prompt-min-width: {}ex; \n'.format(promptMinWidth)
style_less += '@out-prompt-min-width: {}ex; \n'.format(outpromptMinWidth)
style_less += '@tc-prompt-width: {}ex; \n'.format(tcPromptWidth)
style_less += '@tc-prompt-border: {}; \n'.format(tcPromptBorder)
style_less += '@cursor-width: {}px; \n'.format(cursorwidth)
style_less += '@cursor-info: @cursor-width solid {}; \n'.format(
cursorcolor)
style_less += '@tc-prompt-fontsize: {}; \n'.format(tcPromptFontsize)
style_less += '\n\n'
# read-in notebook.less (general nb style)
with fileOpen(nb_style, 'r') as notebook:
style_less += notebook.read() + '\n'
# read-in cells.less (cell layout)
with fileOpen(cl_style, 'r') as cells:
style_less += cells.read() + '\n'
# read-in extras.less (misc layout)
with fileOpen(ex_style, 'r') as extras:
style_less += extras.read() + '\n'
# read-in codemirror.less (syntax-highlighting)
with fileOpen(cm_style, 'r') as codemirror:
style_less += codemirror.read() + '\n'
with fileOpen(comp_style, 'r') as codemirror:
style_less += codemirror.read() + '\n'
style_less += toggle_settings(
toolbar, nbname, hideprompt, kernellogo) + '\n'
if vimext:
set_vim_style(theme)
return style_less
def toggle_settings(
toolbar=False, nbname=False, hideprompt=False, kernellogo=False):
"""Toggle main notebook toolbar (e.g., buttons), filename,
and kernel logo."""
toggle = ''
if toolbar:
toggle += 'div#maintoolbar {margin-left: -4px !important;}\n'
toggle += '.toolbar.container {width: 100% !important;}\n'
else:
toggle += 'div#maintoolbar {display: none !important;}\n'
if nbname:
toggle += ('span.save_widget span.filename {margin-left: 8px; height: initial;'
'font-size: 100%; color: @nb-name-fg; background-color:'
'@cc-input-bg;}\n')
toggle += ('span.save_widget span.filename:hover {color:'
'@nb-name-hover; background-color: @cc-input-bg;}\n')
toggle += ('#menubar {padding-top: 4px; background-color:'
'@notebook-bg;}\n')
else:
toggle += '#header-container {display: none !important;}\n'
if hideprompt:
toggle += 'div.prompt.input_prompt {display: none !important;}\n'
toggle += 'div.prompt.output_prompt {width: 5ex !important;}\n'
toggle += 'div.out_prompt_overlay.prompt:hover {width: 5ex !important; min-width: 5ex !important;}\n'
toggle += (
'.CodeMirror-gutters, .cm-s-ipython .CodeMirror-gutters'
'{ position: absolute; left: 0; top: 0; z-index: 3; width: 2em; '
'display: inline-block !important; }\n')
toggle += ('div.cell.code_cell .input { border-left: 5px solid @cm-gutters !important; border-bottom-left-radius: 5px; border-top-left-radius: 5px; }\n')
if kernellogo:
toggle += '@kernel-logo-display: block;'
else:
toggle += '@kernel-logo-display: none;'
return toggle
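# Illustrative output: with the defaults (toolbar=False, nbname=False,
# hideprompt=False, kernellogo=False) the returned string hides the toolbar and
# header and disables the kernel logo, e.g. it contains
#   div#maintoolbar {display: none !important;}
#   #header-container {display: none !important;}
#   @kernel-logo-display: none;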
def proxima_nova_imports(style_less):
style_less += """@font-face {
font-family: 'Proxima Nova Bold';
src: url('fonts/Proxima Nova Alt Bold-webfont.eot');
src: url('fonts/Proxima Nova Alt Bold-webfont.eot?#iefix') format('embedded-opentype'),
url('fonts/Proxima Nova Alt Bold-webfont.woff2') format('woff2'),
url('fonts/Proxima Nova Alt Bold-webfont.woff') format('woff'),
url('fonts/Proxima Nova Alt Bold-webfont.ttf') format('truetype'),
url('fonts/Proxima Nova Alt Bold-webfont.svg#proxima_nova_altbold') format('svg');
font-weight: 600;
font-style: normal;
}
@font-face {
font-family: 'Proxima Nova';
src: url('fonts/Proxima Nova Alt Regular-webfont.eot');
src: url('fonts/Proxima Nova Alt Regular-webfont.eot?#iefix') format('embedded-opentype'),
url('fonts/Proxima Nova Alt Regular-webfont.woff') format('woff'),
url('fonts/Proxima Nova Alt Regular-webfont.ttf') format('truetype'),
url('fonts/Proxima Nova Alt Regular-webfont.svg#proxima_nova_altregular') format('svg');
font-weight: 400;
font-style: normal;
}"""
font_subdir = os.path.join(fonts_dir, "sans-serif/proximasans")
fontpath = os.path.join(fonts_dir, font_subdir)
for fontfile in os.listdir(font_subdir):
send_fonts_to_jupyter(os.path.join(fontpath, fontfile))
return style_less
def set_mathjax_style(style_css, mathfontsize):
"""Write mathjax settings, set math fontsize
"""
jax_style = """<script>
MathJax.Hub.Config({
"HTML-CSS": {
/*preferredFont: "TeX",*/
/*availableFonts: ["TeX", "STIX"],*/
styles: {
scale: %d,
".MathJax_Display": {
"font-size": %s,
}
}
}
});\n</script>
""" % (int(mathfontsize), '"{}%"'.format(str(mathfontsize)))
style_css += jax_style
return style_css
def set_vim_style(theme):
"""Add style and compatibility with vim notebook extension"""
vim_jupyter_nbext = os.path.join(jupyter_nbext, 'vim_binding')
if not os.path.isdir(vim_jupyter_nbext):
os.makedirs(vim_jupyter_nbext)
vim_less = '@import "styles{}";\n'.format(''.join([os.sep, theme]))
with open(vim_style, 'r') as vimstyle:
vim_less += vimstyle.read() + '\n'
with open(vimtemp, 'w') as vtemp:
vtemp.write(vim_less)
os.chdir(package_dir)
vim_css = lesscpy.compile(vimtemp)
vim_css += '\n\n'
# install vim_custom_css to ...nbextensions/vim_binding/vim_binding.css
vim_custom_css = os.path.join(vim_jupyter_nbext, 'vim_binding.css')
with open(vim_custom_css, 'w') as vim_custom:
vim_custom.write(vim_css)
def reset_default(verbose=False):
"""Remove custom.css and custom fonts"""
paths = [jupyter_custom, jupyter_nbext]
for fpath in paths:
custom = '{0}{1}{2}.css'.format(fpath, os.sep, 'custom')
try:
os.remove(custom)
except Exception:
pass
try:
delete_font_files()
except Exception:
check_directories()
delete_font_files()
copyfile(defaultCSS, jupyter_customcss)
copyfile(defaultJS, jupyter_customjs)
if os.path.exists(theme_name_file):
os.remove(theme_name_file)
if verbose:
print("Reset css and font defaults in:\n{} &\n{}".format(*paths))
def set_nb_theme(name):
"""Set theme from within notebook """
from IPython.core.display import HTML
styles_dir = os.path.join(package_dir, 'styles/compiled/')
css_path = glob('{0}/{1}.css'.format(styles_dir, name))[0]
customcss = open(css_path, "r").read()
return HTML(''.join(['<style> ', customcss, ' </style>']))
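# Minimal usage sketch from a notebook cell (the import path is an assumption
# about how this module is packaged, not something verified here):
#   from jupyterthemes.stylefx import set_nb_theme
#   set_nb_theme('onedork')  # returns an HTML object wrapping the compiled stylesheet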
def get_colors(theme='grade3', c='default', get_dict=False):
if theme in ['grade3', 'gispo']:
cdict = {'default': '#ff711a',
'b': '#1e70c7',
'o': '#ff711a',
'r': '#e22978',
'p': '#AA22FF',
'g': '#2ecc71'}
else:
cdict = {'default': '#0095ff',
'b': '#0095ff',
'o': '#ff914d',
'r': '#DB797C',
'p': '#c776df',
'g': '#94c273'}
cdict['x'] = '@cc-input-fg'
if get_dict:
return cdict
return cdict[c]
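# Illustrative values taken from the dictionaries above:
#   get_colors('grade3', 'g')  -> '#2ecc71'
#   get_colors('monokai', 'g') -> '#94c273'
#   get_colors(get_dict=True)  -> the full color map for the default theme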
def get_alt_prompt_text_color(theme):
altColors = {'grade3': '#FF7823',
'oceans16': '#667FB1',
'chesterish': '#0b98c8',
'onedork': '#94c273',
'monokai': '#94c273'}
return altColors[theme]
def stored_font_dicts(fontcode, get_all=False):
fonts = {'mono':
{'anka': ['Anka/Coder', 'anka-coder'],
'anonymous': ['Anonymous Pro', 'anonymous-pro'],
'aurulent': ['Aurulent Sans Mono', 'aurulent'],
'bitstream': ['Bitstream Vera Sans Mono', 'bitstream-vera'],
'bpmono': ['BPmono', 'bpmono'],
'code': ['Code New Roman', 'code-new-roman'],
'consolamono': ['Consolamono', 'consolamono'],
'cousine': ['Cousine', 'cousine'],
'dejavu': ['DejaVu Sans Mono', 'dejavu'],
'droidmono': ['Droid Sans Mono', 'droidmono'],
'fira': ['Fira Mono', 'fira'],
'firacode': ['Fira Code', 'firacode'],
'generic': ['Generic Mono', 'generic'],
'hack': ['Hack', 'hack'],
'hasklig': ['Hasklig', 'hasklig'],
'iosevka' : ['Iosevka', 'iosevka'],
'inputmono': ['Input Mono', 'inputmono'],
'inconsolata': ['Inconsolata-g', 'inconsolata-g'],
'liberation': ['Liberation Mono', 'liberation'],
'meslo': ['Meslo', 'meslo'],
'office': ['Office Code Pro', 'office-code-pro'],
'oxygen': ['Oxygen Mono', 'oxygen'],
'roboto': ['Roboto Mono', 'roboto'],
'saxmono': ['saxMono', 'saxmono'],
'source': ['Source Code Pro', 'source-code-pro'],
'sourcemed': ['Source Code Pro Medium', 'source-code-medium'],
'ptmono': ['PT Mono', 'ptmono'],
'ubuntu': ['Ubuntu Mono', 'ubuntu']},
'sans':
{'droidsans': ['Droid Sans', 'droidsans'],
'karla': ['Karla', 'karla'],
'opensans': ['Open Sans', 'opensans'],
'ptsans': ['PT Sans', 'ptsans'],
'sourcesans': ['Source Sans Pro', 'sourcesans'],
'robotosans': ['Roboto', 'robotosans'],
'latosans': ['Lato', 'latosans'],
'exosans': ['Exo_2', 'exosans'],
'proxima': ['Proxima Nova', 'proximasans']},
'serif':
{'ptserif': ['PT Serif', 'ptserif'],
'ebserif': ['EB Garamond', 'ebserif'],
'loraserif': ['Lora', 'loraserif'],
'merriserif': ['Merriweather', 'merriserif'],
'crimsonserif': ['Crimson Text', 'crimsonserif'],
'georgiaserif': ['Georgia', 'georgiaserif'],
'neutonserif': ['Neuton', 'neutonserif'],
'cardoserif': ['Cardo Serif', 'cardoserif'],
'goudyserif': ['Goudy Serif', 'goudyserif']}}
if get_all:
return fonts
if fontcode in list(fonts['mono']):
fontname, fontdir = fonts['mono'][fontcode]
fontfam = 'monospace'
elif fontcode in list(fonts['sans']):
fontname, fontdir = fonts['sans'][fontcode]
fontfam = 'sans-serif'
elif fontcode in list(fonts['serif']):
fontname, fontdir = fonts['serif'][fontcode]
fontfam = 'serif'
else:
print("\n\tOne of the fonts you requested is not available\n\tSetting all fonts to default")
return ''
fontdir = os.sep.join([fontfam, fontdir])
return '"{}", {}'.format(fontname, fontfam), fontdir
|
py
|
1a5ccf2afc9a192a9347b2dea7a47105916d93e2
|
"""Initialization of error handling."""
from ..errors import handlers
from ..errors.handlers import bp
_all = [bp, handlers]
|
py
|
1a5ccfd941676f8c6cb3a2feea53e1086b4b6c66
|
#!/usr/bin/env python3
"""
Script creating archive tables and moving the data from non-archive tables to these archive tables.
(for example, all records that are older than 8 days are moved to archive tables)
Note: might not be needed
Requires:
pip3 install psycopg2
sudo apt-get install python3-dateutil
(c) Copyright 2015 Tigran Avanesov, SnT, University of Luxembourg
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import json
import psycopg2
from psycopg2 import extras
import logging
log = logging.getLogger(os.path.basename(__file__))
log.setLevel(logging.DEBUG)
loggerfilehandler = logging.FileHandler('/var/log/postgreslib-backup.log')
loggerfilehandler.setLevel(logging.DEBUG)
# create console handler with a higher log level
loggerconsolehandler = logging.StreamHandler()
#loggerconsolehandler.setLevel(logging.ERROR)
loggerconsolehandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
loggerfilehandler.setFormatter(formatter)
loggerconsolehandler.setFormatter(formatter)
# add the handlers to the logger
log.addHandler(loggerfilehandler)
log.addHandler(loggerconsolehandler)
import re
import dateutil.parser
import datetime
def i_dont_read_personal_data(ip):
    # keep only the first two blocks (IPv4 dotted quads or IPv6 groups)
return ":".join(( ".".join(ip.split(".")[:2]) ).split(':')[:2])
class ACDCExpDBBackup:
# a regexp to extract experiment, tool, partner
#
headerexp = re.compile("\s*\[(.*?)\]\s*\[(.*?)\]\s*\[(.*?)\](.*)")
# if these keys appear in report, they will be stripped, if the corresponding option is set
keys_to_strip = ['sample_b64']
new_value_for_stripped = 'stripped'
def __init__(me, dbname, user, password, host='localhost', port=5432,rewrite = False, ensure_tables = True, commit_every = 100):
"""
        host: host running the PostgreSQL server
        port: port for the PostgreSQL server
        dbname: database name
        ...
        rewrite: remove the database if it existed
        commit_every: do a commit every commit_every insertions
"""
me.dbn = dbname
me.dbclient = None
me.commit_every = commit_every
me.inscounter = 0
try:
me.dbclient = psycopg2.connect(database=dbname, user = user, password=password, host = host, port = port)
except psycopg2.DatabaseError as e:
log.error(e)
sys.exit(1)
cur = me.dbclient.cursor()
me.cur = cur
# creating tables
me.tables = {} # keys are tables, values are fields (fields = dict field: type)
me.tables['attack'] = {
"Id": "SERIAL PRIMARY KEY",
"meta_api_key_id": "smallint",
"meta_country_code": "VARCHAR(3)",
"meta_id": "integer",
"meta_tld": "VARCHAR(64)",
"meta_reported_at": "timestamp",
"meta_domain": "VARCHAR(255)",
"meta_ip": "VARCHAR(10)", # stripped
"meta_status": "VARCHAR(16)",
"meta_asn": "integer",
"experiment" : "smallint references experiments(Id)",
"partner" : "smallint references partners(Id)",
"tool" : "smallint references tools(Id)",
#"report_category": "smallint references report_categories(Id)",
"report_type": "VARCHAR(255)",
"ts": "timestamp",
"source_key": "smallint references source_keys(Id)",
"source_value": "VARCHAR(255)",
"confidence_level": "real",
"version": "smallint",
"report_subcategory": "smallint references report_subcategories(Id)",
"ip_protocol_number": "smallint",
"ip_version": "smallint",
"report_id": "VARCHAR(63)",
"duration": "integer",
"reported_at": "timestamp",
"botnet": "VARCHAR(63)",
"alternate_format_type": "smallint references alternate_format_types(Id)",
"src_ip": "VARCHAR(10)", # stripped
"src_mode": "smallint references modes(Id)",
"dst_ip": "VARCHAR(10)", # stripped
"dst_mode": "smallint references modes(Id)",
"src_port": "integer",
"dst_port": "integer",
"sample_filename": "VARCHAR(255)",
"sample_sha256": "VARCHAR(72)",
"malicious_uri": "VARCHAR(255)",
"subject_text": "VARCHAR(128)"
}
# maybe botnet should be a reference instead of varchar
me.tables['bot'] = {
"Id": "SERIAL PRIMARY KEY",
"meta_api_key_id": "smallint",
"meta_country_code": "VARCHAR(3)",
"meta_id": "integer",
"meta_tld": "VARCHAR(64)",
"meta_reported_at": "timestamp",
"meta_domain": "VARCHAR(255)",
"meta_ip": "VARCHAR(10)",
"meta_status": "VARCHAR(16)",
"meta_asn": "integer",
"experiment" : "smallint references experiments(Id)",
"partner" : "smallint references partners(Id)",
"tool" : "smallint references tools(Id)",
#"report_category": "smallint references report_categories(Id)",
"report_type": "VARCHAR(255)",
"ts": "timestamp",
"source_key": "smallint references source_keys(Id)",
"source_value": "VARCHAR(255)",
"confidence_level": "real",
"version": "smallint",
"report_subcategory": "smallint references report_subcategories(Id)",
"report_id": "VARCHAR(63)",
"duration": "integer",
"reported_at": "timestamp",
"botnet": "VARCHAR(63)",
"alternate_format_type": "smallint references alternate_format_types(Id)",
"ip_version": "smallint",
"ip_protocol_number": "smallint",
"src_ip_v4": "VARCHAR(8)",
"src_ip_v6": "VARCHAR(10)",
"src_mode": "smallint references modes(Id)",
"src_port": "integer",
"c2_ip_v4": "VARCHAR(8)",
"c2_ip_v6": "VARCHAR(10)",
"c2_mode": "smallint references modes(Id)",
"c2_port": "integer",
"sample_sha256": "VARCHAR(72)",
"fast_flux_uri": "VARCHAR(255)",
}
me.tables['botnet']={
"Id": "SERIAL PRIMARY KEY",
"meta_api_key_id": "smallint",
"meta_country_code": "VARCHAR(3)",
"meta_id": "integer",
"meta_tld": "VARCHAR(64)",
"meta_reported_at": "timestamp",
"meta_domain": "VARCHAR(255)",
"meta_ip": "VARCHAR(10)",
"meta_status": "VARCHAR(16)",
"meta_asn": "integer",
"experiment" : "smallint references experiments(Id)",
"partner" : "smallint references partners(Id)",
"tool" : "smallint references tools(Id)",
#"report_category": "smallint references report_categories(Id)",
"report_type": "VARCHAR(255)",
"source_key": "smallint references source_keys(Id)",
"source_value": "VARCHAR(255)",
"version": "smallint",
"report_id": "VARCHAR(63)",
"reported_at": "timestamp",
"report_subcategory": "smallint references report_subcategories(Id)",
}
me.tables['c2_server']={
"Id": "SERIAL PRIMARY KEY",
"meta_api_key_id": "smallint",
"meta_country_code": "VARCHAR(3)",
"meta_id": "integer",
"meta_tld": "VARCHAR(64)",
"meta_reported_at": "timestamp",
"meta_domain": "VARCHAR(255)",
"meta_ip": "VARCHAR(10)",
"meta_status": "VARCHAR(16)",
"meta_asn": "integer",
"experiment" : "smallint references experiments(Id)",
"partner" : "smallint references partners(Id)",
"tool" : "smallint references tools(Id)",
#"report_category": "smallint references report_categories(Id)",
"report_type": "VARCHAR(255)",
"ts": "timestamp",
"source_key": "smallint references source_keys(Id)",
"source_value": "VARCHAR(255)",
"confidence_level": "real",
"version": "smallint",
"report_subcategory": "smallint references report_subcategories(Id)",
"report_id": "VARCHAR(63)",
"duration": "integer",
"reported_at": "timestamp",
"botnet": "VARCHAR(63)",
"alternate_format_type": "smallint references alternate_format_types(Id)",
"ip_version": "smallint",
"ip_protocol_number": "smallint",
"c2_ip_v4": "VARCHAR(8)",
"c2_ip_v6": "VARCHAR(10)",
"c2_mode": "smallint references modes(Id)",
"c2_port": "integer",
}
me.tables['fast_flux'] = {
"Id": "SERIAL PRIMARY KEY",
"meta_api_key_id": "smallint",
"meta_country_code": "VARCHAR(3)",
"meta_id": "integer",
"meta_tld": "VARCHAR(64)",
"meta_reported_at": "timestamp",
"meta_domain": "VARCHAR(255)",
"meta_ip": "VARCHAR(10)",
"meta_status": "VARCHAR(16)",
"meta_asn": "integer",
"experiment" : "smallint references experiments(Id)",
"partner" : "smallint references partners(Id)",
"tool" : "smallint references tools(Id)",
#"report_category": "smallint references report_categories(Id)",
"report_type": "VARCHAR(255)",
"ts": "timestamp",
"source_key": "smallint references source_keys(Id)",
"source_value": "VARCHAR(255)",
"confidence_level": "real",
"version": "smallint",
"report_id": "VARCHAR(63)",
"duration": "integer",
"reported_at": "timestamp",
"botnet": "VARCHAR(63)",
"alternate_format_type": "smallint references alternate_format_types(Id)",
}
me.tables['malicious_uri'] ={
"Id": "SERIAL PRIMARY KEY",
"meta_api_key_id": "smallint",
"meta_country_code": "VARCHAR(3)",
"meta_id": "integer",
"meta_tld": "VARCHAR(64)",
"meta_reported_at": "timestamp",
"meta_domain": "VARCHAR(255)",
"meta_ip": "VARCHAR(10)",
"meta_status": "VARCHAR(16)",
"meta_asn": "integer",
"experiment" : "smallint references experiments(Id)",
"partner" : "smallint references partners(Id)",
"tool" : "smallint references tools(Id)",
#"report_category": "smallint references report_categories(Id)",
"report_type": "VARCHAR(255)",
"ts": "timestamp",
"source_key": "smallint references source_keys(Id)",
"source_value": "VARCHAR(255)",
"confidence_level": "real",
"version": "smallint",
"report_id": "VARCHAR(63)",
"report_subcategory": "smallint references report_subcategories(Id)",
"duration": "integer",
"reported_at": "timestamp",
"botnet": "VARCHAR(63)",
"alternate_format_type": "smallint references alternate_format_types(Id)",
"ip_version": "smallint",
"src_ip_v4": "VARCHAR(8)",
"src_ip_v6": "VARCHAR(10)",
"src_mode": "smallint references modes(Id)",
"sample_filename": "VARCHAR(255)",
"sample_sha256": "VARCHAR(72)",
"exploits": "smallint default 0",
}
# exploits : number of exploits
me.tables['malware']={
"Id": "SERIAL PRIMARY KEY",
"meta_api_key_id": "smallint",
"meta_country_code": "VARCHAR(3)",
"meta_id": "integer",
"meta_tld": "VARCHAR(64)",
"meta_reported_at": "timestamp",
"meta_domain": "VARCHAR(255)",
"meta_ip": "VARCHAR(10)",
"meta_status": "VARCHAR(16)",
"meta_asn": "integer",
"experiment" : "smallint references experiments(Id)",
"partner" : "smallint references partners(Id)",
"tool" : "smallint references tools(Id)",
#"report_category": "smallint references report_categories(Id)",
"report_type": "VARCHAR(255)",
"ts": "timestamp",
"source_key": "smallint references source_keys(Id)",
"source_value": "VARCHAR(255)",
"confidence_level": "real",
"version": "smallint",
"report_id": "VARCHAR(63)",
"reported_at": "timestamp",
"botnet": "VARCHAR(63)",
"alternate_format_type": "smallint references alternate_format_types(Id)",
"mime_type": "VARCHAR(255)",
"sample_hashes": "VARCHAR(255)",
"exploits": "smallint default 0",
}
# exploits : number of exploits; sample hashes is a stringified array
me.tables['spam_campaign']= {
"Id": "SERIAL PRIMARY KEY",
"meta_api_key_id": "smallint",
"meta_country_code": "VARCHAR(3)",
"meta_id": "integer",
"meta_tld": "VARCHAR(64)",
"meta_reported_at": "timestamp",
"meta_domain": "VARCHAR(255)",
"meta_ip": "VARCHAR(10)",
"meta_status": "VARCHAR(16)",
"meta_asn": "integer",
"experiment" : "smallint references experiments(Id)",
"partner" : "smallint references partners(Id)",
"tool" : "smallint references tools(Id)",
#"report_category": "smallint references report_categories(Id)",
"report_type": "VARCHAR(255)",
"ts": "timestamp",
"source_key": "smallint references source_keys(Id)",
"source_value": "VARCHAR(255)",
"confidence_level": "real",
"version": "smallint",
"report_id": "VARCHAR(63)",
"report_subcategory": "smallint references report_subcategories(Id)",
"duration": "integer",
"reported_at": "timestamp",
"botnet": "VARCHAR(63)",
"alternate_format_type": "smallint references alternate_format_types(Id)",
"sample_filename": "VARCHAR(255)",
"sample_sha256": "VARCHAR(72)",
"malicious_uri": "VARCHAR(255)",
}
# exploits : number of exploits
me.tables['vulnerable_uri'] = {
"Id": "SERIAL PRIMARY KEY",
"meta_api_key_id": "smallint",
"meta_country_code": "VARCHAR(3)",
"meta_id": "integer",
"meta_tld": "VARCHAR(64)",
"meta_reported_at": "timestamp",
"meta_domain": "VARCHAR(255)",
"meta_ip": "VARCHAR(10)",
"meta_status": "VARCHAR(16)",
"meta_asn": "integer",
"experiment" : "smallint references experiments(Id)",
"partner" : "smallint references partners(Id)",
"tool" : "smallint references tools(Id)",
#"report_category": "smallint references report_categories(Id)",
"report_type": "VARCHAR(255)",
"ts": "timestamp",
"source_key": "smallint references source_keys(Id)",
"source_value": "VARCHAR(255)",
"confidence_level": "real",
"version": "smallint",
"src_ip_v4": "VARCHAR(8)",
"src_ip_v6": "VARCHAR(10)",
"src_mode": "smallint references modes(Id)",
"vulnerabilities": "smallint default 0",
"report_id": "VARCHAR(63)",
"duration": "integer",
"reported_at": "timestamp",
"alternate_format_type": "smallint references alternate_format_types(Id)",
"ip_version": "smallint",
}
# vulnerabilities : number of vuln
        # making archive tables prefixed with arch_
        keyz = list(me.tables.keys())
for k in keyz:
me.tables["arch_"+k] = me.tables.pop(k)
if ensure_tables:
for t in me.tables:
cur.execute("Create Table If Not Exists %s("%t + ",".join("%s %s"%(key,me.tables[t][key]) for key in me.tables[t].keys()) +");")
me.dbclient.commit()
# creating indexes
generic_indexes = ['ts', 'reported_at', 'partner', 'tool', 'experiment', 'report_subcategory', 'confidence_level', 'meta_asn', 'meta_country_code']
double_indexes = ['reported_at']
tripple_indexes = ['experiment']
for t in me.tables:
for idx in generic_indexes:
# botnet does not have timestamp field
try:
cur.execute("Create index idx_%s_%s on %s(%s);"%(t,idx,t,idx))
log.info('Index created in table %s for field %s', t, idx)
except Exception as e:
log.warning('Could not create index : %s', e)
finally:
me.dbclient.commit()
if idx not in ['ts', 'reported_at', 'meta_reported_at']:
for didx in double_indexes:
try:
cur.execute("Create index idx_%s_%s_%s on %s(%s, %s);"%(t,didx,idx,t,didx,idx))
log.info('Index created in table %s for fields %s, %s', t, didx, idx)
except Exception as e:
log.warning('Could not create index : %s', e)
finally:
me.dbclient.commit()
# tripple idx
if idx not in tripple_indexes:
for tidx in tripple_indexes:
try:
cur.execute("Create index idx_%s_%s_%s_%s on %s(%s, %s, %s);"%(t,didx,tidx,idx,t,didx,tidx,idx))
log.info('Index created in table %s for fields %s, %s, %s', t, didx, tidx, idx)
except Exception as e:
log.warning('Could not create index : %s', e)
finally:
me.dbclient.commit()
#for t in me.tables:
# for idx in generic_indexes:
# # botnet does not have timestamp field
# try:
# cur.execute("Create index idx_%s_%s on %s(%s);"%(t,idx,t,idx))
# log.info('Index created in table %s for field %s', t, idx)
# except Exception as e:
# log.warning('Could not create index : %s', e)
# finally:
# me.dbclient.commit()
try:
cur.execute("Create index idx_arch_malware_ts_samples on arch_malware(ts, source_value);") # note! source_key is always "malware" in spec...
log.info('Index created in table arch_malware for fields ts and source_value')
except Exception as e:
log.warning('Could not create index : %s', e)
finally:
me.dbclient.commit()
try:
cur.execute("Create index idx_arch_malware_ts_samples on arch_malware(ts, mime_type);")
log.info('Index created in table arch_malware for fields ts and mime_type')
except Exception as e:
log.warning('Could not create index : %s', e)
finally:
me.dbclient.commit()
me.dbclient.commit()
if rewrite:
raise NotImplementedError
#if dbname in me.dbclient.database_names():
# me.dbclient.drop_database(dbname)
@staticmethod
def __parse_exp(report):
        if report is None:
            log.warning("No report_type found!")
            return {}
# e.g. "report_type": "[DDOS][HONEYNET][TID] Login attack by TI+D Kippo honeypot report"
m = ACDCExpDBBackup.headerexp.match(report)
res = {}
if m is None:
log.warning("Could not determine experiment, tool and partner from report_type")
else:
res['experiment'], res['tool'], res['partner'], *desc = m.groups()
return res
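    # Illustrative: for the report_type shown in the comment above, the regexp
    # yields {'experiment': 'DDOS', 'tool': 'HONEYNET', 'partner': 'TID'}.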
@staticmethod
def default_start_end(start, end):
"""
if start = None then start = end - 1 week
end and start format are like '2015-02-05 00:00:00'; time part can be dropped = > 0:0:0
"""
if end is None:
endt = (datetime.date.today() + datetime.timedelta(days=1))
end = endt.strftime("%Y-%m-%d")
if start is None:
start = (endt - datetime.timedelta(days=7)).strftime("%Y-%m-%d")
return start, end
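    # Illustrative: default_start_end(None, None) returns a one-week window
    # ending tomorrow, e.g. ('2015-02-05', '2015-02-12') when run on 2015-02-11;
    # explicitly passed values are returned unchanged.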
def arch_old_data(me, oldness = 8, field = 'reported_at'):
"""
        moves rows that are older than `oldness` days (with respect to the column `field`) from each table into its arch_ counterpart
"""
me.cur.execute("analyze;")
dictcur = me.dbclient.cursor(cursor_factory=psycopg2.extras.DictCursor)
res = {}
condition = field + " < (now() - INTERVAL '%s"%(oldness) +" days')::date"
        for t in me.tables:
            origtable = t[5:]
            log.info("")
            log.info(t + "\n" + 42*"-")
            move = "insert into %(archtable)s ("%(dict(archtable=t)) + ",".join(me.tables[t].keys()) +") select " + ",".join(me.tables[t].keys()) + " from " + origtable + " where " + condition + ";"
log.info(move)
dictcur.execute(move)
log.info("")
delete = "delete from %s"%(origtable) + " where " + condition +";"
log.info(delete)
dictcur.execute(delete)
me.dbclient.commit()
#dictcur.execute("select count(1) from %s where (%s < '%s') and (%s >= '%s');" %(table, field, end, field, start ))
#c = dictcur.fetchone()
#c = c.get('count')
#res[table] = c
dictcur.close()
me.cur.execute("analyze;")
return res
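    # Illustrative SQL generated per table for oldness=8 and field='reported_at'
    # (column list abbreviated):
    #   insert into arch_attack (<columns>) select <columns> from attack
    #     where reported_at < (now() - INTERVAL '8 days')::date;
    #   delete from attack where reported_at < (now() - INTERVAL '8 days')::date;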
if __name__ == "__main__":
import doctest
doctest.testmod()
    db = ACDCExpDBBackup(host='localhost', user='acdcuser', password='Uo9re0so', dbname='acdcexp', ensure_tables=False)
    db.arch_old_data(oldness=7)
sys.exit(0)
db.dbclient.commit()
|
py
|
1a5cd03f67cfe3a48e1899732bc82e5e32912604
|
import numpy
# Expected input: Image from gaussian pyramid
def divide_picture_to_windows(picture):
height = picture.shape[0] + 1
width = picture.shape[1] + 1
x_step = 48
y_step = 48
height_of_window = 48
width_of_window = 48
list_of_windows = []
count = 0
for y in range(0, height - height_of_window, y_step):
for x in range(0, width - width_of_window, x_step):
# print(x,y)
count = count + 1
window = numpy.zeros((height_of_window, width_of_window, 3))
for j in range(height_of_window):
for i in range(width_of_window):
window[j, i] = picture[y + j, x + i]
# print("Picture pixel:", window[j, i])
list_of_windows.append(window)
# Save picture
# scipy.misc.imsave("windows/window" + str(count), thinned_image)
windows = numpy.zeros((count, height_of_window, width_of_window, 3))
for i in range(count):
windows[i] = list_of_windows[i]
return windows
def convertWindowToArray(window):
array = numpy.zeros(200)
count = 0
for y in range(10):
for x in range(20):
array[count] = window[y, x]
count = count + 1
return array
if __name__ == "__main__":
pass
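    # Minimal usage sketch: split a dummy RGB image into 48x48 windows; the
    # expected shape follows directly from the loops above.
    dummy = numpy.random.rand(96, 144, 3)
    windows = divide_picture_to_windows(dummy)
    print(windows.shape)  # (6, 48, 48, 3) for a 96x144 input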
|
py
|
1a5cd0483536a458398921559b1d4c3b58e51c09
|
import logging
import os
import time
from pyvirtualdisplay import Display
log = logging.getLogger(__name__ + str(os.getpid()))
class VirtualScreen:
def __init__(self, visible=0, size=(1920, 1080)):
"""
Init an instance of virtual display
:param visible: whether visible on screen, 0 for false, 1 for true
:param size: virtual display size in pixels, as tuple form: (width, height)
"""
self.display = Display(visible=visible, size=size)
log.info("Virtual display set up, visible: {}, size: {}".
format(False if not visible else True, size))
self.display.start()
time.sleep(1)
def __enter__(self):
log.info("Created virtual display instance.")
return self.display
def __exit__(self, exc_type, exc_val, exc_tb):
if self.display:
self.display.stop()
log.info("Virtual display stopped.")
|
py
|
1a5cd0a5c564a12406db87b727f2048dada3b539
|
from django.contrib import admin
# Register your models here.
from .models import AddLokasi, Pulau, Vaksin, Provinsi
admin.site.register(AddLokasi)
admin.site.register(Pulau)
admin.site.register(Vaksin)
admin.site.register(Provinsi)
|
py
|
1a5cd0c5c8f5ffd58b3561b209e2e254b5f7ee83
|
from django.apps import AppConfig
class PlayerManagementConfig(AppConfig):
name = 'player_management'
|
py
|
1a5cd19a84b4510aa19d17e64d6c0e3787ae4c20
|
from honeygrove.config import Config
import os
import re
import xml.etree.ElementTree as ET
class FilesystemParser:
honeytoken_directory = str(Config.folder.honeytoken_files)
cd_pattern = "^cd \S+$"
mkdir_pattern = "^mkdir \S+$"
touch_pattern = "^touch \S+$"
ls_pattern = "^ls \S+$"
def __init__(self, xml_path=Config.folder.filesystem):
with open(str(xml_path)) as f:
try:
                self.start_path = f.readline().split("--")[1].split(",")  # read the first line and parse it
                self.start_path = list(map(int, self.start_path))  # convert the path components from strings to ints
            except Exception:
                self.start_path = []  # if nothing is given, "/" is used as the root/user directory
# The current position in the tree as list
self.current_pos = self.start_path
self.xml_path = xml_path
self.tree = ET.parse(str(self.xml_path))
self.root = self.tree.getroot()
if self.root.attrib['name'] != "/":
self.mode = "DOS"
else:
self.mode = "UNIX"
        # Saves the user directory path (to show it as "~")
self.user_path = self.get_current_path()
self.add_honeytoken_files()
def get_position(self, path):
"""
Specifies the position to a given path
:param path: the path which position shall be determined
:return:
"""
path = self.get_absolute_path(path)
if not self.valid_path(path):
raise Exception("Invalid path")
position = []
if path == "/":
return position
for element in path.split("/")[1:]: # da wir mit absoluten Pfaden arbeiten, ist das erste Element ""
children = [c.attrib['name'] for c in self.get_element(position)]
position.append(children.index(element))
return position
def get_path(self, position):
"""
Gives path for a position
:param position: The position, the path has to be determined for
:return:
"""
path = ""
current = self.root
if position == []:
return "/"
for i in position:
current = current[i]
if current.attrib['name'] != "/": # "root-/" brauchen wir nicht, ist schon da
path += "/" + current.attrib['name']
return path
def get_element(self, position):
"""
Gives the element from the XML-tree
:param position: Position of the element
:return:
"""
current = self.root
for i in position:
current = current[i]
return current
def get_absolute_path(self, rel_path: str):
"""
        Converts an (absolute or relative) path into an absolute path and resolves components like ".."
        :param rel_path: The path to be converted
        :return: the absolute path
"""
if not rel_path:
return ""
if self.mode == "DOS":
if re.match(r"\w:\\", rel_path[0:2]):
rel_path = rel_path[3:]
rel_path = rel_path.replace("\\", "/")
if rel_path == "/":
return rel_path
if rel_path[0] == "~":
rel_path = rel_path.replace("~", self.user_path)
if rel_path[0] != "/": # if its a absolute path, we don't have to add a prefix
rel_path = self.get_current_path() + "/" + rel_path
# Deletes stuff like ///, /./, ./ or /.
rel_path = re.sub(r"([^\.]|^)(\./)", r"/",
rel_path) # "beginning of the line" or " not .", followed by any amount of "./"
rel_path = re.sub(r"(/\.)$", r"/", rel_path) # the same for the end of the line
rel_path = re.sub(r"/{2,}", r"/", rel_path) # ///// goes to /
folders = rel_path.split("/")
folders = list(filter(None, folders))
i = 0
while i < len(folders):
f = folders[i]
if f == "..":
if i > 0:
folders.pop(i - 1)
folders.pop(i - 1) # same index because the list slipped by 1
else:
folders.pop(i)
i = 0
else:
i += 1
return "/" + "/".join(folders)
def tree_contains(self, file_name):
"""
Checks if a name exists somewhere in the tree
:param file_name:
:return:
"""
found = False
for child in self.root.findall('.//'):
if child.attrib['name'] == file_name:
found = True
break
return found
def add_honeytoken_files(self):
"""
        Adds the file names from the honeytoken files directory if files with the given names do not already exist
"""
for file in os.listdir(self.honeytoken_directory):
if not self.tree_contains(str(file)):
self.touch(self.user_path + "/" + file)
def get_current_path(self):
"""returns the current path as String"""
return self.get_path(self.current_pos)
def get_formatted_path(self):
"""
Returns the current path as platform adjusted, returnable String
:return:
"""
path = self.get_current_path()
if self.user_path == "/":
return path # if / is configured as user directory, nothing shall be replaced
if self.mode == "DOS":
return "C:" + path.replace("/", "\\")
if self.user_path in path:
path = path.replace(self.user_path, '~')
return path
def mkdir(self, path):
"""Creates a new folder at the given path"""
return self.create(path, "dir")
def touch(self, path):
"""
Creates a new file at the given path
"""
try:
return self.create(path, "file")
except Exception as e:
return e
def create(self, path, tag):
"""
Creates a new node
        :param path: Path (with filename) to the new node
:param tag: Type (file or directory)
:return:
"""
path = self.get_absolute_path(path)
split = path.split("/")
file_path = "/".join(split[:-1])
file_name = split[-1]
if file_name in self.ls(file_path) or file_name == ".":
if tag == "dir":
return "mkdir: cannot create directory '" + file_name + "': File exists"
else:
                return  # shall not be created again, the file already exists
file_position = self.get_position(file_path)
ET.SubElement(self.get_element(file_position), tag, {"name": file_name})
def ls(self, path=''):
"""Lists all children"""
if path:
path = self.get_absolute_path(path)
pos = self.get_position(path)
else:
pos = self.current_pos
element = self.get_element(pos)
response = ""
for child in element:
response += child.attrib['name'] + '\n'
return response
def cd(self, path):
"""
Changes the position in the data tree
        :param path: absolute or relative path
        :return: None or an error message
"""
if not path:
return
input = path
path = self.get_absolute_path(path)
if not self.valid_path(path):
return input + ": No such file or directory"
self.current_pos = self.get_position(path)
return
def valid_path(self, path, tag=''):
"""
Determines if a given path exists
:param path: the path to be checked
        :param tag: if tag is given, also check that the element at position path has this tag
"""
path = self.get_absolute_path(path) # just in case
if tag != 'file' and path == "/":
return True
pos = []
res = True
for p in path.split("/")[1:]:
children = [c.attrib['name'] for c in self.get_element(pos)]
if p in children:
pos.append(children.index(p))
else:
res = False
if not (tag == '' or self.get_element(pos).tag == tag): # not valid if the tag is not the desired
res = False
return res
def valid_directory(self, path):
"""Determines if the given path of current_pos leads to a folder"""
return self.valid_path(path, 'dir')
def valid_file(self, path):
"""Determines if the given path of current_pos leads to a file"""
return self.valid_path(path, 'file')
def delete(self, path):
"""
Searches for a given file and deletes it if it exists
:param path: the path to the file to be deleted
:return:
"""
if path == ".." or path == ".":
return "rm: refusing to remove '.' or '..' directory: skipping '" + path + "'"
path = self.get_absolute_path(path)
if not self.valid_path(path):
return
child_name = path.split("/")[-1]
parent_path = "/".join(path.split("/")[:-1])
parent = self.get_element(self.get_position(parent_path))
for child in parent:
if child.attrib.get('name') == child_name:
parent.remove(child)
def rename(self, from_path, to_name):
"""
Changes the name of a given file
        :param from_path: path to the file to be renamed
:param to_name: new name
:return:
"""
self.move(from_path, to_name) # rename is actually just a special case of move
def move(self, sourcepath, targetpath):
"""
Moves a file from one position to another
:param sourcepath: the path to the file to be moved
:param targetpath: the destination path (with new filename)
:return:
"""
sourcepath = self.get_absolute_path(sourcepath)
targetpath = self.get_absolute_path(targetpath)
split = targetpath.split("/")
parentpath = "/" + "/".join(split[1:-1])
element = self.get_element(self.get_position(sourcepath))
sourcetype = element.tag
if not self.valid_directory(parentpath):
return "Directory not found."
else:
if self.valid_path(targetpath):
targettype = self.get_element(self.get_position(targetpath)).tag
if targettype != sourcetype:
return "Not possible"
parent = self.get_element(self.get_position(parentpath))
self.delete(sourcepath)
element.attrib['name'] = targetpath.split("/")[-1]
parent.append(element)
def cat(self, path):
"""
Returns the content of the file as String
:param path: the path to the file
:return:
"""
path = self.get_absolute_path(path)
if not self.valid_path(path):
raise Exception("File not found")
if not self.valid_file(path):
raise Exception("Is a directory")
filename = path.split("/")[-1]
for f in os.listdir(self.honeytoken_directory):
if f == filename:
with open(self.honeytoken_directory + "/" + f, "r") as fp:
data = fp.read()
return data
|
py
|
1a5cd1ae8781ee5c88f7585e676273afd21406d0
|
import numpy as np
import logging as log
from Game_model import *
class Ai_controler():
def __init__(self):
self.board = Game_model()
log.basicConfig(filename="logfile.log", level=log.INFO)
def ai_move(self):
        while self.board.get_current_state() == "Not done jet":
log.info(self.board.get_grid())
self.board.move_up()
            if not self.board.changed:
                self.board.move_right()
            if not self.board.changed:
                self.board.move_down()
            if not self.board.changed:
                self.board.move_left()
if __name__ == "__main__":
ai = Ai_controler()
ai.ai_move()
|
py
|
1a5cd1fa9001299382feddabe44749f797c5ba46
|
#!/usr/bin/python
#
# tester.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import math
import sys
import os
import struct
import threading
import time
import random
import time
import traceback
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]
import fdb
fdb.api_version(int(sys.argv[2]))
from fdb import six
from fdb.impl import strinc
import fdb.tuple
from directory_extension import DirectoryExtension
from cancellation_timeout_tests import test_timeouts
from cancellation_timeout_tests import test_db_timeouts
from cancellation_timeout_tests import test_cancellation
from cancellation_timeout_tests import test_retry_limits
from cancellation_timeout_tests import test_db_retry_limits
from cancellation_timeout_tests import test_combinations
from size_limit_tests import test_size_limit_option, test_get_approximate_size
random.seed(0)
if len(sys.argv) == 4:
db = fdb.open(sys.argv[3])
else:
db = fdb.open()
class Stack:
def __init__(self):
self.stack = []
def __repr__(self):
return repr(self.stack)
def __str__(self):
return str(self.stack)
def __len__(self):
return len(self.stack)
def __getitem__(self, idx):
return self.stack[idx]
def __setitem__(self, idx, val):
self.stack[idx] = val
def push(self, idx, val):
self.stack.insert(0, (idx, val))
def pop(self, count=None, with_idx=False):
c = count
if c is None:
c = 1
raw = self.stack[:c]
del self.stack[:c]
for i in range(len(raw)):
if isinstance(raw[i][1], fdb.Future):
try:
val = raw[i][1].wait()
if val is None or (hasattr(val, 'present') and not val.present()):
raw[i] = (raw[i][0], b'RESULT_NOT_PRESENT')
else:
raw[i] = (raw[i][0], val)
except fdb.FDBError as e:
# print('ERROR: %r' % e)
raw[i] = (raw[i][0], fdb.tuple.pack((b'ERROR', str(e.code).encode('ascii'))))
if count is None:
if with_idx:
return raw[0]
else:
return raw[0][1]
else:
if with_idx:
return raw
else:
return [item[1] for item in raw]
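# Illustrative behaviour (the index records which instruction pushed a value):
#   s = Stack(); s.push(0, b'a'); s.push(1, b'b')
#   s.pop()               -> b'b'
#   s.pop(with_idx=True)  -> (0, b'a')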
class Instruction:
def __init__(self, tr, stack, op, index, isDatabase=False, isSnapshot=False):
self.tr = tr
self.stack = stack
self.op = op
self.index = index
self.isDatabase = isDatabase
self.isSnapshot = isSnapshot
def pop(self, count=None, with_idx=False):
return self.stack.pop(count, with_idx)
def push(self, val):
self.stack.push(self.index, val)
def test_db_options(db):
db.options.set_location_cache_size(100001)
db.options.set_max_watches(100001)
db.options.set_datacenter_id("dc_id")
db.options.set_machine_id("machine_id")
db.options.set_snapshot_ryw_enable()
db.options.set_snapshot_ryw_disable()
db.options.set_transaction_logging_max_field_length(1000)
db.options.set_transaction_timeout(100000)
db.options.set_transaction_timeout(0)
db.options.set_transaction_timeout(0)
db.options.set_transaction_max_retry_delay(100)
db.options.set_transaction_size_limit(100000)
db.options.set_transaction_retry_limit(10)
db.options.set_transaction_retry_limit(-1)
db.options.set_transaction_causal_read_risky()
db.options.set_transaction_include_port_in_address()
@fdb.transactional
def test_options(tr):
tr.options.set_priority_system_immediate()
tr.options.set_priority_batch()
tr.options.set_causal_read_risky()
tr.options.set_causal_write_risky()
tr.options.set_read_your_writes_disable()
tr.options.set_read_system_keys()
tr.options.set_access_system_keys()
tr.options.set_transaction_logging_max_field_length(1000)
tr.options.set_timeout(60 * 1000)
tr.options.set_retry_limit(50)
tr.options.set_max_retry_delay(100)
tr.options.set_used_during_commit_protection_disable()
tr.options.set_debug_transaction_identifier('my_transaction')
tr.options.set_log_transaction()
tr.options.set_read_lock_aware()
tr.options.set_lock_aware()
tr.options.set_include_port_in_address()
tr.get(b'\xff').wait()
def check_watches(db, watches, expected):
for i, watch in enumerate(watches):
if watch.is_ready() or expected:
try:
watch.wait()
if not expected:
assert False, "Watch %d is ready" % i
except fdb.FDBError as e:
tr = db.create_transaction()
tr.on_error(e).wait()
return False
return True
def test_watches(db):
while True:
db[b'w0'] = b'0'
db[b'w3'] = b'3'
watches = [None]
@fdb.transactional
def txn1(tr):
watches[0] = tr.watch(b'w0')
tr.set(b'w0', b'0')
assert not watches[0].is_ready()
txn1(db)
watches.append(db.clear_and_watch(b'w1'))
watches.append(db.set_and_watch(b'w2', b'2'))
watches.append(db.get_and_watch(b'w3'))
assert watches[3][0] == b'3'
watches[3] = watches[3][1]
time.sleep(1)
if not check_watches(db, watches, False):
continue
del db[b'w1']
time.sleep(5)
if not check_watches(db, watches, False):
continue
db[b'w0'] = b'a'
db[b'w1'] = b'b'
del db[b'w2']
db.bit_xor(b'w3', b'\xff\xff')
if check_watches(db, watches, True):
return
@fdb.transactional
def test_locality(tr):
tr.options.set_timeout(60 * 1000)
tr.options.set_read_system_keys() # We do this because the last shard (for now, someday the last N shards) is in the /FF/ keyspace
    # This isn't strictly transactional, though we expect it to be given the size of our database
boundary_keys = list(fdb.locality.get_boundary_keys(tr, b'', b'\xff\xff')) + [b'\xff\xff']
end_keys = [tr.get_key(fdb.KeySelector.last_less_than(k)) for k in boundary_keys[1:]]
start_addresses = [fdb.locality.get_addresses_for_key(tr, k) for k in boundary_keys[:-1]]
end_addresses = [fdb.locality.get_addresses_for_key(tr, k) for k in end_keys]
if [set(s.wait()) for s in start_addresses] != [set(e.wait()) for e in end_addresses]:
raise Exception("Locality not internally consistent.")
def test_predicates():
assert fdb.predicates.is_retryable(fdb.FDBError(1020))
assert not fdb.predicates.is_retryable(fdb.FDBError(10))
class Tester:
tr_map = {}
tr_map_lock = threading.RLock()
def __init__(self, db, prefix):
self.db = db
self.instructions = self.db[fdb.tuple.range((prefix,))]
self.stack = Stack()
self.tr_name = prefix
Tester.tr_map[self.tr_name] = None
self.last_version = 0
self.threads = []
self.directory_extension = DirectoryExtension()
def push_range(self, inst, iter, prefix_filter=None):
kvs = []
for k, v in iter:
if prefix_filter is None or k.startswith(prefix_filter):
kvs += [k, v]
inst.push(fdb.tuple.pack(tuple(kvs)))
@staticmethod
@fdb.transactional
def wait_empty(tr, prefix):
res = tr.get_range_startswith(prefix, 1).to_list()
if len(res) == 1:
raise fdb.FDBError(1020)
@fdb.transactional
def log_stack(self, tr, prefix, entries):
for i, (idx, el) in entries.items():
pk = prefix + fdb.tuple.pack((i, idx))
pv = fdb.tuple.pack((el,))
tr.set(pk, pv[:40000])
def current_transaction(self):
with Tester.tr_map_lock:
return Tester.tr_map[self.tr_name]
def new_transaction(self):
with Tester.tr_map_lock:
Tester.tr_map[self.tr_name] = self.db.create_transaction()
def switch_transaction(self, name):
self.tr_name = name
with Tester.tr_map_lock:
if self.tr_name not in Tester.tr_map:
self.new_transaction()
def run(self):
for idx, i in enumerate(self.instructions):
op_tuple = fdb.tuple.unpack(i.value)
op = op_tuple[0]
# print("Stack is %r" % self.stack)
# if op != "PUSH" and op != "SWAP":
# print("%d. Instruction is %s" % (idx, op))
isDatabase = op.endswith(six.u('_DATABASE'))
isSnapshot = op.endswith(six.u('_SNAPSHOT'))
if isDatabase:
op = op[:-9]
obj = self.db
elif isSnapshot:
op = op[:-9]
obj = self.current_transaction().snapshot
else:
obj = self.current_transaction()
inst = Instruction(obj, self.stack, op, idx, isDatabase, isSnapshot)
try:
if inst.op == six.u("PUSH"):
inst.push(op_tuple[1])
elif inst.op == six.u("DUP"):
inst.stack.push(*self.stack[0])
elif inst.op == six.u("EMPTY_STACK"):
self.stack = Stack()
elif inst.op == six.u("SWAP"):
idx = inst.pop()
self.stack[0], self.stack[idx] = self.stack[idx], self.stack[0]
elif inst.op == six.u("POP"):
inst.pop()
elif inst.op == six.u("SUB"):
a, b = inst.pop(2)
inst.push(a - b)
elif inst.op == six.u("CONCAT"):
a, b = inst.pop(2)
inst.push(a + b)
elif inst.op == six.u("WAIT_FUTURE"):
old_idx, item = inst.pop(with_idx=True)
inst.stack.push(old_idx, item)
elif inst.op == six.u("NEW_TRANSACTION"):
self.new_transaction()
elif inst.op == six.u("USE_TRANSACTION"):
self.switch_transaction(inst.pop())
elif inst.op == six.u("ON_ERROR"):
inst.push(inst.tr.on_error(inst.pop()))
elif inst.op == six.u("GET"):
key = inst.pop()
num = random.randint(0, 2)
if num == 0:
f = obj[key]
elif num == 1:
f = obj.get(key)
else:
f = obj.__getitem__(key)
if f == None:
inst.push(b'RESULT_NOT_PRESENT')
else:
inst.push(f)
elif inst.op == six.u("GET_ESTIMATED_RANGE_SIZE"):
begin, end = inst.pop(2)
estimatedSize = obj.get_estimated_range_size_bytes(begin, end).wait()
inst.push(b"GOT_ESTIMATED_RANGE_SIZE")
elif inst.op == six.u("GET_KEY"):
key, or_equal, offset, prefix = inst.pop(4)
result = obj.get_key(fdb.KeySelector(key, or_equal, offset))
if result.startswith(prefix):
inst.push(result)
elif result < prefix:
inst.push(prefix)
else:
inst.push(strinc(prefix))
elif inst.op == six.u("GET_RANGE"):
begin, end, limit, reverse, mode = inst.pop(5)
if limit == 0 and mode == -1 and random.random() < 0.5:
if reverse:
r = obj[begin:end:-1]
else:
r = obj[begin:end]
else:
r = obj.get_range(begin, end, limit, reverse, mode)
self.push_range(inst, r)
elif inst.op == six.u("GET_RANGE_STARTS_WITH"):
prefix, limit, reverse, mode = inst.pop(4)
self.push_range(inst, obj.get_range_startswith(prefix, limit, reverse, mode))
elif inst.op == six.u("GET_RANGE_SELECTOR"):
begin_key, begin_or_equal, begin_offset, end_key, end_or_equal, end_offset, limit, reverse, mode, prefix = inst.pop(10)
beginSel = fdb.KeySelector(begin_key, begin_or_equal, begin_offset)
endSel = fdb.KeySelector(end_key, end_or_equal, end_offset)
if limit == 0 and mode == -1 and random.random() < 0.5:
if reverse:
r = obj[beginSel:endSel:-1]
else:
r = obj[beginSel:endSel]
else:
r = obj.get_range(beginSel, endSel, limit, reverse, mode)
self.push_range(inst, r, prefix_filter=prefix)
elif inst.op == six.u("GET_READ_VERSION"):
self.last_version = obj.get_read_version().wait()
inst.push(b"GOT_READ_VERSION")
elif inst.op == six.u("SET"):
key, value = inst.pop(2)
if random.random() < 0.5:
obj[key] = value
else:
obj.set(key, value)
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("LOG_STACK"):
prefix = inst.pop()
entries = {}
while len(self.stack) > 0:
stack_index = len(self.stack) - 1
entries[stack_index] = inst.pop(with_idx=True)
if len(entries) == 100:
self.log_stack(self.db, prefix, entries)
entries = {}
self.log_stack(self.db, prefix, entries)
elif inst.op == six.u("ATOMIC_OP"):
opType, key, value = inst.pop(3)
getattr(obj, opType.lower())(key, value)
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("SET_READ_VERSION"):
inst.tr.set_read_version(self.last_version)
elif inst.op == six.u("CLEAR"):
if random.random() < 0.5:
del obj[inst.pop()]
else:
obj.clear(inst.pop())
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("CLEAR_RANGE"):
begin, end = inst.pop(2)
num = random.randint(0, 2)
if num == 0:
del obj[begin:end]
elif num == 1:
obj.clear_range(begin, end)
else:
obj.__delitem__(slice(begin, end))
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("CLEAR_RANGE_STARTS_WITH"):
obj.clear_range_startswith(inst.pop())
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("READ_CONFLICT_RANGE"):
inst.tr.add_read_conflict_range(inst.pop(), inst.pop())
inst.push(b"SET_CONFLICT_RANGE")
elif inst.op == six.u("WRITE_CONFLICT_RANGE"):
inst.tr.add_write_conflict_range(inst.pop(), inst.pop())
inst.push(b"SET_CONFLICT_RANGE")
elif inst.op == six.u("READ_CONFLICT_KEY"):
inst.tr.add_read_conflict_key(inst.pop())
inst.push(b"SET_CONFLICT_KEY")
elif inst.op == six.u("WRITE_CONFLICT_KEY"):
inst.tr.add_write_conflict_key(inst.pop())
inst.push(b"SET_CONFLICT_KEY")
elif inst.op == six.u("DISABLE_WRITE_CONFLICT"):
inst.tr.options.set_next_write_no_write_conflict_range()
elif inst.op == six.u("COMMIT"):
inst.push(inst.tr.commit())
elif inst.op == six.u("RESET"):
inst.tr.reset()
elif inst.op == six.u("CANCEL"):
inst.tr.cancel()
elif inst.op == six.u("GET_COMMITTED_VERSION"):
self.last_version = inst.tr.get_committed_version()
inst.push(b"GOT_COMMITTED_VERSION")
elif inst.op == six.u("GET_APPROXIMATE_SIZE"):
approximate_size = inst.tr.get_approximate_size().wait()
inst.push(b"GOT_APPROXIMATE_SIZE")
elif inst.op == six.u("GET_VERSIONSTAMP"):
inst.push(inst.tr.get_versionstamp())
elif inst.op == six.u("TUPLE_PACK"):
count = inst.pop()
items = inst.pop(count)
inst.push(fdb.tuple.pack(tuple(items)))
elif inst.op == six.u("TUPLE_PACK_WITH_VERSIONSTAMP"):
prefix = inst.pop()
count = inst.pop()
items = inst.pop(count)
if not fdb.tuple.has_incomplete_versionstamp(items) and random.random() < 0.5:
inst.push(b"ERROR: NONE")
else:
try:
packed = fdb.tuple.pack_with_versionstamp(tuple(items), prefix=prefix)
inst.push(b"OK")
inst.push(packed)
except ValueError as e:
if str(e).startswith("No incomplete"):
inst.push(b"ERROR: NONE")
else:
inst.push(b"ERROR: MULTIPLE")
elif inst.op == six.u("TUPLE_UNPACK"):
for i in fdb.tuple.unpack(inst.pop()):
inst.push(fdb.tuple.pack((i,)))
elif inst.op == six.u("TUPLE_SORT"):
count = inst.pop()
items = inst.pop(count)
unpacked = map(fdb.tuple.unpack, items)
if six.PY3:
sorted_items = sorted(unpacked, key=fdb.tuple.pack)
else:
sorted_items = sorted(unpacked, cmp=fdb.tuple.compare)
for item in sorted_items:
inst.push(fdb.tuple.pack(item))
elif inst.op == six.u("TUPLE_RANGE"):
count = inst.pop()
items = inst.pop(count)
r = fdb.tuple.range(tuple(items))
inst.push(r.start)
inst.push(r.stop)
elif inst.op == six.u("ENCODE_FLOAT"):
f_bytes = inst.pop()
f = struct.unpack(">f", f_bytes)[0]
if not math.isnan(f) and not math.isinf(f) and not f == -0.0 and f == int(f):
f = int(f)
inst.push(fdb.tuple.SingleFloat(f))
elif inst.op == six.u("ENCODE_DOUBLE"):
d_bytes = inst.pop()
d = struct.unpack(">d", d_bytes)[0]
inst.push(d)
elif inst.op == six.u("DECODE_FLOAT"):
f = inst.pop()
f_bytes = struct.pack(">f", f.value)
inst.push(f_bytes)
elif inst.op == six.u("DECODE_DOUBLE"):
d = inst.pop()
d_bytes = struct.pack(">d", d)
inst.push(d_bytes)
elif inst.op == six.u("START_THREAD"):
t = Tester(self.db, inst.pop())
thr = threading.Thread(target=t.run)
thr.start()
self.threads.append(thr)
elif inst.op == six.u("WAIT_EMPTY"):
prefix = inst.pop()
Tester.wait_empty(self.db, prefix)
inst.push(b"WAITED_FOR_EMPTY")
elif inst.op == six.u("UNIT_TESTS"):
try:
test_db_options(db)
test_options(db)
test_watches(db)
test_cancellation(db)
test_retry_limits(db)
test_db_retry_limits(db)
test_timeouts(db)
test_db_timeouts(db)
test_combinations(db)
test_locality(db)
test_predicates()
test_size_limit_option(db)
test_get_approximate_size(db)
except fdb.FDBError as e:
print("Unit tests failed: %s" % e.description)
traceback.print_exc()
raise Exception("Unit tests failed: %s" % e.description)
elif inst.op.startswith(six.u('DIRECTORY_')):
self.directory_extension.process_instruction(inst)
else:
raise Exception("Unknown op %s" % inst.op)
except fdb.FDBError as e:
# print('ERROR: %r' % e)
inst.stack.push(idx, fdb.tuple.pack((b"ERROR", str(e.code).encode('ascii'))))
# print(" to %s" % self.stack)
# print()
[thr.join() for thr in self.threads]
if __name__ == '__main__':
t = Tester(db, sys.argv[1].encode('ascii'))
t.run()
|
py
|
1a5cd497ec9c3e22326a1639cfed468c652cda25
|
import unittest
from unittest.mock import MagicMock
from colorchanger import colorchanger
class MyTestCase(unittest.TestCase):
@staticmethod
def test_set_hue_color():
# Given
hue_light_id = 1
rgb_color = (0, 255, 0)
colorchanger.hue_bridge.set_light = MagicMock(return_value=None)
xy = colorchanger.converter.rgb_to_xy(0, 255, 0)
# When
colorchanger.set_hue_color(hue_light_id, rgb_color)
# Then
colorchanger.hue_bridge.set_light.assert_called_with(hue_light_id, 'xy', xy)
if __name__ == '__main__':
unittest.main()
|
py
|
1a5cd4b47bc01dfe123208c383715a7f3c706a18
|
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('prometheus/', include('django_prometheus.urls')),
]
if settings.DEBUG:
# When you are using the dev docker-compose, you need to serve the media files somehow
urlpatterns = static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + urlpatterns
|
py
|
1a5cd54fc002ab3ed6e04b4ee14a2bb3077e1e3d
|
from .BertTextEncoder import BertTextEncoder
from .FeatureNets import SubNet, TextSubNet
from .AlignNets import AlignSubNet
|
py
|
1a5cd5628618d1fb1e0894a4402645f57945f2bd
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for app_dev_linter.py."""
from __future__ import annotations
import io
import multiprocessing
import os
from core import utils
from core.tests import test_utils
from . import other_files_linter
from . import pre_commit_linter
NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = pre_commit_linter.FileCache()
FILE_CACHE = NAME_SPACE.files
LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
class CustomLintChecksManagerTests(test_utils.LinterTestBase):
"""Tests for CustomLintChecksManager."""
def setUp(self):
super(CustomLintChecksManagerTests, self).setUp()
self.verbose_mode_enabled = False
self.dependencies_file = io.StringIO(
'{\"dependencies\":{\"frontend\":{\"guppy\":'
'{\"version\": \"0.1\"},\"skulpt-dist\":{\"version\": \"0.2\"}'
',\"midiJs\":{\"version\": \"0.4\"}}}}')
self.package_file = io.StringIO(
'{\"dependencies\":{\"nerdamer\":\"^0.6\"}}')
self.files_in_typings_dir = [
'guppy-defs-0.1.d.ts',
'skulpt-defs-0.2.d.ts',
'midi-defs-0.4.d.ts',
'nerdamer-defs-0.6.d.ts'
]
def mock_open_file(path, unused_permissions):
if path == other_files_linter.DEPENDENCIES_JSON_FILE_PATH:
return self.dependencies_file
elif path == other_files_linter.PACKAGE_JSON_FILE_PATH:
return self.package_file
def mock_listdir(unused_path):
return self.files_in_typings_dir
self.open_file_swap = self.swap(
utils, 'open_file', mock_open_file)
self.listdir_swap = self.swap(os, 'listdir', mock_listdir)
def test_check_valid_pattern_in_app_dev_yaml(self):
def mock_readlines(unused_self, unused_filepath):
return (
'# Just a comment',
'# Third party files:',
'- third_party/static/bootstrap-4.3.1/')
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
with readlines_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_skip_files_in_app_dev_yaml()
expected_error_messages = ['SUCCESS App dev file check passed']
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('App dev file', error_messages.name)
self.assertFalse(error_messages.failed)
def test_check_invalid_pattern_in_app_dev_yaml(self):
def mock_readlines(unused_self, unused_filepath):
return (
'# Third party files:', '- third_party/static/bootstrap-4.3/')
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
with readlines_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_skip_files_in_app_dev_yaml()
self.assertEqual(len(error_messages.get_report()), 2)
self.assertTrue(
'Pattern on line 2 doesn\'t match any file or directory' in
error_messages.get_report()[0])
self.assertEqual('App dev file', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_valid_pattern(self):
def mock_readlines(unused_self, unused_filepath):
return (
'// This is a comment.',
'plugins: [',
' new HtmlWebpackPlugin({',
' chunks: [\'about\'],',
' filename: \'about-page.mainpage.html\',',
' meta: defaultMeta,',
' template: commonPrefix + \'/pages/about-page/about-page'
'.mainpage.html\',',
' minify: htmlMinifyConfig,',
' inject: false', '}),]'
)
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
with readlines_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_webpack_config_file()
expected_error_messages = [
'SUCCESS Webpack config file check passed']
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('Webpack config file', error_messages.name)
self.assertFalse(error_messages.failed)
def test_check_invalid_pattern_with_some_keys_missing(self):
def mock_readlines(unused_self, unused_filepath):
return (
'plugins: [',
' new HtmlWebpackPlugin({',
' chunks: [\'about\'],',
' filename: \'about-page.mainpage.html\',',
' minify: htmlMinifyConfig,',
' inject: false', '}),]'
)
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
with readlines_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_webpack_config_file()
expected_error_messages = [
'Line 2: The following keys: meta, template are missing in '
'HtmlWebpackPlugin block in webpack.common.config.ts',
'FAILED Webpack config file check failed']
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('Webpack config file', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_invalid_pattern_without_all_keys(self):
def mock_readlines(unused_self, unused_filepath):
return (
'plugins: [',
' new HtmlWebpackPlugin({',
'}),]'
)
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
with readlines_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_webpack_config_file()
expected_error_messages = [
'Line 2: The following keys: chunks, filename, meta, template,'
' minify, inject are missing in HtmlWebpackPlugin block in '
'webpack.common.config.ts', 'FAILED Webpack config file check'
' failed']
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('Webpack config file', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_third_party_libs_type_defs(self):
expected_error_messages = [
'SUCCESS Third party type defs check passed']
with self.open_file_swap, self.listdir_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_third_party_libs_type_defs()
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('Third party type defs', error_messages.name)
self.assertFalse(error_messages.failed)
def test_check_third_party_libs_type_defs_verbose(self):
self.verbose_mode_enabled = True
expected_error_messages = [
'SUCCESS Third party type defs check passed']
with self.open_file_swap, self.listdir_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_third_party_libs_type_defs()
self.assertEqual(
error_messages.get_report(), expected_error_messages)
self.assertEqual('Third party type defs', error_messages.name)
self.assertFalse(error_messages.failed)
def test_check_third_party_libs_type_defs_multiple(self):
self.files_in_typings_dir.append('guppy-defs-0.2.d.ts')
expected_error_messages = 'FAILED Third party type defs check failed'
with self.open_file_swap, self.listdir_swap, self.print_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_third_party_libs_type_defs()
self.assertEqual(
error_messages.get_report()[1], expected_error_messages)
self.assert_same_list_elements([
'There are multiple type definitions for Guppy in the '
'typings dir.'], error_messages.get_report())
self.assertEqual('Third party type defs', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_third_party_libs_type_defs_no_type_defs(self):
self.files_in_typings_dir = [
'skulpt-defs-0.2.d.ts',
'math-expressions-defs-0.3.d.ts',
'midi-defs-0.4.d.ts',
'nerdamer-defs-0.6.d.ts'
]
expected_error_messages = 'FAILED Third party type defs check failed'
with self.open_file_swap, self.listdir_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_third_party_libs_type_defs()
self.assertEqual(
error_messages.get_report()[1], expected_error_messages)
self.assert_same_list_elements([
'There are no type definitions for Guppy in the '
'typings dir.'], error_messages.get_report())
self.assertEqual('Third party type defs', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_third_party_libs_type_defs_wrong_version(self):
self.files_in_typings_dir = [
'guppy-defs-0.2.d.ts',
'skulpt-defs-0.2.d.ts',
'math-expressions-defs-0.3.d.ts',
'midi-defs-0.4.d.ts',
'nerdamer-defs-0.6.d.ts'
]
expected_error_messages = 'FAILED Third party type defs check failed'
with self.open_file_swap, self.listdir_swap, self.print_swap:
error_messages = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_third_party_libs_type_defs()
self.assertEqual(
error_messages.get_report()[1], expected_error_messages)
self.assert_same_list_elements([
'Type definitions for Guppy are not up to date. The '
'current version of Guppy is 0.1 and the type definitions '
'are for version 0.2. Please refer typings/README.md '
'for more details.'], error_messages.get_report())
self.assertEqual('Third party type defs', error_messages.name)
self.assertTrue(error_messages.failed)
def test_check_github_workflows_use_merge_action_checks(self):
def mock_listdir(unused_path):
return ['pass.yml', 'fail.yml', 'README']
def mock_read(path):
if path.endswith('pass.yml'):
return '\n'.join([
'name: Passing workflow file',
'on:',
' push:',
' branches:',
' - develop',
'',
'jobs:',
' run:',
' steps:',
' - uses: actions/checkout@v2',
' - uses: ./.github/actions/merge',
' - run: echo "oppia"',
])
elif path.endswith('fail.yml'):
return '\n'.join([
'name: Passing workflow file',
'on:',
' push:',
' branches:',
' - develop',
'',
'jobs:',
' run:',
' steps:',
' - uses: actions/checkout@v2',
' - run: echo "oppia"',
])
raise AssertionError(
'mock_read called with unexpected path %s' % path)
listdir_swap = self.swap_with_checks(
os, 'listdir', mock_listdir,
expected_args=[(other_files_linter.WORKFLOWS_DIR,)])
read_swap = self.swap(FILE_CACHE, 'read', mock_read)
expected = [
'%s --> Job run does not use the .github/actions/merge action.' %
os.path.join(other_files_linter.WORKFLOWS_DIR, 'fail.yml'),
'FAILED Github workflows use merge action check failed',
]
with listdir_swap, read_swap:
task_results = other_files_linter.CustomLintChecksManager(
FILE_CACHE).check_github_workflows_use_merge_action()
self.assertEqual(task_results.get_report(), expected)
def test_perform_all_lint_checks(self):
lint_task_report = other_files_linter.CustomLintChecksManager(
FILE_CACHE).perform_all_lint_checks()
self.assertTrue(isinstance(lint_task_report, list))
def test_get_linters_with_success(self):
custom_linter, third_party_linter = (
other_files_linter.get_linters(FILE_CACHE))
self.assertTrue(
isinstance(
custom_linter, other_files_linter.CustomLintChecksManager))
self.assertEqual(third_party_linter, None)
|
py
|
1a5cd562e059b842249d8b84b8fb69183721c747
|
from .AccuDist import AccuDist as AccuDist
from .ADX import ADX as ADX
from .ALMA import ALMA as ALMA
from .AO import AO as AO
from .Aroon import Aroon as Aroon
from .ATR import ATR as ATR
from .BB import BB as BB
from .BOP import BOP as BOP
from .CCI import CCI as CCI
from .ChaikinOsc import ChaikinOsc as ChaikinOsc
from .ChandeKrollStop import ChandeKrollStop as ChandeKrollStop
from .CHOP import CHOP as CHOP
from .CoppockCurve import CoppockCurve as CoppockCurve
from .DEMA import DEMA as DEMA
from .DonchianChannels import DonchianChannels as DonchianChannels
from .DPO import DPO as DPO
from .EMA import EMA as EMA
from .EMV import EMV as EMV
from .FibRetracement import FibRetracement as FibRetracement
from .ForceIndex import ForceIndex as ForceIndex
from .HMA import HMA as HMA
from .Ichimoku import Ichimoku as Ichimoku
from .KAMA import KAMA as KAMA
from .KeltnerChannels import KeltnerChannels as KeltnerChannels
from .KST import KST as KST
from .KVO import KVO as KVO
from .MACD import MACD as MACD
from .MassIndex import MassIndex as MassIndex
from .McGinleyDynamic import McGinleyDynamic as McGinleyDynamic
from .MeanDev import MeanDev as MeanDev
from .OBV import OBV as OBV
from .PivotsHL import PivotsHL as PivotsHL
from .ROC import ROC as ROC
from .RSI import RSI as RSI
from .ParabolicSAR import ParabolicSAR as ParabolicSAR
from .SFX import SFX as SFX
from .SMA import SMA as SMA
from .SMMA import SMMA as SMMA
from .SOBV import SOBV as SOBV
from .StdDev import StdDev as StdDev
from .Stoch import Stoch as Stoch
from .StochRSI import StochRSI as StochRSI
from .TEMA import TEMA as TEMA
from .TRIX import TRIX as TRIX
from .TSI import TSI as TSI
from .UO import UO as UO
from .VTX import VTX as VTX
from .VWAP import VWAP as VWAP
from .VWMA import VWMA as VWMA
from .WMA import WMA as WMA
__all__ = (
"AccuDist",
"ADX",
"ALMA",
"AO",
"Aroon",
"ATR",
"BB",
"BOP",
"CCI",
"ChaikinOsc",
"ChandeKrollStop",
"CHOP",
"CoppockCurve",
"DEMA",
"DonchianChannels",
"DPO",
"EMA",
"EMV",
"FibRetracement",
"ForceIndex",
"HMA",
"Ichimoku",
"KAMA",
"KeltnerChannels",
"KST",
"KVO",
"MACD",
"MassIndex",
"McGinleyDynamic",
"MeanDev",
"OBV",
"ParabolicSAR",
"PivotsHL",
"ROC",
"RSI",
"SFX",
"SMA",
"SMMA",
"SOBV",
"StdDev",
"Stoch",
"StochRSI",
"TEMA",
"TRIX",
"TSI",
"UO",
"VTX",
"VWAP",
"VWMA",
"WMA",
)
|
py
|
1a5cd59fa41c984347a59cb70ea65a9a8e25482a
|
import time
from huobi.connection.websocket_req_client import *
from huobi.utils.channels_request import *
from huobi.model.market import *
class ReqPriceDepthService:
def __init__(self, params):
self.params = params
def subscribe(self, callback, error_handler, **kwargs):
symbol_list = self.params["symbol_list"]
step = self.params["step"]
def subscription(connection):
for symbol in symbol_list:
connection.send(request_price_depth_channel(symbol, step))
time.sleep(0.01)
def parse(dict_data):
price_depth_event = PriceDepthReq()
price_depth_event.id = dict_data.get("id")
price_depth_event.rep = dict_data.get("rep")
data = dict_data.get("data", {})
price_depth_obj = PriceDepth.json_parse(data)
price_depth_event.data = price_depth_obj
return price_depth_event
WebSocketReqClient(**kwargs).execute_subscribe_v1(subscription,
parse,
callback,
error_handler)
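# Hedged usage sketch (added for illustration; the callback names and the
# "btcusdt"/"step0" values are assumptions, only the params keys come from
# this file): the service expects {"symbol_list": [...], "step": ...} and
# forwards each parsed PriceDepthReq to the callback.
#
#   def on_depth(event):
#       print(event.rep, event.data)
#
#   def on_error(err):
#       print(err)
#
#   ReqPriceDepthService({"symbol_list": ["btcusdt"], "step": "step0"}).subscribe(on_depth, on_error)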
|
py
|
1a5cd5b37c7701d3b49ed8021213d84c8caf9a37
|
from typing import List, Optional, Tuple
import matplotlib as mpl
mpl.use("Agg")
from theseus.opt import Opts
import os
import cv2
import torch
import numpy as np
from theseus.opt import Config
from theseus.segmentation.models import MODEL_REGISTRY
from theseus.segmentation.augmentations import TRANSFORM_REGISTRY
from theseus.segmentation.datasets import DATASET_REGISTRY, DATALOADER_REGISTRY
from theseus.utilities.loading import load_state_dict
from theseus.utilities.loggers import LoggerObserver, StdoutLogger
from theseus.utilities.cuda import get_devices_info
from theseus.utilities.getter import get_instance, get_instance_recursively
from theseus.utilities.visualization.visualizer import Visualizer
from theseus.cps.models.wrapper import ModelWithLoss
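# Descriptive note: VideoWriter below wraps cv2.VideoWriter with the mp4v codec,
# taking fps/width/height from a video_info dict and casting frames to uint8.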
class VideoWriter:
def __init__(self, video_info, saved_path):
self.video_info = video_info
self.saved_path = saved_path
self.FPS = self.video_info["fps"]
self.WIDTH = self.video_info["width"]
self.HEIGHT = self.video_info["height"]
self.NUM_FRAMES = self.video_info["num_frames"]
self.outvid = cv2.VideoWriter(
self.saved_path,
cv2.VideoWriter_fourcc(*"mp4v"),
self.FPS,
(self.WIDTH, self.HEIGHT),
)
def write(self, frame):
self.outvid.write(frame.astype(np.uint8))
class TestPipeline(object):
def __init__(self, opt: Config):
super(TestPipeline, self).__init__()
self.opt = opt
self.debug = opt["global"]["debug"]
self.logger = LoggerObserver.getLogger("main")
self.savedir = opt["global"]["save_dir"]
os.makedirs(self.savedir, exist_ok=True)
stdout_logger = StdoutLogger(__name__, self.savedir, debug=self.debug)
self.logger.subscribe(stdout_logger)
self.logger.text(self.opt, level=LoggerObserver.INFO)
self.transform_cfg = Config.load_yaml(opt["global"]["cfg_transform"])
self.device_name = opt["global"]["device"]
self.device = torch.device(self.device_name)
self.weights = opt["global"]["weights"]
self.transform = get_instance_recursively(
self.transform_cfg, registry=TRANSFORM_REGISTRY
)
self.dataset = get_instance(
opt["data"]["dataset"],
registry=DATASET_REGISTRY,
transform=self.transform["val"],
)
CLASSNAMES = self.dataset.classnames
self.dataloader = get_instance(
opt["data"]["dataloader"],
registry=DATALOADER_REGISTRY,
dataset=self.dataset,
)
self.model1 = get_instance(
self.opt["model1"],
registry=MODEL_REGISTRY,
classnames=CLASSNAMES,
num_classes=len(CLASSNAMES),
).to(self.device)
self.model2 = get_instance(
self.opt["model2"],
registry=MODEL_REGISTRY,
classnames=CLASSNAMES,
num_classes=len(CLASSNAMES),
).to(self.device)
if self.weights:
state_dict = torch.load(self.weights, map_location=self.device)
self.model1 = load_state_dict(self.model1, state_dict, "model1")
self.model2 = load_state_dict(self.model2, state_dict, "model2")
self.model = ModelWithLoss(
self.model1,
self.model2,
criterion_sup=None,
criterion_unsup=None,
soft_cps=True,
device=self.device,
)
def infocheck(self):
device_info = get_devices_info(self.device_name)
self.logger.text("Using " + device_info, level=LoggerObserver.INFO)
self.logger.text(
f"Number of test sample: {len(self.dataset)}", level=LoggerObserver.INFO
)
self.logger.text(
f"Everything will be saved to {self.savedir}", level=LoggerObserver.INFO
)
@torch.no_grad()
def inference(self):
self.infocheck()
self.logger.text("Inferencing...", level=LoggerObserver.INFO)
visualizer = Visualizer()
self.model.eval()
video_name, ext = os.path.splitext(os.path.basename(self.dataset.video_path))
saved_mask_path = os.path.join(self.savedir, f"{video_name}_masks{ext}")
saved_overlay_path = os.path.join(self.savedir, f"{video_name}_overlay{ext}")
mask_writer = VideoWriter(self.dataset.video_info, saved_mask_path)
overlay_writer = VideoWriter(self.dataset.video_info, saved_overlay_path)
for idx, batch in enumerate(self.dataloader):
inputs = batch["inputs"]
img_names = batch["img_names"]
ori_sizes = batch["ori_sizes"]
outputs = self.model.get_prediction(batch, self.device)
preds = outputs["masks"]
for (input, pred, filename, ori_size) in zip(
inputs, preds, img_names, ori_sizes
):
decode_pred = visualizer.decode_segmap(pred)[:, :, ::-1]
resized_decode_mask = cv2.resize(decode_pred, dsize=tuple(ori_size))
# Save mask
mask_writer.write(resized_decode_mask)
# Save overlay
raw_image = visualizer.denormalize(input)
raw_image = (raw_image * 255).astype(np.uint8)
ori_image = cv2.resize(raw_image, dsize=tuple(ori_size))
ori_image = cv2.cvtColor(ori_image, cv2.COLOR_RGB2BGR)
overlay = ori_image * 0.75 + resized_decode_mask * 0.25
overlay_writer.write(overlay)
self.logger.text(
f"Save submission video at {saved_mask_path}", level=LoggerObserver.INFO
)
self.logger.text(
f"Save overlay video at {saved_overlay_path}", level=LoggerObserver.INFO
)
if __name__ == "__main__":
opts = Opts().parse_args()
val_pipeline = TestPipeline(opts)
val_pipeline.inference()
|
py
|
1a5cd736d6aee246c21bf506116a21f5d5db1893
|
#!/usr/bin/python3
#
# ./cgetall.py canvas_course_page_url|course_id [destination_directory]
#
# get all of the Canvas course pages with a given base URL or for a given course_id
#
# with the option '-C' or '--containers' use HTTP rather than HTTPS for access to Canvas
# with the option "-v" or "--verbose" you get lots of output - showing in detail the operations of the program
#
# Can also be called with an alternative configuration file:
# ./cgetall.py --config config-test.json 11
#
# Example:
# cgetall.py https://kth.instructure.com/courses/11/pages/test-3
# or
# cgetall.py 11
#
# both get all of the course pages for course 11
#
# G. Q. Maguire Jr.
#
# 2020.03.27
# based on the earlier cgetall.py of 2016.07.25
#
import csv, requests, time
from pprint import pprint
import optparse
import sys
import json
#############################
###### EDIT THIS STUFF ######
#############################
global baseUrl # the base URL used for access to Canvas
global header # the header for all HTML requests
global payload # place to store additionally payload when needed for options to HTML requests
# Based upon the options to the program, initialize the variables used to access Canvas via HTML requests
def initialize(options):
global baseUrl, header, payload
# styled based upon https://martin-thoma.com/configuration-files-in-python/
if options.config_filename:
config_file=options.config_filename
else:
config_file='config.json'
try:
with open(config_file) as json_data_file:
configuration = json.load(json_data_file)
access_token=configuration["canvas"]["access_token"]
if options.containers:
baseUrl="http://"+configuration["canvas"]["host"]+"/api/v1"
print("using HTTP for the container environment")
else:
baseUrl="https://"+configuration["canvas"]["host"]+"/api/v1"
header = {'Authorization' : 'Bearer ' + access_token}
payload = {}
except:
print("Unable to open configuration file named {}".format(config_file))
print("Please create a suitable configuration file, the default name is config.json")
sys.exit()
#modules_csv = 'modules.csv' # name of file storing module names
log_file = 'log.txt' # a log file. it will log things
def list_pages(course_id):
list_of_all_pages=[]
# Use the Canvas API to get the list of pages for this course
#GET /api/v1/courses/:course_id/pages
url = "{0}/courses/{1}/pages".format(baseUrl, course_id)
if Verbose_Flag:
print("url: {}".format(url))
r = requests.get(url, headers = header)
if r.status_code == requests.codes.ok:
page_response=r.json()
for p_response in page_response:
list_of_all_pages.append(p_response)
# the following is needed when the response has been paginated
# i.e., when the response is split into pieces - each returning only some of the list of pages
# see "Handling Pagination" - Discussion created by [email protected] on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
page_response = r.json()
for p_response in page_response:
list_of_all_pages.append(p_response)
if Verbose_Flag:
for p in list_of_all_pages:
print("{}".format(p["title"]))
return list_of_all_pages
def getall_course_pages(course_id, destination_directory):
for p in list_pages(course_id):
url = "{0}/courses/{1}/pages/{2}".format(baseUrl, course_id, p["url"])
if Verbose_Flag:
print(url)
payload={}
r = requests.get(url, headers = header, data=payload)
if Verbose_Flag:
print("r.status_code: {}".format(r.status_code))
if r.status_code == requests.codes.ok:
page_response = r.json()
new_file_name=p["url"][p["url"].rfind("/")+1:]+'.html'
if len(destination_directory) > 0:
new_file_name=destination_directory+'/'+new_file_name
if Verbose_Flag:
print("new_file_name: {}".format(new_file_name))
# write out body of response as a .html page
with open(new_file_name, 'wb') as f:
# modified the code to handle empty files
if len(page_response["body"]) > 0:
encoded_output = bytes(page_response["body"], 'UTF-8')
else:
encoded_output = bytes("", 'UTF-8')
f.write(encoded_output)
continue
else:
print("No such page: {}".format(canvas_course_page_url))
continue
return True
def main():
global Verbose_Flag
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose',
dest="verbose",
default=False,
action="store_true",
help="Print lots of output to stdout"
)
parser.add_option("--config", dest="config_filename",
help="read configuration from FILE", metavar="FILE")
parser.add_option('-t', '--testing',
dest="testing",
default=False,
action="store_true",
help="execute test code"
)
parser.add_option('-C', '--containers',
dest="containers",
default=False,
action="store_true",
help="for the container enviroment in the virtual machine"
)
options, remainder = parser.parse_args()
Verbose_Flag=options.verbose
if Verbose_Flag:
print("ARGV :{}".format(sys.argv[1:]))
print("VERBOSE :{}".format(options.verbose))
print("REMAINING :{}".format(remainder))
initialize(options)
if (len(remainder) < 1):
print("Inusffient arguments\n must provide url or course_id\n")
else:
canvas_course_page_url=remainder[0]
if (len(remainder) >= 2):
destination_directory=remainder[1]
print("outputting files to {}".format(destination_directory))
else:
destination_directory=""
if canvas_course_page_url.find("http") >= 0:
#extract course_id from URL
course_id=canvas_course_page_url[canvas_course_page_url.find("courses/")+8:canvas_course_page_url.find("pages/")-1]
else:
course_id=remainder[0]
if Verbose_Flag:
print("course_id: {}".format(course_id))
output=getall_course_pages(course_id, destination_directory)
if (output):
if Verbose_Flag:
pprint(output)
if __name__ == "__main__": main()
|
py
|
1a5cd925e2e19c188978be89a0c4f9946e5bf02a
|
print()
print("Area of Circle: This Program calculate the area of circle")
r = input("please enter r ")
r = float(r)
a = 3.14 * r * r
print("area is " + str(a))
|
py
|
1a5cd9c35a47c33751f8290d5dddc13ff8efbf21
|
# -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from numpy.testing import assert_allclose
import pytest
from fluids.numerics import assert_close, assert_close1d
from chemicals.thermal_conductivity import *
from chemicals.identifiers import check_CAS
from chemicals.thermal_conductivity import k_data_Perrys_8E_2_314, k_data_Perrys_8E_2_315, k_data_VDI_PPDS_10, k_data_VDI_PPDS_9
def test_k_IAPWS():
rhos = [1., 122., 222., 272., 322., 372., 422., 750.]
Cps = [2069.0812064568445, 11353.470032452065, 101243.30196479718, 794916.0384979197, 5420611.2721776245, 500237.6519254826, 62663.67284339393, 4570.624565173062]
Cvs = [1595.69907291979, 3243.791325724295, 4523.436913569467, 5491.264195750903, 6188.749461187972, 5181.406642440796, 3904.379773638152, 2833.6557941038973]
mus = [2.3377752122053447e-05, 2.5520676836476175e-05, 3.133758919727549e-05, 3.622814313612717e-05, 4.296157881024315e-05, 4.5688204474708324e-05, 4.943625601494995e-05, 9.401498317589303e-05]
d_rho_d_Ps = [3.3774067394654917e-06, 1.710930848910942e-05, 0.000175456980972237, 0.0015082800389184703, 0.012136419490369314, 0.0012459172043680759, 0.00013039353796524478, 1.0510776327652118e-06]
k_CP = [0.05192989239188059, 0.13092288520449896, 0.3677874588628728, 0.7579597763651718, 1.4437555614266426, 0.6503194015489409, 0.4488834872838822, 0.6009613455848507]
k_calc = [k_IAPWS(T=647.35, rho=rhos[i], Cp=Cps[i], Cv=Cvs[i], mu=mus[i], drho_dP=d_rho_d_Ps[i]) for i in range(len(rhos))]
assert_close1d(k_calc, k_CP, rtol=5e-6)
# Region 1, test 2, from IAPWS formulation, exact match:
k = k_IAPWS(T=620., rho=699.226043, Cp=5320.47725, Cv=2916.92653, mu=84.1527945E-6, drho_dP=1.84869007E-6)
assert_close(k, 0.5450389394624772, rtol=1e-13)
# Region 2, test 1, from IAPWS formulation, exact match:
k= k_IAPWS(T=650., rho=1.00452141, Cp=2070.10035, Cv=1596.75313, mu=23.4877453E-6, drho_dP=3.36351419E-6)
assert_close(k, 0.052231102436372065, rtol=1e-13)
# Region 3, test 1, from IAPWS formulation, exact match:
k = k_IAPWS(T=647.35, rho=222., Cp=101054.488, Cv=4374.66458, mu=31.2204749E-6, drho_dP=177.778595E-6)
assert_close(k, 0.36687941154060383, rtol=1e-13)
def test_Perrys2_314_data():
# In perry's, only 102 is used. No chemicals are missing.
# Tmaxs all match to 5E-4. Tmins match to 1E-3.
assert all([check_CAS(i) for i in k_data_Perrys_8E_2_314.index])
tots_calc = [k_data_Perrys_8E_2_314[i].abs().sum() for i in [u'C1', u'C2', u'C3', u'C4', u'Tmin', u'Tmax']]
tots = [48935634.823768869, 297.41545078799999, 421906466448.71423, 232863514627157.62, 125020.26000000001, 347743.42000000004]
assert_close(tots_calc, tots)
assert k_data_Perrys_8E_2_314.index.is_unique
assert k_data_Perrys_8E_2_314.shape == (345, 7)
def test_Perrys2_315_data():
# From perry's - Deuterium , Nitrogen trifluoride , Nitrous oxide Silicon tetrafluoride , Terephthalic acid all have no data
# All perry's use #100.
# Tmins all match at 5E-4.
# Tmaxs all match at 2E-3.
assert all([check_CAS(i) for i in k_data_Perrys_8E_2_315.index])
tots_calc = [k_data_Perrys_8E_2_315[i].abs().sum() for i in [u'C1', u'C2', u'C3', u'C4', u'C5', u'Tmin', u'Tmax']]
tots = [82.001667499999996, 0.19894598900000002, 0.0065330144999999999, 0.00046928630199999995, 1.0268010799999999e-07, 70996.369999999995, 138833.41]
assert_close1d(tots_calc, tots)
assert k_data_Perrys_8E_2_315.index.is_unique
assert k_data_Perrys_8E_2_315.shape == (340, 8)
def test_VDI_PPDS_10_data():
"""Average deviation of 2.4% from tabulated values. Many chemicals have much
higher deviations. 10% or more deviations:
['75-34-3', '107-06-2', '106-93-4', '420-46-2', '71-55-6', '79-34-5',
'67-72-1', '76-12-0', '76-13-1', '76-14-2', '540-54-5', '75-01-4',
'75-35-4', '79-01-6', '127-18-4', '462-06-6', '108-90-7', '108-86-1',
'108-41-8', '100-44-7', '108-93-0', '100-61-8', '121-69-7', '91-66-7']
These have been checked - it appears the tabulated data is just incorrect.
"""
assert all([check_CAS(i) for i in k_data_VDI_PPDS_10.index])
tots_calc = [k_data_VDI_PPDS_10[i].abs().sum() for i in [u'A', u'B', u'C', u'D', u'E']]
tots = [2.2974640014599998, 0.015556001460000001, 1.9897655000000001e-05, 6.7747269999999993e-09, 2.3260109999999999e-12]
assert_close1d(tots_calc, tots)
assert k_data_VDI_PPDS_10.index.is_unique
assert k_data_VDI_PPDS_10.shape == (275, 6)
def test_VDI_PPDS_9_data():
"""Average deviation of 0.71% from tabulated values. The following have
larger deviations.
['124-18-5', '629-59-4', '629-78-7', '526-73-8', '95-63-6']
These have been checked - it appears the tabulated data is just incorrect.
"""
assert all([check_CAS(i) for i in k_data_VDI_PPDS_9.index])
tots_calc = [k_data_VDI_PPDS_9[i].abs().sum() for i in [u'A', u'B', u'C', u'D', u'E']]
tots = [63.458699999999993, 0.14461469999999998, 0.00042270770000000005, 1.7062660000000002e-06, 3.2715370000000003e-09]
assert_close1d(tots_calc, tots)
assert k_data_VDI_PPDS_9.index.is_unique
assert k_data_VDI_PPDS_9.shape == (271, 6)
def test_CSP_liq():
kl = Sheffy_Johnson(300, 47, 280)
assert_close(kl, 0.17740150413112196)
kl = Sato_Riedel(300, 47, 390, 520)
assert_close(kl, 0.2103769246133769)
kl = Lakshmi_Prasad(273.15, 100)
assert_close(kl, 0.013664450000000009)
kl = Gharagheizi_liquid(300, 40, 350, 1E6, 0.27)
assert_close(kl, 0.2171113029534838)
kl = Nicola_original(300, 142.3, 611.7, 0.49, 201853)
assert_close(kl, 0.2305018632230984)
kl = Nicola(300, 142.3, 611.7, 2110000.0, 0.49)
assert_close(kl, 0.10863821554584034)
# Not at all sure about this one
kl = Bahadori_liquid(273.15, 170)
assert_close(kl, 0.14274278108272603)
kl = kl_Mersmann_Kind(400, 170.33484, 658.0, 0.000754, 38)
assert_close(kl, 0.0895271829899285)
def test_CSP_liq_dense():
# From [2]_, for butyl acetate.
kl_dense = DIPPR9G(515.05, 3.92E7, 579.15, 3.212E6, 7.085E-2)
assert_close(kl_dense, 0.0864419738671184)
kld1 = Missenard(304., 6330E5, 591.8, 41E5, 0.129)
assert_close(kld1, 0.21983757770696569)
# # butyl acetate
kld2 = Missenard(515.05, 3.92E7, 579.15, 3.212E6, 7.085E-2)
assert_close(kld2, 0.086362465280714396)
def test_CSP_gas():
# 2-methylbutane at low pressure, 373.15 K. Mathes calculation in [1]_.
kg = Eucken(72.151, 135.9, 8.77E-6)
assert_close(kg, 0.018792644287722975)
# 2-methylbutane at low pressure, 373.15 K. Mathes calculation in [1]_.
kg = Eucken_modified(72.151, 135.9, 8.77E-6)
assert_close(kg, 0.023593536999201956)
# CO, brute force tests on three options for chemtype
kg1 = DIPPR9B(200., 28.01, 20.826, 1.277E-5, 132.92, chemtype='linear')
assert kg1 == DIPPR9B(200., 28.01, 20.826, 1.277E-5, 132.92) # No argument
kg2 = DIPPR9B(200., 28.01, 20.826, 1.277E-5, 132.92, chemtype='monoatomic')
kg3 = DIPPR9B(200., 28.01, 20.826, 1.277E-5, 132.92, chemtype='nonlinear')
assert_allclose([kg1, kg2, kg3], [0.01813208676438415, 0.023736881470903245, 0.018625352738307743])
with pytest.raises(ValueError):
DIPPR9B(200., 28.01, 20.826, 1.277E-5, 132.92, chemtype='FAIL')
kg = Chung(T=373.15, MW=72.151, Tc=460.4, omega=0.227, Cvm=135.9, mu=8.77E-6)
assert_close(kg, 0.023015653729496946)
kg = Eli_Hanley(T=373.15, MW=72.151, Tc=460.4, Vc=3.06E-4, Zc=0.267, omega=0.227, Cvm=135.9)
assert_close(kg, 0.022479517891353377)
kg = Eli_Hanley(T=1000, MW=72.151, Tc=460.4, Vc=3.06E-4, Zc=0.267, omega=0.227, Cvm=135.9)
assert_close(kg, 0.06369581356766069)
kg = Bahadori_gas(40+273.15, 20)
assert_close(kg, 0.031968165337873326)
kg = Gharagheizi_gas(580., 16.04246, 111.66, 4599000.0, 0.0115478000)
assert_close(kg, 0.09594861261873211)
def test_CSP_gas_dense():
kgs = [Stiel_Thodos_dense(T=378.15, MW=44.013, Tc=309.6, Pc=72.4E5, Vc=97.4E-6, Zc=0.274, Vm=i, kg=2.34E-2) for i in [144E-6, 24E-6, 240E-6]]
kgs_exp = [0.041245574404863684, 0.9158718777539487, 0.03258313269922979]
assert_allclose(kgs, kgs_exp)
kgs = [Eli_Hanley_dense(T=T, MW=42.081, Tc=364.9, Vc=1.81E-4, Zc=0.274, omega=0.144, Cvm=82.70, Vm=1.721E-4) for T in [473., 900]]
kgs_exp = [0.06038475936515042, 0.08987438807653142]
assert_allclose(kgs, kgs_exp)
kg = Eli_Hanley_dense(700, MW=42.081, Tc=364.9, Vc=1.81E-4, Zc=0.274, omega=0.144, Cvm=82.70, Vm=3.721E-4)
assert_allclose(kg, 0.06953791121177173)
kg = Chung_dense(T=473., MW=42.081, Tc=364.9, Vc=184.6E-6, omega=0.142, Cvm=82.67, Vm=172.1E-6, mu=134E-7, dipole=0.4)
assert_allclose(kg, 0.06160569232570781)
def test_DIPPR9H():
k = DIPPR9H([0.258, 0.742], [0.1692, 0.1528])
assert_allclose(k, 0.15657104706719646)
# with pytest.raises(Exception):
# DIPPR9H([0.258, 0.742], [0.1692])
def test_Filippov():
kl = Filippov([0.258, 0.742], [0.1692, 0.1528])
assert_allclose(kl, 0.15929167628799998)
with pytest.raises(ValueError):
Filippov([0.258], [0.1692, 0.1528])
def test_Lindsay_Bromley():
kg = Lindsay_Bromley(323.15, [0.23, 0.77], [1.939E-2, 1.231E-2], [1.002E-5, 1.015E-5], [248.31, 248.93], [46.07, 50.49])
assert_allclose(kg, 0.01390264417969313)
# with pytest.raises(Exception):
# Lindsay_Bromley(323.15, [0.23], [1.939E-2, 1.231E-2], [1.002E-5, 1.015E-5], [248.31, 248.93], [46.07, 50.49])
def test_Wassiljewa_Herning_Zipperer():
MWs = [40, 50, 60]
zs = [.1, .4, .5]
kgs = [.01, .015, .025]
k = Wassiljewa_Herning_Zipperer(zs, kgs, MWs)
assert_close(k, 0.01984976897415608, rtol=1e-13)
MWs = [40.0, 50.0, 60.0]
zs = [.1, .4, .5]
ks = [1.002E-5, 1.15E-5, 2e-5]
k = Wassiljewa_Herning_Zipperer(zs, ks, MWs)
assert_close(k, 1.5861181979916883e-05, rtol=1e-13)
MW_roots = [i**0.5 for i in MWs]
k = Wassiljewa_Herning_Zipperer(zs, ks, MWs, MW_roots)
assert_close(k, 1.5861181979916883e-05, rtol=1e-13)
def test_DIPPR9I():
k = DIPPR9I(zs=[.682, .318], Vms=[1.723e-2, 7.338e-2], ks=[.6037, .1628])
assert_close(k, 0.25397430656658937, rtol=1e-13)
|
py
|
1a5cdb39b507ade5c66100192b31b9eb03b1481e
|
import matplotlib.pyplot as plt
from algoritmo_genetico import AlgoritmoGenetico
class Produto:
def __init__(self, nome, espaco, valor):
self.nome = nome
self.espaco = espaco
self.valor = valor
if __name__ == "__main__":
# p1 = Produto("Iphone 6", 0.0000899, 2199.12)
lista_produtos = []
lista_produtos.append(Produto("Geladeira Dako", 0.751, 999.90))
lista_produtos.append(Produto("Iphone 6", 0.0000899, 2911.12))
lista_produtos.append(Produto("TV 55' ", 0.400, 4346.99))
lista_produtos.append(Produto("TV 50' ", 0.290, 3999.90))
lista_produtos.append(Produto("TV 42' ", 0.200, 2999.00))
lista_produtos.append(Produto("Notebook Dell", 0.00350, 2499.90))
lista_produtos.append(Produto("Ventilador Panasonic", 0.496, 199.90))
lista_produtos.append(Produto("Microondas Electrolux", 0.0424, 308.66))
lista_produtos.append(Produto("Microondas LG", 0.0544, 429.90))
lista_produtos.append(Produto("Microondas Panasonic", 0.0319, 299.29))
lista_produtos.append(Produto("Geladeira Brastemp", 0.635, 849.00))
lista_produtos.append(Produto("Geladeira Consul", 0.870, 1199.89))
lista_produtos.append(Produto("Notebook Lenovo", 0.498, 1999.90))
lista_produtos.append(Produto("Notebook Asus", 0.527, 3999.00))
espacos = []
valores = []
nomes = []
for produto in lista_produtos:
espacos.append(produto.espaco)
valores.append(produto.valor)
nomes.append(produto.nome)
limite = 3
tamanho_populacao = 20
taxa_mutacao = 0.01
numero_geracoes = 100
ag = AlgoritmoGenetico(tamanho_populacao, ver_evolucao=False)
resultado = ag.resolver(taxa_mutacao, numero_geracoes, espacos, valores, limite)
for i in range(len(lista_produtos)):
if resultado[i] == "1":
print("Nome: %s R$ %s " % (lista_produtos[i].nome, lista_produtos[i].valor))
"""
plt.plot(ag.lista_solucoes)
plt.title("Acompanhamento dos valores")
plt.show()
"""
input()
|
py
|
1a5cdbbca69d23f192257f918c8a30dce1c2ff10
|
# ============================================================
def solve():
"""
"""
from datetime import date # <- let's cheat
def next_first_day():
'''Yield date(year, month, 1) for every month from 1901 through 2000.'''
for year in range(1901, 2001):
for month in range(1, 13):
yield date(year, month, 1)
return sum(1 if d.weekday() == 6 else 0 for d in next_first_day())
# ============================================================
if __name__ == '__main__':
"""
"""
print solve()
|
py
|
1a5cdcf96c9d7bbce4f4dd6a4c51f75b6dbb3cb0
|
import sys
import os
sys.path.append(os.path.abspath("/src"))
import darknet
import utils
import parse
import kerasmodel
import yolodata
import ddd
from keras.models import load_model
from PIL import Image, ImageDraw
import numpy as np
from keras import backend as K
import keras.optimizers as opt
import cfgconst
#import opcv
import cv2
import scipy.misc
import tensorflow as tf
import keras
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
from keras.callbacks import EarlyStopping, ModelCheckpoint
# define constant
#cpu config
config = tf.ConfigProto( device_count = {'GPU': 1 , 'CPU': 56} ) #max: 1 gpu, 56 cpu
sess = tf.Session(config=config)
keras.backend.set_session(sess)
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
det_l = cfgconst.net.layers[len(cfgconst.net.layers)-1]
CLASSNUM = det_l.classes
f = open(cfgconst.labelnames)
voc_names =[]
for ln in f:
voc_names.append(ln.strip()) # = ["stopsign", "skis"]
# check class number
print voc_names
if CLASSNUM != len(voc_names):
print 'cfg file class setting is not equal to '+cfgconst.labelnames
exit()
# run_yolo
if len(sys.argv) < 2:
print ('usage: python %s [train/test/demo_video/debug] [pretrained model (optional)]\n' %(sys.argv[0]))
exit()
voc_labels= []
for i in range(CLASSNUM):
voc_labels.append("ui_data/labels/"+voc_names[i]+".PNG")
if not os.path.isfile(voc_labels[i]):
print ('can not load image:%s' %(voc_labels[i]))
exit()
import utils
thresh = utils.find_float_arg(sys.argv, "-thresh", .2)
#print 'thresh='+str(thresh)
#exit()
cam_index = utils.find_int_arg(sys.argv, "-c", 0)
#cfg_path = sys.argv[2]
model_weights_path = sys.argv[2] if len(sys.argv) > 2 else 'noweight'
filename = sys.argv[3] if len(sys.argv) > 3 else 'nofilename'
print sys.argv
print model_weights_path+','+filename
def train_yolo( weights_path):
# construct network
net = cfgconst.net #parse.parse_network_cfg(cfg_path)
train_images = cfgconst.train #"train_data/train.txt"
backup_directory = "backup/"
# load pretrained model
if os.path.isfile(model_weights_path):
print 'Loading '+model_weights_path
model=load_model(model_weights_path, custom_objects={'yololoss': ddd.yololoss})
sgd = opt.SGD(lr=net.learning_rate, decay=net.decay, momentum=net.momentum, nesterov=True)
model.compile(loss=ddd.yololoss, optimizer=sgd, metrics=["accuracy"])
else:
# base is cfg name
#base = utils.basecfg(cfg_path)
print ('Learning Rate: %f, Momentum: %f, Decay: %f\n' %(net.learning_rate, net.momentum, net.decay));
model = kerasmodel.makenetwork(net)
(X_train, Y_train) = yolodata.load_data(train_images,net.h,net.w,net.c, net)
print ('max_batches : %d, X_train: %d, batch: %d\n' %(net.max_batches, len(X_train), net.batch));
print str(net.max_batches/(len(X_train)/net.batch))
#datagen = ImageDataGenerator(
# featurewise_center=True,
# featurewise_std_normalization=True,
# rotation_range=0,
# width_shift_range=0.,
# height_shift_range=0.,
# horizontal_flip=True)
#datagen.fit(X_train)
#model.fit_generator(datagen.flow(X_train, Y_train, batch_size=net.batch),
# samples_per_epoch=len(X_train), nb_epoch=net.max_batches/(len(X_train)/net.batch))
#model.fit(X_train, Y_train, batch_size=net.batch, nb_epoch=net.max_batches/(len(X_train)/net.batch))
early_stop = EarlyStopping(monitor='loss',
min_delta=0.001,
patience=3,
mode='min',
verbose=1)
checkpoint = ModelCheckpoint('yolo_weight.h5',
monitor='loss',
verbose=1,
save_best_only=True,
mode='min',
period=1)
batchesPerdataset = max(1,len(X_train)/net.batch)
model.fit(X_train, Y_train, nb_epoch=net.max_batches/(batchesPerdataset), batch_size=net.batch, verbose=1)
model.save_weights('yolo_weight_rd.h5')
model.save('yolo_kerasmodel_rd.h5')
def debug_yolo( model_weights_path='yolo_kerasmodel_rd.h5' ):
net = cfgconst.net ##parse.parse_network_cfg(cfg_path)
testmodel = load_model(model_weights_path, custom_objects={'yololoss': ddd.yololoss})
(s,w,h,c) = testmodel.layers[0].input_shape
x_test,y_test = yolodata.load_data('train_data/test.txt', h, w, c, net)
testloss = testmodel.evaluate(x_test,y_test)
print y_test
print 'testloss= '+str(testloss)
def predict(X_test, testmodel, confid_thresh):
print 'predict, confid_thresh='+str(confid_thresh)
pred = testmodel.predict(X_test)
(s,w,h,c) = testmodel.layers[0].input_shape
# find confidence value > 0.5
confid_index_list =[]
confid_value_list =[]
x_value_list = []
y_value_list =[]
w_value_list =[]
h_value_list =[]
class_id_list =[]
classprob_list =[]
x0_list = []
x1_list = []
y0_list = []
y1_list = []
det_l = cfgconst.net.layers[len(cfgconst.net.layers)-1]
side = det_l.side
classes = det_l.classes
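# Descriptive note: each prediction vector is a flattened side x side grid,
# indexed as p[k*side*side + i*side + j] where k selects the channel
# (0 = confidence, 1-4 = x, y, w, h, 5+ = class probabilities) and (i, j)
# is the grid cell; with side = 7 this gives the 49 and 7 constants below.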
xtext_index =0
foundindex = False
max_confid =0
#
for p in pred:
#foundindex = False
for k in range(1): #5+classes):
#print 'L'+str(k)
for i in range(side):
for j in range(side):
if k==0:
max_confid = max(max_confid,p[k*49+i*7+j])
#sys.stdout.write( str(p[k*49+i*7+j])+', ' )
if k==0 and p[k*49+i*7+j]>confid_thresh:
confid_index_list.append(i*7+j)
foundindex = True
#print '-'
print 'max_confid='+str(max_confid)
#
for confid_index in confid_index_list:
confid_value = max(0,p[0*49+confid_index])
x_value = max(0,p[1*49+confid_index])
y_value = max(0,p[2*49+confid_index])
w_value = max(0,p[3*49+confid_index])
h_value = max(0,p[4*49+confid_index])
maxclassprob = 0
maxclassprob_i =-1
for i in range(classes):
if p[(5+i)*49+confid_index] > maxclassprob and foundindex:
maxclassprob = p[(5+i)*49+confid_index]
maxclassprob_i = i
classprob_list.append( maxclassprob)
class_id_list.append( maxclassprob_i)
print 'max_confid='+str(max_confid)+',c='+str(confid_value)+',x='+str(x_value)+',y='+str(y_value)+',w='+str(w_value)+',h='+str(h_value)+',cid='+str(maxclassprob_i)+',prob='+str(maxclassprob)
#
row = confid_index / side
col = confid_index % side
x = (w / side) * (col + x_value)
y = (w / side) * (row + y_value)
print 'confid_index='+str(confid_index)+',x='+str(x)+',y='+str(y)+',row='+str(row)+',col='+str(col)
#draw = ImageDraw.Draw(nim)
#draw.rectangle([x-(w_value/2)*w,y-(h_value/2)*h,x+(w_value/2)*w,y+(h_value/2)*h])
#del draw
#nim.save('predbox.png')
#sourceimage = X_test[xtext_index].copy()
x0_list.append( max(0, int(x-(w_value/2)*w)) )
y0_list.append( max(0, int(y-(h_value/2)*h)) )
x1_list.append( int(x+(w_value/2)*w) )
y1_list.append( int(y+(h_value/2)*h) )
break
#xtext_index = xtext_index + 1
#print pred
sourceimage = X_test[0].copy()
return sourceimage, x0_list, y0_list, x1_list, y1_list, classprob_list, class_id_list
def test_yolo(imglist_path, model_weights_path='yolo_kerasmodel_rd.h5', confid_thresh=0.3):
print 'test_yolo: '+imglist_path
# custom objective function
#print (s,w,h,c)
#exit()
if os.path.isfile(imglist_path):
testmodel = load_model(model_weights_path, custom_objects={'yololoss': ddd.yololoss})
(s,w,h,c) = testmodel.layers[0].input_shape
f = open(imglist_path)
for img_path in f:
#
#X_test = []
if os.path.isfile(img_path.strip()):
frame = Image.open(img_path.strip())
#(orgw,orgh) = img.size
nim = scipy.misc.imresize(frame, (w, h, c))
if nim.shape != (w, h, c):
continue
#nim = img.resize( (w, h), Image.BILINEAR )
img, x0_list, y0_list, x1_list, y1_list, classprob_list, class_id_list = predict(np.asarray([nim]), testmodel, thresh)
#X_test.append(np.asarray(nim))
#predict(np.asarray(X_test), testmodel, confid_thresh)
# found confid box
for x0,y0,x1,y1,classprob,class_id in zip(x0_list, y0_list, x1_list, y1_list, classprob_list, class_id_list):
#
# draw bounding box
cv2.rectangle(img, (x0, y0), (x1, y1), (255,255,255), 2)
# draw classimg
classimg = cv2.imread(voc_labels[class_id])
print 'box='+str(x0)+','+str(y0)+','+str(x1)+','+str(y1)
#print img.shape
#print classimg.shape
yst = max(0,y0-classimg.shape[0])
yend = max(y0,classimg.shape[0])
img[yst:yend, x0:x0+classimg.shape[1]] = classimg
# draw text
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, str(classprob), (x0,y0-classimg.shape[0]-1), font, 1,(255,255,255),2,cv2.LINE_AA)
#
cv2.imshow('frame',img)
if cv2.waitKey(1000) & 0xFF == ord('q'):
break
else:
print img_path+' predict fail'
cv2.destroyAllWindows()
else:
print imglist_path+' does not exist'
def demo_yolo(model_weights_path, filename, thresh=0.3):
print 'demo_yolo'
testmodel = load_model(model_weights_path, custom_objects={'yololoss': ddd.yololoss})
(s,w,h,c) = testmodel.layers[0].input_shape
cap = cv2.VideoCapture(filename)
while (cap.isOpened()):
ret, frame = cap.read()
if not ret:
break
#print frame
nim = scipy.misc.imresize(frame, (w, h, c))
#nim = np.resize(frame, (w, h, c)) #, Image.BILINEAR )
img, x0_list, y0_list, x1_list, y1_list, classprob_list, class_id_list = predict(np.asarray([nim]), testmodel, thresh)
# found confid box
for x0,y0,x1,y1,classprob,class_id in zip(x0_list, y0_list, x1_list, y1_list, classprob_list, class_id_list):
#
# draw bounding box
cv2.rectangle(img, (x0, y0), (x1, y1), (255,255,255), 2)
# draw classimg
classimg = cv2.imread(voc_labels[class_id])
print 'box='+str(x0)+','+str(y0)+','+str(x1)+','+str(y1)
#print img.shape
#print classimg.shape
yst = max(0,y0-classimg.shape[0])
yend = max(y0,classimg.shape[0])
img[yst:yend, x0:x0+classimg.shape[1]] = classimg
# draw text
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, str(classprob), (x0,y0-classimg.shape[0]-1), font, 1,(255,255,255),2,cv2.LINE_AA)
#
cv2.imshow('frame',img)
if cv2.waitKey(100) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if sys.argv[1]=='train':
train_yolo(model_weights_path)
elif sys.argv[1]=='test':
if os.path.isfile(model_weights_path):
test_yolo(filename, model_weights_path, confid_thresh=thresh)
else:
test_yolo(filename, confid_thresh=thresh)
elif sys.argv[1]=='demo_video':
if os.path.isfile(model_weights_path):
print 'pretrain model:'+model_weights_path+', video:'+filename+', thresh:'+str(thresh)
demo_yolo(model_weights_path, filename, thresh)
else:
print 'syntax error::need specify a pretrained model'
exit()
elif sys.argv[1]=='debug':
debug_yolo( model_weights_path )
|
py
|
1a5cdd7d47ad25327532791e803051573011f674
|
"""Download & cache terraform binaries"""
import logging
import platform
import re
from functools import partial
from os import PathLike, getenv, pathsep
from pathlib import Path
from typing import Union
from miutil.fdio import extractall
from miutil.web import urlopen_cached
__all__ = ["ARCH", "CACHE_DIR", "OS", "VERSION_TF", "terraform"]
log = logging.getLogger(__name__)
CACHE_DIR = "~/.terraform"
VERSION_TF = "1.0.5"
ARCH = "amd64" if "64" in platform.machine() else "386"
match = partial(re.match, string=platform.system(), flags=re.I)
for i in {"darwin", "freebsd", "linux", "openbsd", "windows|cli|cygwin|msys"}:
if match(i):
OS = i.split("|", 1)[0]
break
else:
OS = match("[a-zA-Z]+").group(0).lower()
AnyPath = Union[str, "PathLike[str]", Path]
def terraform(cache: AnyPath = CACHE_DIR, version: str = VERSION_TF) -> Path:
"""
Finds the first terraform binary on the $PATH,
otherwise downloads `version` to `cache`.
"""
base_bin = "terraform" + (".exe" if OS == "windows" else "")
for path in map(Path, getenv("PATH").split(pathsep)):
if (path / base_bin).is_file():
return (path / base_bin).resolve()
cache = Path(cache).expanduser()
bin = cache / base_bin
url = (
f"https://releases.hashicorp.com/terraform"
f"/{version}/terraform_{version}_{OS}_{ARCH}.zip"
)
if not bin.is_file():
log.info("Downloading to %s", cache)
with urlopen_cached(url, cache) as fd:
extractall(fd, cache)
assert bin.is_file()
if OS != "windows":
bin.chmod(0o755)
return bin
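# Minimal usage sketch (added for illustration; not part of the original module):
# resolves the terraform binary, downloading it into CACHE_DIR when it is not
# already on the $PATH.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    print(terraform())  # pathlib.Path to the resolved terraform binary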
|
py
|
1a5cdeb64f0792357f760d477e391a14989a6269
|
class SMLAOptimizer:
pass
|
py
|
1a5cdf096c733b1507e5c33d59ee8b37bf30813f
|
from __future__ import annotations
import click, parse, json
from modules import fileutils
from dataclasses import dataclass
from collections import defaultdict
@dataclass
class Point:
x:int
y:int
def __str__(self):
return f'({self.x}, {self.y})'
def __hash__(self):
return hash((self.x, self.y))
def __eq__(self, other):
return self.x == other.x and self.y == other.y
class Line:
def __init__(self, p1:Point, p2:Point):
self.p1 = p1
self.p2 = p2
self._points = []
self._slope = None
self._intercept = None
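# Descriptive note: the `points` property below enumerates the integer grid
# points the segment covers; horizontal and vertical segments are walked
# directly, any other segment is walked in x via solve(), which assumes the
# integer-slope (45-degree) diagonals the day-5 input provides.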
@property
def points(self):
if not self._points:
print(self)
if self.is_horizontal:
small,large = min(self.p1.x, self.p2.x), max(self.p1.x, self.p2.x)
self._points = [Point(x, self.p1.y) for x in range(small,large+1)]
elif self.is_vertical:
small,large = min(self.p1.y, self.p2.y), max(self.p1.y, self.p2.y)
self._points = [Point(self.p1.x, y) for y in range(small, large+1)]
else:
small,large = min(self.p1.x, self.p2.x), max(self.p1.x, self.p2.x)
self._points = [Point(x, self.solve(x)) for x in range(small, large+1)]
return self._points
def solve(self,x):
y = (self.slope) * x + self.y_intercept
return int(y)
@property
def slope(self):
if self._slope is None:
if self.is_vertical:
self._slope = float('NaN')
else:
self._slope = (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)
return self._slope
@property
def y_intercept(self):
if self._intercept is None:
self._intercept = self.p1.y - (self.slope * self.p1.x)
return self._intercept
@property
def is_horizontal(self):
return self.p1.y == self.p2.y
@property
def is_vertical(self):
return self.p1.x == self.p2.x
def __str__(self):
return f'{self.p1} -> {self.p2} :: y = {self.slope}x + {self.y_intercept}'
def parse_lines(lst):
pattern = parse.compile('{x1:d},{y1:d} -> {x2:d},{y2:d}')
lines = []
for elem in lst:
info = pattern.parse(elem)
lines.append(Line(Point(x=info['x1'], y=info['y1']), Point(x=info['x2'], y=info['y2'])))
return lines
def part_1(lines):
"""Part 1"""
lines = parse_lines(lines)
grid = defaultdict(lambda: 0)
for line in lines:
for point in line.points:
if line.is_horizontal or line.is_vertical:
grid[point] += 1
danger = sum([1 for v in grid.values() if v>1])
print(f'{danger=}')
def part_2(lines):
"""Part 2"""
lines = parse_lines(lines)
grid = defaultdict(lambda: 0)
for line in lines:
for point in line.points:
grid[point] += 1
danger = sum([1 for v in grid.values() if v>1])
print(f'{danger=}')
@click.command()
@click.option('--test', '-t', is_flag=True, default=False)
@click.argument('part', type=int)
def d5(test, part):
"""Day 5 commands"""
lines = fileutils.load_lines(5, test)
fn = {
1: part_1,
2: part_2,
}.get(part)
fn(lines)
|
py
|
1a5cdf7b1351d5bda8dc124d92d8931c1917fb00
|
# -*- coding: utf-8 -*-
"""
@date: 2021/9/23 10:06 PM
@file: general_dataset_v2.py
@author: zj
@description:
"""
import os
import json
from torch.utils.data import Dataset
from .evaluator.general_evaluator import GeneralEvaluator
from .util import default_loader
class GeneralDatasetV2(Dataset):
def __init__(self, root, transform=None, target_transform=None, top_k=(1, 5), keep_rgb=False):
assert os.path.isfile(root)
with open(root, 'r') as f:
data_dict = json.load(f)
self.classes = list(data_dict.keys())
self.total_img_list = list()
self.total_label_list = list()
for key in self.classes:
img_list = data_dict[key]
label = self.classes.index(key)
for img_path in img_list:
assert os.path.isfile(img_path), img_path
self.total_img_list.append(img_path)
self.total_label_list.append(label)
self.root = root
self.transform = transform
self.target_transform = target_transform
self.keep_rgb = keep_rgb
self._update_evaluator(top_k)
def __getitem__(self, index: int):
img_path = self.total_img_list[index]
target = self.total_label_list[index]
image = default_loader(img_path, rgb=self.keep_rgb)
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
target = self.target_transform(target)
return image, target
def __len__(self) -> int:
return len(self.total_img_list)
def _update_evaluator(self, top_k):
self.evaluator = GeneralEvaluator(self.classes, top_k=top_k)
def get_classes(self):
return self.classes
def __repr__(self):
return self.__class__.__name__ + ' (' + self.root + ')'
|
py
|
1a5ce0cf00c7d0ece20580c726a729ec26330a49
|
from django.conf.urls import url
from . import views
app_name = 'user_management_ui'
urlpatterns = [
url(
r'^register/verify/(?P<token>[0-9A-Za-z:\-_]+)/$',
views.VerifyUserEmailView.as_view(),
name='registration-verify',
),
]
|
py
|
1a5ce211dfd59128982bedcc19004c6824bcbbcd
|
import concurrent.futures as futures
import os
import pathlib
import re
from collections import OrderedDict
import numpy as np
from skimage import io
def get_image_index_str(img_idx):
return "{:06d}".format(img_idx)
def get_kitti_info_path(idx,
prefix,
info_type='image_2',
file_tail='.png',
training=True,
relative_path=True):
img_idx_str = get_image_index_str(idx)
img_idx_str += file_tail
prefix = pathlib.Path(prefix)
if training:
file_path = pathlib.Path('training') / info_type / img_idx_str
else:
file_path = pathlib.Path('testing') / info_type / img_idx_str
if not (prefix / file_path).exists():
raise ValueError("file not exist: {}".format(file_path))
if relative_path:
return str(file_path)
else:
return str(prefix / file_path)
def get_image_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'image_2', '.png', training,
relative_path)
def get_label_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'label_2', '.txt', training,
relative_path)
def get_velodyne_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training,
relative_path)
def get_calib_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'calib', '.txt', training,
relative_path)
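# Descriptive note: _extend_matrix appends the homogeneous row [0., 0., 0., 1.],
# turning the 3x4 KITTI calibration matrices (P0-P3, Tr_velo_to_cam, Tr_imu_to_velo)
# into 4x4 matrices.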
def _extend_matrix(mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
def get_kitti_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
image_ids=7481,
extend_matrix=True,
num_worker=8,
relative_path=True,
with_imageshape=True):
# image_infos = []
root_path = pathlib.Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
image_info = {'image_idx': idx}
annotations = None
if velodyne:
image_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path)
image_info['img_path'] = get_image_path(idx, path, training,
relative_path)
if with_imageshape:
img_path = image_info['img_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['img_shape'] = np.array(
io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(idx, path, training, relative_path)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array(
[float(info) for info in lines[0].split(' ')[1:13]]).reshape(
[3, 4])
P1 = np.array(
[float(info) for info in lines[1].split(' ')[1:13]]).reshape(
[3, 4])
P2 = np.array(
[float(info) for info in lines[2].split(' ')[1:13]]).reshape(
[3, 4])
P3 = np.array(
[float(info) for info in lines[3].split(' ')[1:13]]).reshape(
[3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
image_info['calib/P0'] = P0
image_info['calib/P1'] = P1
image_info['calib/P2'] = P2
image_info['calib/P3'] = P3
R0_rect = np.array([
float(info) for info in lines[4].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
image_info['calib/R0_rect'] = rect_4x4
Tr_velo_to_cam = np.array([
float(info) for info in lines[5].split(' ')[1:13]
]).reshape([3, 4])
Tr_imu_to_velo = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo
if annotations is not None:
image_info['annos'] = annotations
add_difficulty_to_annos(image_info)
return image_info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
def filter_kitti_anno(image_anno,
used_classes,
used_difficulty=None,
dontcare_iou=None):
if not isinstance(used_classes, (list, tuple)):
used_classes = [used_classes]
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, x in enumerate(image_anno['name']) if x in used_classes
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
if used_difficulty is not None:
relevant_annotation_indices = [
i for i, x in enumerate(img_filtered_annotations['difficulty'])
if x in used_difficulty
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
img_filtered_annotations[key][relevant_annotation_indices])
if 'DontCare' in used_classes and dontcare_iou is not None:
dont_care_indices = [
i for i, x in enumerate(img_filtered_annotations['name'])
if x == 'DontCare'
]
# bounding box format [y_min, x_min, y_max, x_max]
all_boxes = img_filtered_annotations['bbox']
ious = iou(all_boxes, all_boxes[dont_care_indices])
# Remove all bounding boxes that overlap with a dontcare region.
if ious.size > 0:
boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou
for key in image_anno.keys():
img_filtered_annotations[key] = (img_filtered_annotations[key][
np.logical_not(boxes_to_remove)])
return img_filtered_annotations
def filter_annos_low_score(image_annos, thresh):
new_image_annos = []
for anno in image_annos:
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, s in enumerate(anno['score']) if s >= thresh
]
for key in anno.keys():
img_filtered_annotations[key] = (
anno[key][relevant_annotation_indices])
new_image_annos.append(img_filtered_annotations)
return new_image_annos
def kitti_result_line(result_dict, precision=4):
prec_float = "{" + ":.{}f".format(precision) + "}"
res_line = []
all_field_default = OrderedDict([
('name', None),
('truncated', -1),
('occluded', -1),
('alpha', -10),
('bbox', None),
('dimensions', [-1, -1, -1]),
('location', [-1000, -1000, -1000]),
('rotation_y', -10),
('score', None),
])
res_dict = [(key, None) for key, val in all_field_default.items()]
res_dict = OrderedDict(res_dict)
for key, val in result_dict.items():
if all_field_default[key] is None and val is None:
raise ValueError("you must specify a value for {}".format(key))
res_dict[key] = val
for key, val in res_dict.items():
if key == 'name':
res_line.append(val)
elif key in ['truncated', 'alpha', 'rotation_y', 'score']:
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append(prec_float.format(val))
elif key == 'occluded':
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append('{}'.format(val))
elif key in ['bbox', 'dimensions', 'location']:
if val is None:
res_line += [str(v) for v in all_field_default[key]]
else:
res_line += [prec_float.format(v) for v in val]
else:
raise ValueError("unknown key. supported key:{}".format(
res_dict.keys()))
return ' '.join(res_line)
def add_difficulty_to_annos(info):
min_height = [40, 25,
25] # minimum height for evaluated groundtruth/detections
max_occlusion = [
0, 1, 2
] # maximum occlusion level of the groundtruth used for eval_utils
max_trunc = [
0.15, 0.3, 0.5
] # maximum truncation level of the groundtruth used for eval_utils
annos = info['annos']
dims = annos['dimensions'] # lhw format
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
    easy_mask = np.ones((len(dims), ), dtype=bool)
    moderate_mask = np.ones((len(dims), ), dtype=bool)
    hard_mask = np.ones((len(dims), ), dtype=bool)
i = 0
for h, o, t in zip(height, occlusion, truncation):
if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
easy_mask[i] = False
if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
moderate_mask[i] = False
if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
hard_mask[i] = False
i += 1
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos["difficulty"] = np.array(diff, np.int32)
return diff
def get_label_anno(label_path):
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': []
})
with open(label_path, 'r') as f:
lines = f.readlines()
# if len(lines) == 0 or len(lines[0]) < 15:
# content = []
# else:
content = [line.strip().split(' ') for line in lines]
annotations['name'] = np.array([x[0] for x in content])
annotations['truncated'] = np.array([float(x[1]) for x in content])
annotations['occluded'] = np.array([int(x[2]) for x in content])
annotations['alpha'] = np.array([float(x[3]) for x in content])
annotations['bbox'] = np.array(
[[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)
# dimensions will convert hwl format to standard lhw(camera) format.
annotations['dimensions'] = np.array(
[[float(info) for info in x[8:11]] for x in content]).reshape(
-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array(
[[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)
annotations['rotation_y'] = np.array(
[float(x[14]) for x in content]).reshape(-1)
if len(content) != 0 and len(content[0]) == 16: # have score
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros([len(annotations['bbox'])])
return annotations
def get_label_annos(label_folder, image_ids=None):
if image_ids is None:
filepaths = pathlib.Path(label_folder).glob('*.txt')
        prog = re.compile(r'^\d{6}\.txt$')
filepaths = filter(lambda f: prog.match(f.name), filepaths)
image_ids = [int(p.stem) for p in filepaths]
image_ids = sorted(image_ids)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
annos = []
label_folder = pathlib.Path(label_folder)
for idx in image_ids:
image_idx = get_image_index_str(idx)
label_filename = label_folder / (image_idx + '.txt')
annos.append(get_label_anno(label_filename))
return annos
def area(boxes, add1=False):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
a numpy array with shape [N*1] representing box areas
"""
if add1:
return (boxes[:, 2] - boxes[:, 0] + 1.0) * (
boxes[:, 3] - boxes[:, 1] + 1.0)
else:
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2, add1=False):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
a numpy array with shape [N*M] representing pairwise intersection area
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
if add1:
all_pairs_min_ymax += 1.0
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
if add1:
all_pairs_min_xmax += 1.0
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2, add1=False):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
    boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2, add1)
area1 = area(boxes1, add1)
area2 = area(boxes2, add1)
union = np.expand_dims(
area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
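# A brief usage sketch added for illustration (not part of the original module):
# it exercises iou() on two hand-made box sets in [y_min, x_min, y_max, x_max] order.
if __name__ == '__main__':
    _boxes_a = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    _boxes_b = np.array([[0., 0., 10., 10.]])
    # The identical pair gives IoU 1.0; the shifted box overlaps 25 of 175 units (~0.143).
    print(iou(_boxes_a, _boxes_b))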
|
py
|
1a5ce223e92f8f2a132365e75c5a5c00d8ad5da4
|
import unittest
import numpy as np
import pyroomacoustics as pra
room_dim = [15, 14, 16]
absorption = 0.2
source_position = [2.0, 3.1, 2.0]
mic_position = [2.0, 1.5, 2.0]
fs = 16000
max_order = 5
# scenario A
def get_room_constructor_args():
'''
When provided with sources and microphones, the constructor
should try to compute the RIR immediately
'''
source = pra.SoundSource(position=source_position)
mics = pra.MicrophoneArray(np.array([mic_position]).T, fs)
shoebox = pra.ShoeBox(
room_dim,
absorption=absorption,
fs=fs,
max_order=max_order,
sources=[source],
mics=mics,
)
shoebox.image_source_model()
shoebox.compute_rir()
return shoebox
# scenario B
def get_room_add_method():
shoebox = pra.ShoeBox(room_dim, absorption=absorption, fs=fs, max_order=max_order)
shoebox.add_source(source_position)
mics = pra.MicrophoneArray(np.array([mic_position]).T, fs)
shoebox.add_microphone_array(mics)
shoebox.image_source_model()
shoebox.compute_rir()
return shoebox
class RoomConstructorSources(unittest.TestCase):
def test_room_constructor(self):
room_1 = get_room_constructor_args()
self.assertTrue(isinstance(room_1.sources[0], pra.SoundSource))
def test_room_add_method(self):
room_2 = get_room_add_method()
self.assertTrue(isinstance(room_2.sources[0], pra.SoundSource))
def test_rir_equal(self):
room_1 = get_room_constructor_args()
room_2 = get_room_add_method()
self.assertTrue(np.allclose(room_1.rir[0][0], room_2.rir[0][0]))
if __name__ == '__main__':
unittest.main()
|
py
|
1a5ce229d0485d4ab276f5e5a66bea950c88c474
|
class BaseRequestError(Exception):
def __init__(self, *args, **kwargs):
self.errors = []
self.code = 400
if 'code' in kwargs:
self.code = kwargs['code']
def add_error(self, err):
        self.errors.append(err)
def set_errors(self, errors):
self.errors = errors
class BadRequestError(BaseRequestError):
"""400 BadRequestError"""
def __init__(self, *args, **kwargs):
super(BadRequestError, self).__init__(*args, **kwargs)
|
py
|
1a5ce38f6126129c0f2212c8f4af2747a4fddfef
|
from .base import extend_model, Model
|
py
|
1a5ce3b242f42af9e2f6863a33042ac09db5f578
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class ipsecprofile(base_resource) :
"""Configuration for IPSEC profile resource."""
def __init__(self) :
self._name = ""
self._ikeversion = ""
self._encalgo = []
self._hashalgo = []
self._lifetime = 0
self._psk = ""
self._publickey = ""
self._privatekey = ""
self._peerpublickey = ""
self._livenesscheckinterval = 0
self._replaywindowsize = 0
self._ikeretryinterval = 0
self._retransmissiontime = 0
self._perfectforwardsecrecy = ""
self._builtin = []
self.___count = 0
@property
def name(self) :
"""The name of the ipsec profile.<br/>Minimum length = 1<br/>Maximum length = 32."""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""The name of the ipsec profile.<br/>Minimum length = 1<br/>Maximum length = 32
:param name:
"""
try :
self._name = name
except Exception as e:
raise e
@property
def ikeversion(self) :
"""IKE Protocol Version.<br/>Possible values = V1, V2."""
try :
return self._ikeversion
except Exception as e:
raise e
@ikeversion.setter
def ikeversion(self, ikeversion) :
"""IKE Protocol Version.<br/>Possible values = V1, V2
:param ikeversion:
"""
try :
self._ikeversion = ikeversion
except Exception as e:
raise e
@property
def encalgo(self) :
"""Type of encryption algorithm.<br/>Possible values = AES, 3DES."""
try :
return self._encalgo
except Exception as e:
raise e
@encalgo.setter
def encalgo(self, encalgo) :
"""Type of encryption algorithm.<br/>Possible values = AES, 3DES
:param encalgo:
"""
try :
self._encalgo = encalgo
except Exception as e:
raise e
@property
def hashalgo(self) :
"""Type of hashing algorithm.<br/>Possible values = HMAC_SHA1, HMAC_SHA256, HMAC_SHA384, HMAC_SHA512, HMAC_MD5."""
try :
return self._hashalgo
except Exception as e:
raise e
@hashalgo.setter
def hashalgo(self, hashalgo) :
"""Type of hashing algorithm.<br/>Possible values = HMAC_SHA1, HMAC_SHA256, HMAC_SHA384, HMAC_SHA512, HMAC_MD5
:param hashalgo:
"""
try :
self._hashalgo = hashalgo
except Exception as e:
raise e
@property
def lifetime(self) :
"""Lifetime of IKE SA in seconds. Lifetime of IPSec SA will be (lifetime of IKE SA/8).<br/>Minimum length = 480<br/>Maximum length = 31536000."""
try :
return self._lifetime
except Exception as e:
raise e
@lifetime.setter
def lifetime(self, lifetime) :
"""Lifetime of IKE SA in seconds. Lifetime of IPSec SA will be (lifetime of IKE SA/8).<br/>Minimum length = 480<br/>Maximum length = 31536000
:param lifetime:
"""
try :
self._lifetime = lifetime
except Exception as e:
raise e
@property
def psk(self) :
"""Pre shared key value."""
try :
return self._psk
except Exception as e:
raise e
@psk.setter
def psk(self, psk) :
"""Pre shared key value.
:param psk:
"""
try :
self._psk = psk
except Exception as e:
raise e
@property
def publickey(self) :
"""Public key file path."""
try :
return self._publickey
except Exception as e:
raise e
@publickey.setter
def publickey(self, publickey) :
"""Public key file path.
:param publickey:
"""
try :
self._publickey = publickey
except Exception as e:
raise e
@property
def privatekey(self) :
"""Private key file path."""
try :
return self._privatekey
except Exception as e:
raise e
@privatekey.setter
def privatekey(self, privatekey) :
"""Private key file path.
:param privatekey:
"""
try :
self._privatekey = privatekey
except Exception as e:
raise e
@property
def peerpublickey(self) :
"""Peer public key file path."""
try :
return self._peerpublickey
except Exception as e:
raise e
@peerpublickey.setter
def peerpublickey(self, peerpublickey) :
"""Peer public key file path.
:param peerpublickey:
"""
try :
self._peerpublickey = peerpublickey
except Exception as e:
raise e
@property
def livenesscheckinterval(self) :
"""Number of seconds after which a notify payload is sent to check the liveliness of the peer. Additional retries are done as per retransmit interval setting. Zero value disables liveliness checks.<br/>Maximum length = 64999."""
try :
return self._livenesscheckinterval
except Exception as e:
raise e
@livenesscheckinterval.setter
def livenesscheckinterval(self, livenesscheckinterval) :
"""Number of seconds after which a notify payload is sent to check the liveliness of the peer. Additional retries are done as per retransmit interval setting. Zero value disables liveliness checks.<br/>Maximum length = 64999
:param livenesscheckinterval:
"""
try :
self._livenesscheckinterval = livenesscheckinterval
except Exception as e:
raise e
@property
def replaywindowsize(self) :
"""IPSec Replay window size for the data traffic.<br/>Maximum length = 16384."""
try :
return self._replaywindowsize
except Exception as e:
raise e
@replaywindowsize.setter
def replaywindowsize(self, replaywindowsize) :
"""IPSec Replay window size for the data traffic.<br/>Maximum length = 16384
:param replaywindowsize:
"""
try :
self._replaywindowsize = replaywindowsize
except Exception as e:
raise e
@property
def ikeretryinterval(self) :
"""IKE retry interval for bringing up the connection.<br/>Minimum length = 60<br/>Maximum length = 3600."""
try :
return self._ikeretryinterval
except Exception as e:
raise e
@ikeretryinterval.setter
def ikeretryinterval(self, ikeretryinterval) :
"""IKE retry interval for bringing up the connection.<br/>Minimum length = 60<br/>Maximum length = 3600
:param ikeretryinterval:
"""
try :
self._ikeretryinterval = ikeretryinterval
except Exception as e:
raise e
@property
def retransmissiontime(self) :
"""The interval in seconds to retry sending the IKE messages to peer, three consecutive attempts are done with doubled interval after every failure.<br/>Minimum length = 1<br/>Maximum length = 99."""
try :
return self._retransmissiontime
except Exception as e:
raise e
@retransmissiontime.setter
def retransmissiontime(self, retransmissiontime) :
"""The interval in seconds to retry sending the IKE messages to peer, three consecutive attempts are done with doubled interval after every failure.<br/>Minimum length = 1<br/>Maximum length = 99
:param retransmissiontime:
"""
try :
self._retransmissiontime = retransmissiontime
except Exception as e:
raise e
@property
def perfectforwardsecrecy(self) :
"""Enable/Disable PFS.<br/>Possible values = ENABLE, DISABLE."""
try :
return self._perfectforwardsecrecy
except Exception as e:
raise e
@perfectforwardsecrecy.setter
def perfectforwardsecrecy(self, perfectforwardsecrecy) :
"""Enable/Disable PFS.<br/>Possible values = ENABLE, DISABLE
:param perfectforwardsecrecy:
"""
try :
self._perfectforwardsecrecy = perfectforwardsecrecy
except Exception as e:
raise e
@property
def builtin(self) :
"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL."""
try :
return self._builtin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(ipsecprofile_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ipsecprofile
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""Use this API to add ipsecprofile.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
addresource = ipsecprofile()
addresource.name = resource.name
addresource.ikeversion = resource.ikeversion
addresource.encalgo = resource.encalgo
addresource.hashalgo = resource.hashalgo
addresource.lifetime = resource.lifetime
addresource.psk = resource.psk
addresource.publickey = resource.publickey
addresource.privatekey = resource.privatekey
addresource.peerpublickey = resource.peerpublickey
addresource.livenesscheckinterval = resource.livenesscheckinterval
addresource.replaywindowsize = resource.replaywindowsize
addresource.ikeretryinterval = resource.ikeretryinterval
addresource.retransmissiontime = resource.retransmissiontime
addresource.perfectforwardsecrecy = resource.perfectforwardsecrecy
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ ipsecprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].ikeversion = resource[i].ikeversion
addresources[i].encalgo = resource[i].encalgo
addresources[i].hashalgo = resource[i].hashalgo
addresources[i].lifetime = resource[i].lifetime
addresources[i].psk = resource[i].psk
addresources[i].publickey = resource[i].publickey
addresources[i].privatekey = resource[i].privatekey
addresources[i].peerpublickey = resource[i].peerpublickey
addresources[i].livenesscheckinterval = resource[i].livenesscheckinterval
addresources[i].replaywindowsize = resource[i].replaywindowsize
addresources[i].ikeretryinterval = resource[i].ikeretryinterval
addresources[i].retransmissiontime = resource[i].retransmissiontime
addresources[i].perfectforwardsecrecy = resource[i].perfectforwardsecrecy
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""Use this API to delete ipsecprofile.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
deleteresource = ipsecprofile()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ ipsecprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ ipsecprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the ipsecprofile resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = ipsecprofile()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = ipsecprofile()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [ipsecprofile() for _ in range(len(name))]
obj = [ipsecprofile() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = ipsecprofile()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
    def get_filtered(cls, client, filter_) :
        """Use this API to fetch a filtered set of ipsecprofile resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = ipsecprofile()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
"""Use this API to count the ipsecprofile resources configured on NetScaler.
:param client:
"""
try :
obj = ipsecprofile()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
    def count_filtered(cls, client, filter_) :
        """Use this API to count the filtered set of ipsecprofile resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = ipsecprofile()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Perfectforwardsecrecy:
""" """
ENABLE = "ENABLE"
DISABLE = "DISABLE"
class Builtin:
""" """
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
class Encalgo:
""" """
AES = "AES"
_3DES = "3DES"
class Ikeversion:
""" """
V1 = "V1"
V2 = "V2"
class Hashalgo:
""" """
HMAC_SHA1 = "HMAC_SHA1"
HMAC_SHA256 = "HMAC_SHA256"
HMAC_SHA384 = "HMAC_SHA384"
HMAC_SHA512 = "HMAC_SHA512"
HMAC_MD5 = "HMAC_MD5"
class ipsecprofile_response(base_response) :
""" """
def __init__(self, length=1) :
self.ipsecprofile = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ipsecprofile = [ipsecprofile() for _ in range(length)]
|
py
|
1a5ce413a2dd566138a822c8aa5225d081dfdc4b
|
from fastapi import Path
# example for /pep/{namespace}
example_namespace = Path(
...,
description="A namespace that holds projects.",
regex=r"^\w+$",
example="demo",
)
example_pep_id = Path(
...,
description="A project name inside a particular namespace",
example="BiocProject"
)
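# A minimal usage sketch added for illustration; the router, route template and
# handler name below are assumptions, not part of the original module.
from fastapi import APIRouter

_example_router = APIRouter()


@_example_router.get("/pep/{namespace}/{pep_id}")
async def _example_endpoint(namespace: str = example_namespace, pep_id: str = example_pep_id):
    # The Path objects above attach validation metadata (description, regex, example)
    # to the declared path parameters.
    return {"namespace": namespace, "pep_id": pep_id}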
|
py
|
1a5ce46b37b6d95024d9f165ea23621672131bb1
|
import os
import os.path
import sys
from multiprocessing import Pool
import numpy as np
import cv2
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from progress_bar import ProgressBar
def main():
    """A multi-thread tool to crop sub images."""
input_folder = '../dataset/dataset/training_set/train_hdr'
save_folder = '../dataset/dataset/training_set/train_hdr_sub'
n_thread = 20
crop_sz = 480 # crop size
step = 240 # crop stride
thres_sz = 48
compression_level = 0 # 3 is the default value in cv2
# CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer
    # compression time. If raw images are read during training, use 0 for faster IO speed.
if not os.path.exists(save_folder):
os.makedirs(save_folder)
print('mkdir [{:s}] ...'.format(save_folder))
else:
print('Folder [{:s}] already exists. Exit...'.format(save_folder))
sys.exit(1)
img_list = []
for root, _, file_list in sorted(os.walk(input_folder)):
path = [os.path.join(root, x) for x in file_list] # assume only images in the input_folder
img_list.extend(path)
#for file_name in file_list:
# if os.path.splitext(file_name)[1] == '.png':
# img_list.append(os.path.join(root, file_name))
def update(arg):
pbar.update(arg)
pbar = ProgressBar(len(img_list))
pool = Pool(n_thread)
for path in img_list:
pool.apply_async(worker,
args=(path, save_folder, crop_sz, step, thres_sz, compression_level),
callback=update)
pool.close()
pool.join()
print('All subprocesses done.')
def worker(path, save_folder, crop_sz, step, thres_sz, compression_level):
img_name = os.path.basename(path)
#img_name = '_'.join(path.split('/')[-4:])
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
n_channels = len(img.shape)
if n_channels == 2:
h, w = img.shape
elif n_channels == 3:
h, w, c = img.shape
else:
raise ValueError('Wrong image shape - {}'.format(n_channels))
h_space = np.arange(0, h - crop_sz + 1, step)
if h - (h_space[-1] + crop_sz) > thres_sz:
h_space = np.append(h_space, h - crop_sz)
w_space = np.arange(0, w - crop_sz + 1, step)
if w - (w_space[-1] + crop_sz) > thres_sz:
w_space = np.append(w_space, w - crop_sz)
index = 0
for x in h_space:
for y in w_space:
index += 1
if n_channels == 2:
crop_img = img[x:x + crop_sz, y:y + crop_sz]
else:
crop_img = img[x:x + crop_sz, y:y + crop_sz, :]
crop_img = np.ascontiguousarray(crop_img)
# var = np.var(crop_img / 255)
# if var > 0.008:
# print(img_name, index_str, var)
cv2.imwrite(
os.path.join(save_folder, img_name.replace('.png', '_s{:03d}.png'.format(index))),
crop_img, [cv2.IMWRITE_PNG_COMPRESSION, compression_level])
return 'Processing {:s} ...'.format(img_name)
if __name__ == '__main__':
main()
|
py
|
1a5ce4cd93734e7a6d03d5465d4a289ce5df565b
|
import shutil
import collections
import os
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
from mle_logging import MLELogger
time_tic1 = {"num_steps": 10, "num_epochs": 1}
stats_tic1 = {"train_loss": 0.1234, "test_loss": 0.1235}
time_tic2 = {"num_steps": 20, "num_epochs": 1}
stats_tic2 = {"train_loss": 0.2, "test_loss": 0.1}
time_tic3 = {"num_steps": 30, "num_epochs": 1}
stats_tic3 = {"train_loss": 0.223, "test_loss": 0.097}
time_tic4 = {"num_steps": 40, "num_epochs": 1}
stats_tic4 = {"train_loss": 0.123, "test_loss": 0.085}
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
model = DummyModel()
fig, ax = plt.subplots()
ax.plot(np.random.normal(0, 1, 20))
some_dict = {"hi": "there"}
log_config = {
"time_to_track": ["num_steps", "num_epochs"],
"what_to_track": ["train_loss", "test_loss"],
"experiment_dir": "reload_dir/",
"model_type": "torch",
"ckpt_time_to_track": "num_steps",
"save_every_k_ckpt": 2,
"save_top_k_ckpt": 2,
"top_k_metric_name": "test_loss",
"top_k_minimize_metric": True,
}
def test_reload():
"""Test reloading/continuation of previous log with top/every k."""
if os.path.exists(log_config["experiment_dir"]) and os.path.isdir(
log_config["experiment_dir"]
):
shutil.rmtree(log_config["experiment_dir"])
log = MLELogger(**log_config)
log.update(time_tic1, stats_tic1, model, fig, some_dict, save=True)
log.update(time_tic2, stats_tic2, model, fig, some_dict, save=True)
log.update(time_tic3, stats_tic3, model, fig, some_dict, save=True)
# Reload the previously instantiated logger from the directory
relog = MLELogger(**log_config, reload=True)
# Check correctness of checkpoints
assert collections.Counter(relog.model_log.top_k_ckpt_list) == collections.Counter(
[
"reload_dir/models/top_k/top_k_no_seed_provided_top_0.pt",
"reload_dir/models/top_k/top_k_no_seed_provided_top_1.pt",
]
)
assert collections.Counter(
relog.model_log.top_k_storage_time
) == collections.Counter([20, 30])
assert np.allclose(relog.model_log.top_k_performance, [0.097, 0.1])
assert collections.Counter(
relog.model_log.every_k_storage_time
) == collections.Counter([20])
assert collections.Counter(
relog.model_log.every_k_ckpt_list
) == collections.Counter(
["reload_dir/models/every_k/every_k_no_seed_provided_k_2.pt"]
)
# Check correctness of figure paths
assert collections.Counter(
relog.figure_log.fig_storage_paths
) == collections.Counter(
[
"reload_dir/figures/fig_1_no_seed_provided.png",
"reload_dir/figures/fig_2_no_seed_provided.png",
"reload_dir/figures/fig_3_no_seed_provided.png",
]
)
# Check correctness of extra paths
assert collections.Counter(
relog.extra_log.extra_storage_paths
) == collections.Counter(
[
"reload_dir/extra/extra_1_no_seed_provided.pkl",
"reload_dir/extra/extra_2_no_seed_provided.pkl",
"reload_dir/extra/extra_3_no_seed_provided.pkl",
]
)
# Check correctness of reloaded statistics
assert np.allclose(
relog.stats_log.stats_tracked["test_loss"], np.array([0.1235, 0.1, 0.097])
)
assert np.allclose(
relog.stats_log.clock_tracked["num_steps"], np.array([10, 20, 30])
)
# Add new result to log
relog.update(time_tic4, stats_tic4, model, fig, some_dict, save=True)
# Check correctness of figure paths
assert collections.Counter(
relog.figure_log.fig_storage_paths
) == collections.Counter(
[
"reload_dir/figures/fig_1_no_seed_provided.png",
"reload_dir/figures/fig_2_no_seed_provided.png",
"reload_dir/figures/fig_3_no_seed_provided.png",
"reload_dir/figures/fig_4_no_seed_provided.png",
]
)
# Check correctness of extra paths
assert collections.Counter(
relog.extra_log.extra_storage_paths
) == collections.Counter(
[
"reload_dir/extra/extra_1_no_seed_provided.pkl",
"reload_dir/extra/extra_2_no_seed_provided.pkl",
"reload_dir/extra/extra_3_no_seed_provided.pkl",
"reload_dir/extra/extra_4_no_seed_provided.pkl",
]
)
# Check correctness of reloaded statistics
assert np.allclose(
np.array(relog.stats_log.stats_tracked["test_loss"]),
np.array([0.1235, 0.1, 0.097, 0.085]),
)
assert np.allclose(
np.array(relog.stats_log.clock_tracked["num_steps"]),
np.array([10, 20, 30, 40]),
)
# Clean up/delete files
shutil.rmtree(log_config["experiment_dir"])
|
py
|
1a5ce5039992ffa588f2a30e557d61473d2b3ce1
|
from ._dynamodb_connection import DynamoDbConnection as ConnectionClass
from ._version import __version__
|
py
|
1a5ce543201ab40334c2bb91b8e0a36d3b0fe5fe
|
"""
Created on 15.04.2019 by Tatiana Korchuganova
Functions to transform data from a standard Grafana API response into a D3.js-compatible format
"""
def stacked_hist(series, group_by=None, split_series=None):
plot_data = {}
if not group_by:
return plot_data
if not split_series:
split_series_value = 'all'
split_series_list = []
tags = series[0]['tags'].keys()
if group_by in tags:
for s in series:
split_series_value = s['tags'][split_series] if split_series else split_series_value
if split_series_value not in split_series_list:
split_series_list.append(split_series_value)
if s['tags'][group_by] not in plot_data:
plot_data[s['tags'][group_by]] = {}
if split_series_value not in plot_data[s['tags'][group_by]]:
plot_data[s['tags'][group_by]][split_series_value] = 0
plot_data[s['tags'][group_by]][split_series_value] += s['values'][0][1]
# fill holes by 0 value
for gb, ssd in plot_data.items():
for s in split_series_list:
if s not in ssd.keys():
ssd[s] = 0
return plot_data
def pledges_merging(data, pledges, coeff, pledges_dict, federations_info, type='dst_federation'):
if type == 'dst_federation':
pl_type = 'real_federation'
for fed in data['results'][0]['series']:
# fed['values'][-1][1] = 0
if fed['tags'][type] not in federations_info:
federations_info[fed['tags'][type]] = {}
if fed['tags']['computingsite'] not in federations_info[fed['tags'][type]]:
federations_info[fed['tags'][type]][fed['tags']['computingsite']] = \
{'site': fed['tags']['dst_experiment_site'],
'computingsite': fed['tags']['computingsite'],
'tier': fed['tags']['dst_tier'],
'sum_hs06sec': int(round(float(sum_calculate(fed['values'], 1) / 86400))),
'sum_count': sum_calculate(fed['values'], 2),
'sum_cpuconsumptiontime': int(round(float(sum_calculate(fed['values'], 3) / 86400))),
'sum_walltime': int(round(float(sum_calculate(fed['values'], 4) / 86400)))
}
else:
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['site'] \
= fed['tags']['dst_experiment_site']
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['computingsite'] \
= fed['tags']['computingsite']
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['tier'] \
= fed['tags']['dst_tier']
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['sum_hs06sec'] \
+= int(round(float(sum_calculate(fed['values'], 1) / 86400)))
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['sum_count'] \
= sum_calculate(fed['values'], 2)
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['sum_cpuconsumptiontime'] \
+= int(round(float(sum_calculate(fed['values'], 3) / 86400)))
federations_info[fed['tags'][type]][fed['tags']['computingsite']]['sum_walltime'] \
+= int(round(float(sum_calculate(fed['values'], 4) / 86400)))
if fed['tags'][type] not in pledges_dict:
pledges_dict[fed['tags'][type]] = {}
pledges_dict[fed['tags'][type]]['tier'] = fed['tags']['dst_tier']
pledges_dict[fed['tags'][type]]["hs06sec"] = 0
pledges_dict[fed['tags'][type]]["pledges"] = 0
for value in fed['values']:
pledges_dict[fed['tags'][type]]['hs06sec'] += value[1]
for fed in pledges['results'][0]['series']:
# fed['values'][-1][1] = 0
if fed['tags'][pl_type] not in pledges_dict:
pledges_dict[fed['tags'][pl_type]] = {}
if fed['tags']['tier'] == 'Tier 0':
pledges_dict[fed['tags'][pl_type]]['tier'] = 0
elif fed['tags']['tier'] == 'Tier 1':
pledges_dict[fed['tags'][pl_type]]['tier'] = 1
elif fed['tags']['tier'] == 'Tier 2':
pledges_dict[fed['tags'][pl_type]]['tier'] = 2
elif fed['tags']['tier'] == 'Tier 3':
pledges_dict[fed['tags'][pl_type]]['tier'] = 3
pledges_dict[fed['tags'][pl_type]]["hs06sec"] = 0
pledges_dict[fed['tags'][pl_type]]["pledges"] = 0
for value in fed['values']:
pledges_dict[fed['tags'][pl_type]]['pledges'] += value[1]
return pledges_dict, federations_info
if type == 'dst_country':
pl_type = 'country'
for fed in data['results'][0]['series']:
# fed['values'][-1][1] = 0
if fed['tags'][type] == "United States of America":
fed['tags'][type] = "USA"
if fed['tags'][type] not in pledges_dict:
if fed['tags']['dst_federation'] in ('CH-CERN'):
fed['tags'][type] = 'CERN'
pledges_dict[fed['tags'][type]] = {}
pledges_dict[fed['tags'][type]]["hs06sec"] = 0
pledges_dict[fed['tags'][type]]["pledges"] = 0
for value in fed['values']:
pledges_dict[fed['tags'][type]]['hs06sec'] += value[1]
else:
if fed['tags']['dst_federation'] in ('CH-CERN'):
fed['tags'][type] = 'CERN'
for value in fed['values']:
pledges_dict[fed['tags'][type]]['hs06sec'] += value[1]
for fed in pledges['results'][0]['series']:
# fed['values'][-1][1] = 0
if fed['tags'][pl_type] not in pledges_dict:
# fed['values'][1] = fed['values'][2]
# pledges_dict[fed['tags'][pl_type]]['pledges'] = 0
if fed['tags'][pl_type] == 'Latin America':
fed['tags'][pl_type] = 'Chile'
if fed['tags']['real_federation'] in ('CH-CERN'):
fed['tags'][pl_type] = 'CERN'
if fed['tags'][pl_type] not in pledges_dict:
pledges_dict[fed['tags'][pl_type]] = {}
pledges_dict[fed['tags'][pl_type]]["hs06sec"] = 0
pledges_dict[fed['tags'][pl_type]]["pledges"] = 0
for value in fed['values']:
pledges_dict[fed['tags'][pl_type]]['pledges'] += value[1]
else:
if fed['tags'][pl_type] == 'Latin America':
fed['tags'][pl_type] = 'Chile'
if fed['tags']['real_federation'] in ('CH-CERN'):
fed['tags'][pl_type] = 'CERN'
for value in fed['values']:
pledges_dict[fed['tags'][pl_type]]['pledges'] += value[1]
return pledges_dict
def sum_calculate(data, column_number):
sum_for_column = 0
for value in data:
sum_for_column += value[column_number]
return sum_for_column
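# A brief usage sketch added for illustration; the tag names and values below are
# made up and only show the shape of a Grafana-style 'series' payload.
if __name__ == '__main__':
    _series = [
        {'tags': {'computingsite': 'SITE_A', 'jobstatus': 'finished'}, 'values': [[0, 10]]},
        {'tags': {'computingsite': 'SITE_A', 'jobstatus': 'failed'}, 'values': [[0, 2]]},
        {'tags': {'computingsite': 'SITE_B', 'jobstatus': 'finished'}, 'values': [[0, 7]]},
    ]
    # Expected: {'SITE_A': {'finished': 10, 'failed': 2}, 'SITE_B': {'finished': 7, 'failed': 0}}
    print(stacked_hist(_series, group_by='computingsite', split_series='jobstatus'))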
|
py
|
1a5ce5d0317084c6402560616e452828e74eb91c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMemberDataTagQueryModel(object):
def __init__(self):
self._isv_pid = None
self._shop_id = None
self._user_id = None
@property
def isv_pid(self):
return self._isv_pid
@isv_pid.setter
def isv_pid(self, value):
self._isv_pid = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.isv_pid:
if hasattr(self.isv_pid, 'to_alipay_dict'):
params['isv_pid'] = self.isv_pid.to_alipay_dict()
else:
params['isv_pid'] = self.isv_pid
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiMemberDataTagQueryModel()
if 'isv_pid' in d:
o.isv_pid = d['isv_pid']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
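# A brief usage sketch added for illustration; the identifier values are made up.
if __name__ == '__main__':
    _model = KoubeiMemberDataTagQueryModel.from_alipay_dict(
        {'isv_pid': '2088000000000000', 'shop_id': 'SHOP_001', 'user_id': 'USER_001'})
    # Round-trips back to the plain dict shape expected by the Alipay request body.
    print(_model.to_alipay_dict())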
|
py
|
1a5ce5da55ffaf4c0cde7ac1567fd841c35a1c7f
|
# Copyright 2021 Injective Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Injective Exchange API client for Python. Example only."""
import asyncio
import logging
from pyinjective.async_client import AsyncClient
from pyinjective.constant import Network
async def main() -> None:
network = Network.testnet()
client = AsyncClient(network, insecure=False)
market_id = "0x4ca0f92fc28be0c9761326016b5a1a2177dd6375558365116b5bdda9abc229ce"
subaccount_id = "0xc6fe5d33615a1c52c08018c47e8bc53646a0e101000000000000000000000000"
trades = await client.get_derivative_trades(
market_id=market_id,
subaccount_id=subaccount_id
)
print(trades)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
asyncio.get_event_loop().run_until_complete(main())
|
py
|
1a5ce600e560acd257f18ec2f2a88e43d12f3f04
|
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
# ==============================================================================
# Authors: Patrick Lehmann
#
# Python functions: A streaming VHDL parser
#
# Description:
# ------------------------------------
# TODO:
#
# License:
# ==============================================================================
# Copyright 2017-2020 Patrick Lehmann - Boetzingen, Germany
# Copyright 2016-2017 Patrick Lehmann - Dresden, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
import pyVHDLParser.Blocks.InterfaceObject
from pyVHDLParser.Blocks.Common import LinebreakBlock, EmptyLineBlock, WhitespaceBlock, IndentationBlock
from pyVHDLParser.Blocks.Comment import SingleLineCommentBlock, MultiLineCommentBlock
from pyVHDLParser.Blocks import StartOfDocumentBlock, EndOfDocumentBlock
from pyVHDLParser.Blocks.Structural import Entity
from pyVHDLParser.Blocks.List import PortList
from test.TestCase import TestCase as TestCaseBase
from test.Counter import Counter
class TestCase(TestCaseBase):
__NAME__ = "Port lists"
__FILENAME__ = "PortList.vhdl"
def __init__(self):
pass
@classmethod
def GetExpectedBlocks(cls):
counter = cls.GetExpectedBlocksAfterStrip()
counter.AddType(EmptyLineBlock, 8)
counter.AddType(LinebreakBlock, 37)
counter.AddType(IndentationBlock, 18)
counter.AddType(WhitespaceBlock, 3)
return counter
@classmethod
def GetExpectedBlocksAfterStrip(cls):
counter = Counter()
counter.AddType(StartOfDocumentBlock, 1)
counter.AddType(Entity.NameBlock, 9)
counter.AddType(PortList.OpenBlock, 9)
counter.AddType(pyVHDLParser.Blocks.InterfaceObject.InterfaceSignalBlock, 13)
counter.AddType(PortList.DelimiterBlock, 4)
counter.AddType(PortList.CloseBlock, 9)
counter.AddType(Entity.EndBlock, 9)
counter.AddType(EndOfDocumentBlock, 1)
return counter
|
py
|
1a5ce81b88d3ca6ae84235c51c5101de32626c78
|
import gym
def get_env_monitor(env):
"""
Args:
env: gym.Env. The wrapped environment.
Returns:
the `gym.wrappers.Monitor` around env
Raises:
`ValueError` if env is not wrapper by Monitor
"""
currentenv = env
while True:
if "Monitor" in currentenv.__class__.__name__:
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
raise ValueError("Couldn't find wrapper named Monitor")
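# A brief usage sketch added for illustration; it assumes a gym version that still
# ships gym.wrappers.Monitor and writes to a throwaway output directory.
if __name__ == '__main__':
    monitored = gym.wrappers.Monitor(gym.make('CartPole-v1'), '/tmp/monitor_demo', force=True)
    # get_env_monitor unwraps the chain until it finds the Monitor wrapper and returns it.
    print(get_env_monitor(monitored))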
|
py
|
1a5ce84e81ace4e629fde13dbdaba7867f560ec1
|
'''
Let’s say I give you a list saved in a variable: a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100].
Write one line of Python that takes this list a and makes a new list that has only the even elements of this list in it.
'''
def main():
a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
b = [num for num in a if num % 2 == 0]
print(b)
if __name__ == '__main__' :
main()
|
py
|
1a5ce8795c024ff64b51122dc3ff88453be407a6
|
# salariedemployee.py
"""SalariedEmployee concrete subclass of Employee."""
from decimal import Decimal
from employee import Employee
class SalariedEmployee(Employee):
"""Class representing an employee who gets paid a weekly salary."""
def __init__(self, first_name, last_name, ssn, weekly_salary):
"""Initialize SalariedEmployee attributes."""
super().__init__(first_name, last_name, ssn)
self.weekly_salary = weekly_salary
@property
def weekly_salary(self):
return self._weekly_salary
@weekly_salary.setter
def weekly_salary(self, salary):
"""Set weekly_salary or raise ValueError if invalid."""
if salary < Decimal('0.0'):
            raise ValueError('weekly salary must be >= 0.0')
self._weekly_salary = salary
def earnings(self):
"""Calculate earnings."""
return self.weekly_salary
def __repr__(self):
"""Return string representation for repr()."""
return ('SalariedEmployee: ' + super().__repr__() +
f'\nweekly salary: {self.weekly_salary:.2f}')
##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
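# A brief usage sketch added for illustration; it assumes the Employee base class
# from the accompanying employee module accepts (first_name, last_name, ssn).
if __name__ == '__main__':
    _s = SalariedEmployee('Maria', 'Green', '888-88-8888', Decimal('800.00'))
    print(_s)             # repr shows the inherited fields plus the weekly salary
    print(_s.earnings())  # 800.00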
|