repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
andrewjrobinson/FreeCAD_sf_master | src/Mod/Draft/DraftTrackers.py | 1 | 34739 | #***************************************************************************
#* *
#* Copyright (c) 2011 *
#* Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="FreeCAD Draft Trackers"
__author__ = "Yorik van Havre"
__url__ = "http://www.freecadweb.org"
import FreeCAD,FreeCADGui,math,Draft, DraftVecUtils
from FreeCAD import Vector
from pivy import coin
class Tracker:
"A generic Draft Tracker, to be used by other specific trackers"
def __init__(self,dotted=False,scolor=None,swidth=None,children=[],ontop=False):
global Part, DraftGeomUtils
import Part, DraftGeomUtils
self.ontop = ontop
color = coin.SoBaseColor()
color.rgb = scolor or FreeCADGui.draftToolBar.getDefaultColor("ui")
drawstyle = coin.SoDrawStyle()
if swidth:
drawstyle.lineWidth = swidth
if dotted:
drawstyle.style = coin.SoDrawStyle.LINES
drawstyle.lineWeight = 3
drawstyle.linePattern = 0x0f0f #0xaa
node = coin.SoSeparator()
for c in [drawstyle, color] + children:
node.addChild(c)
self.switch = coin.SoSwitch() # this is the on/off switch
self.switch.addChild(node)
self.switch.whichChild = -1
self.Visible = False
from DraftGui import todo
todo.delay(self._insertSwitch, self.switch)
def finalize(self):
from DraftGui import todo
todo.delay(self._removeSwitch, self.switch)
self.switch = None
def _insertSwitch(self, switch):
'''insert self.switch into the scene graph. Must not be called
from an event handler (or other scene graph traversal).'''
sg=Draft.get3DView().getSceneGraph()
if self.ontop:
sg.insertChild(switch,0)
else:
sg.addChild(switch)
def _removeSwitch(self, switch):
'''remove self.switch from the scene graph. As with _insertSwitch,
must not be called during scene graph traversal.'''
sg=Draft.get3DView().getSceneGraph()
sg.removeChild(switch)
def on(self):
self.switch.whichChild = 0
self.Visible = True
def off(self):
self.switch.whichChild = -1
self.Visible = False
def lowerTracker(self):
'''lowers the tracker to the bottom of the scenegraph, so
it doesn't obscure the other objects'''
if self.switch:
sg=Draft.get3DView().getSceneGraph()
sg.removeChild(self.switch)
sg.addChild(self.switch)
def raiseTracker(self):
'''raises the tracker to the top of the scenegraph, so
it obscures the other objects'''
if self.switch:
sg=Draft.get3DView().getSceneGraph()
sg.removeChild(self.switch)
sg.insertChild(self.switch,0)
class snapTracker(Tracker):
"A Snap Mark tracker, used by tools that support snapping"
def __init__(self):
color = coin.SoBaseColor()
color.rgb = FreeCADGui.draftToolBar.getDefaultColor("snap")
self.marker = coin.SoMarkerSet() # this is the marker symbol
self.marker.markerIndex = coin.SoMarkerSet.CIRCLE_FILLED_9_9
self.coords = coin.SoCoordinate3() # this is the coordinate
self.coords.point.setValue((0,0,0))
node = coin.SoAnnotation()
node.addChild(self.coords)
node.addChild(color)
node.addChild(self.marker)
Tracker.__init__(self,children=[node])
def setMarker(self,style):
if (style == "square"):
self.marker.markerIndex = coin.SoMarkerSet.DIAMOND_FILLED_9_9
elif (style == "circle"):
self.marker.markerIndex = coin.SoMarkerSet.CIRCLE_LINE_9_9
elif (style == "quad"):
self.marker.markerIndex = coin.SoMarkerSet.SQUARE_FILLED_9_9
elif (style == "empty"):
self.marker.markerIndex = coin.SoMarkerSet.SQUARE_LINE_9_9
else:
self.marker.markerIndex = coin.SoMarkerSet.CIRCLE_FILLED_9_9
def setCoords(self,point):
self.coords.point.setValue((point.x,point.y,point.z))
class lineTracker(Tracker):
"A Line tracker, used by the tools that need to draw temporary lines"
def __init__(self,dotted=False,scolor=None,swidth=None):
line = coin.SoLineSet()
line.numVertices.setValue(2)
self.coords = coin.SoCoordinate3() # this is the coordinate
self.coords.point.setValues(0,2,[[0,0,0],[1,0,0]])
Tracker.__init__(self,dotted,scolor,swidth,[self.coords,line])
def p1(self,point=None):
"sets or gets the first point of the line"
if point:
self.coords.point.set1Value(0,point.x,point.y,point.z)
else:
return Vector(self.coords.point.getValues()[0].getValue())
def p2(self,point=None):
"sets or gets the second point of the line"
if point:
self.coords.point.set1Value(1,point.x,point.y,point.z)
else:
return Vector(self.coords.point.getValues()[-1].getValue())
def getLength(self):
"returns the length of the line"
p1 = Vector(self.coords.point.getValues()[0].getValue())
p2 = Vector(self.coords.point.getValues()[-1].getValue())
return (p2.sub(p1)).Length
class rectangleTracker(Tracker):
"A Rectangle tracker, used by the rectangle tool"
def __init__(self,dotted=False,scolor=None,swidth=None,face=False):
self.origin = Vector(0,0,0)
line = coin.SoLineSet()
line.numVertices.setValue(5)
self.coords = coin.SoCoordinate3() # this is the coordinate
self.coords.point.setValues(0,50,[[0,0,0],[2,0,0],[2,2,0],[0,2,0],[0,0,0]])
if face:
m1 = coin.SoMaterial()
m1.transparency.setValue(0.5)
m1.diffuseColor.setValue([0.5,0.5,1.0])
f = coin.SoIndexedFaceSet()
f.coordIndex.setValues([0,1,2,3])
Tracker.__init__(self,dotted,scolor,swidth,[self.coords,line,m1,f])
else:
Tracker.__init__(self,dotted,scolor,swidth,[self.coords,line])
self.u = FreeCAD.DraftWorkingPlane.u
self.v = FreeCAD.DraftWorkingPlane.v
def setorigin(self,point):
"sets the base point of the rectangle"
self.coords.point.set1Value(0,point.x,point.y,point.z)
self.coords.point.set1Value(4,point.x,point.y,point.z)
self.origin = point
def update(self,point):
"sets the opposite (diagonal) point of the rectangle"
diagonal = point.sub(self.origin)
inpoint1 = self.origin.add(DraftVecUtils.project(diagonal,self.v))
inpoint2 = self.origin.add(DraftVecUtils.project(diagonal,self.u))
self.coords.point.set1Value(1,inpoint1.x,inpoint1.y,inpoint1.z)
self.coords.point.set1Value(2,point.x,point.y,point.z)
self.coords.point.set1Value(3,inpoint2.x,inpoint2.y,inpoint2.z)
def setPlane(self,u,v=None):
'''sets the given (u,v) vectors as the working plane. You can give only u,
and v will be deduced automatically from the current working plane'''
self.u = u
if v:
self.v = v
else:
norm = FreeCAD.DraftWorkingPlane.u.cross(FreeCAD.DraftWorkingPlane.v)
self.v = self.u.cross(norm)
def p1(self,point=None):
"sets or gets the base point of the rectangle"
if point:
self.setorigin(point)
else:
return Vector(self.coords.point.getValues()[0].getValue())
def p2(self):
"gets the second point (on u axis) of the rectangle"
return Vector(self.coords.point.getValues()[3].getValue())
def p3(self,point=None):
"sets or gets the opposite (diagonal) point of the rectangle"
if point:
self.update(point)
else:
return Vector(self.coords.point.getValues()[2].getValue())
def p4(self):
"gets the fourth point (on v axis) of the rectangle"
return Vector(self.coords.point.getValues()[1].getValue())
def getSize(self):
"returns (length,width) of the rectangle"
p1 = Vector(self.coords.point.getValues()[0].getValue())
p2 = Vector(self.coords.point.getValues()[2].getValue())
diag = p2.sub(p1)
return ((DraftVecUtils.project(diag,self.u)).Length,(DraftVecUtils.project(diag,self.v)).Length)
def getNormal(self):
"returns the normal of the rectangle"
return (self.u.cross(self.v)).normalize()
class dimTracker(Tracker):
"A Dimension tracker, used by the dimension tool"
def __init__(self,dotted=False,scolor=None,swidth=None):
line = coin.SoLineSet()
line.numVertices.setValue(4)
self.coords = coin.SoCoordinate3() # this is the coordinate
self.coords.point.setValues(0,4,[[0,0,0],[0,0,0],[0,0,0],[0,0,0]])
Tracker.__init__(self,dotted,scolor,swidth,[self.coords,line])
self.p1 = self.p2 = self.p3 = None
def update(self,pts):
if not pts:
return
elif len(pts) == 1:
self.p3 = pts[0]
else:
self.p1 = pts[0]
self.p2 = pts[1]
if len(pts) > 2:
self.p3 = pts[2]
self.calc()
def calc(self):
import Part
if (self.p1 != None) and (self.p2 != None):
points = [DraftVecUtils.tup(self.p1,True),DraftVecUtils.tup(self.p2,True),\
DraftVecUtils.tup(self.p1,True),DraftVecUtils.tup(self.p2,True)]
if self.p3 != None:
p1 = self.p1
p4 = self.p2
if DraftVecUtils.equals(p1,p4):
proj = None
else:
base = Part.Line(p1,p4).toShape()
proj = DraftGeomUtils.findDistance(self.p3,base)
if not proj:
p2 = p1
p3 = p4
else:
p2 = p1.add(proj.negative())
p3 = p4.add(proj.negative())
points = [DraftVecUtils.tup(p1),DraftVecUtils.tup(p2),DraftVecUtils.tup(p3),DraftVecUtils.tup(p4)]
self.coords.point.setValues(0,4,points)
class bsplineTracker(Tracker):
"A bspline tracker"
def __init__(self,dotted=False,scolor=None,swidth=None,points = []):
self.bspline = None
self.points = points
self.trans = coin.SoTransform()
self.sep = coin.SoSeparator()
self.recompute()
Tracker.__init__(self,dotted,scolor,swidth,[self.trans,self.sep])
def update(self, points):
self.points = points
self.recompute()
def recompute(self):
if (len(self.points) >= 2):
if self.bspline: self.sep.removeChild(self.bspline)
self.bspline = None
c = Part.BSplineCurve()
# DNC: allows closing the curve by placing its ends close to each other
if ( len(self.points) >= 3 ) and ( (self.points[0] - self.points[-1]).Length < Draft.tolerance() ):
# YVH: Added a try to bypass some hazardous situations
try:
c.interpolate(self.points[:-1], True)
except:
pass
elif self.points:
try:
c.interpolate(self.points, False)
except:
pass
c = c.toShape()
buf=c.writeInventor(2,0.01)
#fp=open("spline.iv","w")
#fp.write(buf)
#fp.close()
try:
ivin = coin.SoInput()
ivin.setBuffer(buf)
ivob = coin.SoDB.readAll(ivin)
except:
# workaround for pivy SoInput.setBuffer() bug
import re
buf = buf.replace("\n","")
pts = re.findall("point \[(.*?)\]",buf)[0]
pts = pts.split(",")
pc = []
for p in pts:
v = p.strip().split()
pc.append([float(v[0]),float(v[1]),float(v[2])])
coords = coin.SoCoordinate3()
coords.point.setValues(0,len(pc),pc)
line = coin.SoLineSet()
line.numVertices.setValue(-1)
self.bspline = coin.SoSeparator()
self.bspline.addChild(coords)
self.bspline.addChild(line)
self.sep.addChild(self.bspline)
else:
if ivob and ivob.getNumChildren() > 1:
self.bspline = ivob.getChild(1).getChild(0)
self.bspline.removeChild(self.bspline.getChild(0))
self.bspline.removeChild(self.bspline.getChild(0))
self.sep.addChild(self.bspline)
else:
FreeCAD.Console.PrintWarning("bsplineTracker.recompute() failed to read-in Inventor string\n")
class arcTracker(Tracker):
"An arc tracker"
def __init__(self,dotted=False,scolor=None,swidth=None,start=0,end=math.pi*2):
self.circle = None
self.startangle = math.degrees(start)
self.endangle = math.degrees(end)
self.trans = coin.SoTransform()
self.trans.translation.setValue([0,0,0])
self.sep = coin.SoSeparator()
self.recompute()
Tracker.__init__(self,dotted,scolor,swidth,[self.trans, self.sep])
def setCenter(self,cen):
"sets the center point"
self.trans.translation.setValue([cen.x,cen.y,cen.z])
def setRadius(self,rad):
"sets the radius"
self.trans.scaleFactor.setValue([rad,rad,rad])
def getRadius(self):
"returns the current radius"
return self.trans.scaleFactor.getValue()[0]
def setStartAngle(self,ang):
"sets the start angle"
self.startangle = math.degrees(ang)
self.recompute()
def setEndAngle(self,ang):
"sets the end angle"
self.endangle = math.degrees(ang)
self.recompute()
def getAngle(self,pt):
"returns the angle of a given vector"
c = self.trans.translation.getValue()
center = Vector(c[0],c[1],c[2])
base = FreeCAD.DraftWorkingPlane.u
rad = pt.sub(center)
return(DraftVecUtils.angle(rad,base,FreeCAD.DraftWorkingPlane.axis))
def getAngles(self):
"returns the start and end angles"
return(self.startangle,self.endangle)
def setStartPoint(self,pt):
"sets the start angle from a point"
self.setStartAngle(-self.getAngle(pt))
def setEndPoint(self,pt):
"sets the end angle from a point"
self.setEndAngle(self.getAngle(pt))
def setApertureAngle(self,ang):
"sets the end angle by giving the aperture angle"
ap = math.degrees(ang)
self.endangle = self.startangle + ap
self.recompute()
def recompute(self):
import Part,re
if self.circle: self.sep.removeChild(self.circle)
self.circle = None
if self.endangle < self.startangle:
c = Part.makeCircle(1,Vector(0,0,0),FreeCAD.DraftWorkingPlane.axis,self.endangle,self.startangle)
else:
c = Part.makeCircle(1,Vector(0,0,0),FreeCAD.DraftWorkingPlane.axis,self.startangle,self.endangle)
buf=c.writeInventor(2,0.01)
try:
ivin = coin.SoInput()
ivin.setBuffer(buf)
ivob = coin.SoDB.readAll(ivin)
except:
# workaround for pivy SoInput.setBuffer() bug
buf = buf.replace("\n","")
pts = re.findall("point \[(.*?)\]",buf)[0]
pts = pts.split(",")
pc = []
for p in pts:
v = p.strip().split()
pc.append([float(v[0]),float(v[1]),float(v[2])])
coords = coin.SoCoordinate3()
coords.point.setValues(0,len(pc),pc)
line = coin.SoLineSet()
line.numVertices.setValue(-1)
self.circle = coin.SoSeparator()
self.circle.addChild(coords)
self.circle.addChild(line)
self.sep.addChild(self.circle)
else:
if ivob and ivob.getNumChildren() > 1:
self.circle = ivob.getChild(1).getChild(0)
self.circle.removeChild(self.circle.getChild(0))
self.circle.removeChild(self.circle.getChild(0))
self.sep.addChild(self.circle)
else:
FreeCAD.Console.PrintWarning("arcTracker.recompute() failed to read-in Inventor string\n")
class ghostTracker(Tracker):
'''A Ghost tracker that allows copying whole object representations.
You can pass it an object, a list of objects, or a shape.'''
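# Hedged usage sketch (FreeCADGui.Selection.getSelection() is standard FreeCAD API; the
# rest is only illustrative): build a ghost of the current selection and shift it.
#
# ghost = ghostTracker(FreeCADGui.Selection.getSelection())
# ghost.on()
# ghost.move(FreeCAD.Vector(10, 0, 0))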
def __init__(self,sel):
self.trans = coin.SoTransform()
self.trans.translation.setValue([0,0,0])
self.children = [self.trans]
rootsep = coin.SoSeparator()
if not isinstance(sel,list):
sel = [sel]
for obj in sel:
rootsep.addChild(self.getNode(obj))
self.children.append(rootsep)
Tracker.__init__(self,children=self.children)
def update(self,obj):
"recreates the ghost from a new object"
obj.ViewObject.show()
self.finalize()
sep = self.getNode(obj)
Tracker.__init__(self,children=[sep])
self.on()
obj.ViewObject.hide()
def move(self,delta):
"moves the ghost to a given position, relative from its start position"
self.trans.translation.setValue([delta.x,delta.y,delta.z])
def rotate(self,axis,angle):
"rotates the ghost by a given angle"
self.trans.rotation.setValue(coin.SbVec3f(DraftVecUtils.tup(axis)),angle)
def center(self,point):
"sets the rotation/scale center of the ghost"
self.trans.center.setValue(point.x,point.y,point.z)
def scale(self,delta):
"scales the ghost by the given factor"
self.trans.scaleFactor.setValue([delta.x,delta.y,delta.z])
def getNode(self,obj):
"returns a coin node representing the given object"
if isinstance(obj,Part.Shape):
return self.getNodeLight(obj)
elif obj.isDerivedFrom("Part::Feature"):
return self.getNodeFull(obj)
else:
return self.getNodeFull(obj)
def getNodeFull(self,obj):
"gets a coin node which is a full copy of the current representation"
sep = coin.SoSeparator()
try:
sep.addChild(obj.ViewObject.RootNode.copy())
except:
pass
return sep
def getNodeLight(self,shape):
"extract a lighter version directly from a shape"
# very error-prone, will be obsoleted ASAP
sep = coin.SoSeparator()
try:
inputstr = coin.SoInput()
inputstr.setBuffer(shape.writeInventor())
coinobj = coin.SoDB.readAll(inputstr)
# only add wireframe or full node?
sep.addChild(coinobj.getChildren()[1])
# sep.addChild(coinobj)
except:
print("Error retrieving coin node")
return sep
class editTracker(Tracker):
"A node edit tracker"
def __init__(self,pos=Vector(0,0,0),name="None",idx=0,objcol=None):
color = coin.SoBaseColor()
if objcol:
color.rgb = objcol[:3]
else:
color.rgb = FreeCADGui.draftToolBar.getDefaultColor("snap")
self.marker = coin.SoMarkerSet() # this is the marker symbol
self.marker.markerIndex = coin.SoMarkerSet.SQUARE_FILLED_9_9
self.coords = coin.SoCoordinate3() # this is the coordinate
self.coords.point.setValue((pos.x,pos.y,pos.z))
selnode = coin.SoType.fromName("SoFCSelection").createInstance()
selnode.documentName.setValue(FreeCAD.ActiveDocument.Name)
selnode.objectName.setValue(name)
selnode.subElementName.setValue("EditNode"+str(idx))
node = coin.SoAnnotation()
selnode.addChild(self.coords)
selnode.addChild(color)
selnode.addChild(self.marker)
node.addChild(selnode)
Tracker.__init__(self,children=[node],ontop=True)
self.on()
def set(self,pos):
self.coords.point.setValue((pos.x,pos.y,pos.z))
def get(self):
p = self.coords.point.getValues()[0]
return Vector(p[0],p[1],p[2])
def move(self,delta):
self.set(self.get().add(delta))
class PlaneTracker(Tracker):
"A working plane tracker"
def __init__(self):
# getting screen distance
p1 = Draft.get3DView().getPoint((100,100))
p2 = Draft.get3DView().getPoint((110,100))
bl = (p2.sub(p1)).Length * (Draft.getParam("snapRange",5)/2)
pick = coin.SoPickStyle()
pick.style.setValue(coin.SoPickStyle.UNPICKABLE)
self.trans = coin.SoTransform()
self.trans.translation.setValue([0,0,0])
m1 = coin.SoMaterial()
m1.transparency.setValue(0.8)
m1.diffuseColor.setValue([0.4,0.4,0.6])
c1 = coin.SoCoordinate3()
c1.point.setValues([[-bl,-bl,0],[bl,-bl,0],[bl,bl,0],[-bl,bl,0]])
f = coin.SoIndexedFaceSet()
f.coordIndex.setValues([0,1,2,3])
m2 = coin.SoMaterial()
m2.transparency.setValue(0.7)
m2.diffuseColor.setValue([0.2,0.2,0.3])
c2 = coin.SoCoordinate3()
c2.point.setValues([[0,bl,0],[0,0,0],[bl,0,0],[-.05*bl,.95*bl,0],[0,bl,0],
[.05*bl,.95*bl,0],[.95*bl,.05*bl,0],[bl,0,0],[.95*bl,-.05*bl,0]])
l = coin.SoLineSet()
l.numVertices.setValues([3,3,3])
s = coin.SoSeparator()
s.addChild(pick)
s.addChild(self.trans)
s.addChild(m1)
s.addChild(c1)
s.addChild(f)
s.addChild(m2)
s.addChild(c2)
s.addChild(l)
Tracker.__init__(self,children=[s])
def set(self,pos=None):
if pos:
Q = FreeCAD.DraftWorkingPlane.getRotation().Rotation.Q
else:
plm = FreeCAD.DraftWorkingPlane.getPlacement()
Q = plm.Rotation.Q
pos = plm.Base
self.trans.translation.setValue([pos.x,pos.y,pos.z])
self.trans.rotation.setValue([Q[0],Q[1],Q[2],Q[3]])
self.on()
class wireTracker(Tracker):
"A wire tracker"
def __init__(self,wire):
self.line = coin.SoLineSet()
self.closed = DraftGeomUtils.isReallyClosed(wire)
if self.closed:
self.line.numVertices.setValue(len(wire.Vertexes)+1)
else:
self.line.numVertices.setValue(len(wire.Vertexes))
self.coords = coin.SoCoordinate3()
self.update(wire)
Tracker.__init__(self,children=[self.coords,self.line])
def update(self,wire,forceclosed=False):
if wire:
if self.closed or forceclosed:
self.line.numVertices.setValue(len(wire.Vertexes)+1)
else:
self.line.numVertices.setValue(len(wire.Vertexes))
for i in range(len(wire.Vertexes)):
p=wire.Vertexes[i].Point
self.coords.point.set1Value(i,[p.x,p.y,p.z])
if self.closed or forceclosed:
t = len(wire.Vertexes)
p = wire.Vertexes[0].Point
self.coords.point.set1Value(t,[p.x,p.y,p.z])
class gridTracker(Tracker):
"A grid tracker"
def __init__(self):
# self.space = 1
self.space = Draft.getParam("gridSpacing",1)
# self.mainlines = 10
self.mainlines = Draft.getParam("gridEvery",10)
self.numlines = 100
col = [0.2,0.2,0.3]
pick = coin.SoPickStyle()
pick.style.setValue(coin.SoPickStyle.UNPICKABLE)
self.trans = coin.SoTransform()
self.trans.translation.setValue([0,0,0])
bound = (self.numlines/2)*self.space
pts = []
mpts = []
apts = []
for i in range(self.numlines+1):
curr = -bound + i*self.space
z = 0
if i/float(self.mainlines) == i/self.mainlines:
if round(curr,4) == 0:
apts.extend([[-bound,curr,z],[bound,curr,z]])
apts.extend([[curr,-bound,z],[curr,bound,z]])
else:
mpts.extend([[-bound,curr,z],[bound,curr,z]])
mpts.extend([[curr,-bound,z],[curr,bound,z]])
else:
pts.extend([[-bound,curr,z],[bound,curr,z]])
pts.extend([[curr,-bound,z],[curr,bound,z]])
idx = []
midx = []
aidx = []
for p in range(0,len(pts),2):
idx.append(2)
for mp in range(0,len(mpts),2):
midx.append(2)
for ap in range(0,len(apts),2):
aidx.append(2)
mat1 = coin.SoMaterial()
mat1.transparency.setValue(0.7)
mat1.diffuseColor.setValue(col)
self.coords1 = coin.SoCoordinate3()
self.coords1.point.setValues(pts)
lines1 = coin.SoLineSet()
lines1.numVertices.setValues(idx)
mat2 = coin.SoMaterial()
mat2.transparency.setValue(0.3)
mat2.diffuseColor.setValue(col)
self.coords2 = coin.SoCoordinate3()
self.coords2.point.setValues(mpts)
lines2 = coin.SoLineSet()
lines2.numVertices.setValues(midx)
mat3 = coin.SoMaterial()
mat3.transparency.setValue(0)
mat3.diffuseColor.setValue(col)
self.coords3 = coin.SoCoordinate3()
self.coords3.point.setValues(apts)
lines3 = coin.SoLineSet()
lines3.numVertices.setValues(aidx)
s = coin.SoSeparator()
s.addChild(pick)
s.addChild(self.trans)
s.addChild(mat1)
s.addChild(self.coords1)
s.addChild(lines1)
s.addChild(mat2)
s.addChild(self.coords2)
s.addChild(lines2)
s.addChild(mat3)
s.addChild(self.coords3)
s.addChild(lines3)
Tracker.__init__(self,children=[s])
self.update()
def update(self):
bound = (self.numlines/2)*self.space
pts = []
mpts = []
for i in range(self.numlines+1):
curr = -bound + i*self.space
if i/float(self.mainlines) == i/self.mainlines:
mpts.extend([[-bound,curr,0],[bound,curr,0]])
mpts.extend([[curr,-bound,0],[curr,bound,0]])
else:
pts.extend([[-bound,curr,0],[bound,curr,0]])
pts.extend([[curr,-bound,0],[curr,bound,0]])
self.coords1.point.setValues(pts)
self.coords2.point.setValues(mpts)
def setSpacing(self,space):
self.space = space
self.update()
def setMainlines(self,ml):
self.mainlines = ml
self.update()
def set(self):
Q = FreeCAD.DraftWorkingPlane.getRotation().Rotation.Q
P = FreeCAD.DraftWorkingPlane.position
self.trans.rotation.setValue([Q[0],Q[1],Q[2],Q[3]])
self.trans.translation.setValue([P.x,P.y,P.z])
self.on()
def getClosestNode(self,point):
"returns the closest node from the given point"
# get the 2D coords.
# point = FreeCAD.DraftWorkingPlane.projectPoint(point)
pt = FreeCAD.DraftWorkingPlane.getLocalCoords(point)
pu = (round(pt.x/self.space,0))*self.space
pv = (round(pt.y/self.space,0))*self.space
pt = FreeCAD.DraftWorkingPlane.getGlobalCoords(Vector(pu,pv,0))
return pt
class boxTracker(Tracker):
"A box tracker, can be based on a line object"
def __init__(self,line=None,width=0.1,height=1):
self.trans = coin.SoTransform()
m = coin.SoMaterial()
m.transparency.setValue(0.8)
m.diffuseColor.setValue([0.4,0.4,0.6])
self.cube = coin.SoCube()
self.cube.height.setValue(width)
self.cube.depth.setValue(height)
self.baseline = None
if line:
self.baseline = line
self.update()
Tracker.__init__(self,children=[self.trans,m,self.cube])
def update(self,line=None,normal=None):
import WorkingPlane, DraftGeomUtils
if not normal:
normal = FreeCAD.DraftWorkingPlane.axis
if line:
if isinstance(line,list):
bp = line[0]
lvec = line[1].sub(line[0])
else:
lvec = DraftGeomUtils.vec(line.Shape.Edges[0])
bp = line.Shape.Edges[0].Vertexes[0].Point
elif self.baseline:
lvec = DraftGeomUtils.vec(self.baseline.Shape.Edges[0])
bp = self.baseline.Shape.Edges[0].Vertexes[0].Point
else:
return
right = lvec.cross(normal)
self.cube.width.setValue(lvec.Length)
p = WorkingPlane.getPlacementFromPoints([bp,bp.add(lvec),bp.add(right)])
if p:
self.trans.rotation.setValue(p.Rotation.Q)
bp = bp.add(lvec.multiply(0.5))
bp = bp.add(DraftVecUtils.scaleTo(normal,self.cube.depth.getValue()/2))
self.pos(bp)
def setRotation(self,rot):
self.trans.rotation.setValue(rot.Q)
def pos(self,p):
self.trans.translation.setValue(DraftVecUtils.tup(p))
def width(self,w=None):
if w:
self.cube.height.setValue(w)
else:
return self.cube.height.getValue()
def length(self,l=None):
if l:
self.cube.width.setValue(l)
else:
return self.cube.width.getValue()
def height(self,h=None):
if h:
self.cube.depth.setValue(h)
self.update()
else:
return self.cube.depth.getValue()
class radiusTracker(Tracker):
"A tracker that displays a transparent sphere to indicate a radius"
def __init__(self,position=FreeCAD.Vector(0,0,0),radius=1):
self.trans = coin.SoTransform()
self.trans.translation.setValue([position.x,position.y,position.z])
m = coin.SoMaterial()
m.transparency.setValue(0.9)
m.diffuseColor.setValue([0,1,0])
self.sphere = coin.SoSphere()
self.sphere.radius.setValue(radius)
self.baseline = None
Tracker.__init__(self,children=[self.trans,m,self.sphere])
def update(self,arg1,arg2=None):
if isinstance(arg1,FreeCAD.Vector):
self.trans.translation.setValue([arg1.x,arg1.y,arg1.z])
else:
self.sphere.radius.setValue(arg1)
if arg2 != None:
if isinstance(arg2,FreeCAD.Vector):
self.trans.translation.setValue([arg2.x,arg2.y,arg2.z])
else:
self.sphere.radius.setValue(arg2)
class archDimTracker(Tracker):
"A wrapper around a Sketcher dim"
def __init__(self,p1=FreeCAD.Vector(0,0,0),p2=FreeCAD.Vector(1,0,0),mode=1):
import SketcherGui
self.dimnode = coin.SoType.fromName("SoDatumLabel").createInstance()
p1node = coin.SbVec3f([p1.x,p1.y,p1.z])
p2node = coin.SbVec3f([p2.x,p2.y,p2.z])
self.dimnode.pnts.setValues([p1node,p2node])
self.dimnode.lineWidth = 1
color = FreeCADGui.draftToolBar.getDefaultColor("snap")
self.dimnode.textColor.setValue(coin.SbVec3f(color))
self.setString()
self.setMode(mode)
Tracker.__init__(self,children=[self.dimnode])
def setString(self,text=None):
"sets the dim string to the given value or auto value"
self.dimnode.param1.setValue(.5)
p1 = Vector(self.dimnode.pnts.getValues()[0].getValue())
p2 = Vector(self.dimnode.pnts.getValues()[-1].getValue())
m = self.dimnode.datumtype.getValue()
if m == 2:
self.Distance = (DraftVecUtils.project(p2.sub(p1),Vector(1,0,0))).Length
elif m == 3:
self.Distance = (DraftVecUtils.project(p2.sub(p1),Vector(0,1,0))).Length
else:
self.Distance = (p2.sub(p1)).Length
if not text:
text = Draft.getParam("dimPrecision",2)
text = "%."+str(text)+"f"
text = (text % self.Distance)
self.dimnode.string.setValue(text)
def setMode(self,mode=1):
"""sets the mode: 0 = without lines (a simple mark), 1 =
aligned (default), 2 = horizontal, 3 = vertical."""
self.dimnode.datumtype.setValue(mode)
def p1(self,point=None):
"sets or gets the first point of the dim"
if point:
self.dimnode.pnts.set1Value(0,point.x,point.y,point.z)
self.setString()
else:
return Vector(self.dimnode.pnts.getValues()[0].getValue())
def p2(self,point=None):
"sets or gets the second point of the dim"
if point:
self.dimnode.pnts.set1Value(1,point.x,point.y,point.z)
self.setString()
else:
return Vector(self.dimnode.pnts.getValues()[-1].getValue())
| lgpl-2.1 | -1,483,052,015,789,805,300 | 37.945067 | 114 | 0.567259 | false |
CiscoSystems/os-sqe | lab/scenarios/servers_from_snapshot_scenario.py | 1 | 1179 | from lab.test_case_worker import TestCaseWorker
class ServersFromSnapshotScenario(TestCaseWorker):
ARG_MANDATORY_N_SERVERS = 'n_servers'
ARG_MANDATORY_UPTIME = 'uptime'
def check_arguments(self):
assert self.n_servers >= 1
assert self.uptime > 10
@property
def n_servers(self):
return self.args[self.ARG_MANDATORY_N_SERVERS]
@property
def uptime(self):
return self.args[self.ARG_MANDATORY_UPTIME]
def setup_worker(self):
pass
def loop_worker(self):
import time
from lab.cloud.cloud_server import CloudServer
self.log(self.STATUS_SERVER_CREATING + ' n=' + str(self.n_servers))
flavor = self.cloud.flavors[0]
image = self.cloud.images[0]
keypair = self.cloud.keypairs[0]
self.servers = CloudServer.create(how_many=self.n_servers, flavor=flavor.name, image=image.name, on_nets=[], key=keypair.name, timeout=self.timeout, cloud=self.cloud)
self.log('Waiting 30 sec to settle servers...')
time.sleep(30)
self.log(self.STATUS_SERVER_CREATED)
if str(self.uptime) != 'forever':
time.sleep(self.uptime)
| apache-2.0 | 719,308,940,948,195,300 | 30.864865 | 174 | 0.648855 | false |
kreatorkodi/repository.torrentbr | plugin.video.youtube/resources/lib/youtube_plugin/kodion/impl/xbmc/xbmc_context_ui.py | 1 | 5105 | __author__ = 'bromix'
from six import string_types
import xbmc
import xbmcgui
from ..abstract_context_ui import AbstractContextUI
from .xbmc_progress_dialog import XbmcProgressDialog
from .xbmc_progress_dialog_bg import XbmcProgressDialogBG
from ... import constants
from ... import utils
class XbmcContextUI(AbstractContextUI):
def __init__(self, xbmc_addon, context):
AbstractContextUI.__init__(self)
self._xbmc_addon = xbmc_addon
self._context = context
self._view_mode = None
def create_progress_dialog(self, heading, text=None, background=False):
if background and self._context.get_system_version().get_version() > (12, 3):
return XbmcProgressDialogBG(heading, text)
return XbmcProgressDialog(heading, text)
def set_view_mode(self, view_mode):
if isinstance(view_mode, string_types):
view_mode = self._context.get_settings().get_int(constants.setting.VIEW_X % view_mode, 50)
self._view_mode = view_mode
def get_view_mode(self):
if self._view_mode is not None:
return self._view_mode
return self._context.get_settings().get_int(constants.setting.VIEW_DEFAULT, 50)
def get_skin_id(self):
return xbmc.getSkinDir()
def on_keyboard_input(self, title, default='', hidden=False):
# fallback for Frodo
if self._context.get_system_version().get_version() <= (12, 3):
keyboard = xbmc.Keyboard(default, title, hidden)
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
text = utils.to_unicode(keyboard.getText())
return True, text
else:
return False, u''
# Starting with Gotham (13.X > ...)
dialog = xbmcgui.Dialog()
result = dialog.input(title, utils.to_unicode(default), type=xbmcgui.INPUT_ALPHANUM)
if result:
text = utils.to_unicode(result)
return True, text
return False, u''
def on_numeric_input(self, title, default=''):
dialog = xbmcgui.Dialog()
result = dialog.input(title, str(default), type=xbmcgui.INPUT_NUMERIC)
if result:
return True, int(result)
return False, None
def on_yes_no_input(self, title, text, nolabel='', yeslabel=''):
dialog = xbmcgui.Dialog()
return dialog.yesno(title, text, nolabel=nolabel, yeslabel=yeslabel)
def on_ok(self, title, text):
dialog = xbmcgui.Dialog()
return dialog.ok(title, text)
def on_remove_content(self, content_name):
text = self._context.localize(constants.localize.REMOVE_CONTENT) % utils.to_unicode(content_name)
return self.on_yes_no_input(self._context.localize(constants.localize.CONFIRM_REMOVE), text)
def on_delete_content(self, content_name):
text = self._context.localize(constants.localize.DELETE_CONTENT) % utils.to_unicode(content_name)
return self.on_yes_no_input(self._context.localize(constants.localize.CONFIRM_DELETE), text)
def on_select(self, title, items=[]):
_dict = {}
_items = []
i = 0
for item in items:
if isinstance(item, tuple):
_dict[i] = item[1]
_items.append(item[0])
else:
_dict[i] = i
_items.append(item)
i += 1
dialog = xbmcgui.Dialog()
result = dialog.select(title, _items)
return _dict.get(result, -1)
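# Hedged usage sketch (labels/values are hypothetical): items may be plain strings or
# (label, value) pairs; the value mapped to the chosen entry is returned, -1 on cancel.
#
# ui = XbmcContextUI(addon, context)
# choice = ui.on_select('Pick a quality', [('720p', 22), ('360p', 18)])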
def show_notification(self, message, header='', image_uri='', time_milliseconds=5000):
_header = header
if not _header:
_header = self._context.get_name()
_header = utils.to_utf8(_header)
_image = image_uri
if not _image:
_image = self._context.get_icon()
_message = utils.to_utf8(message)
try:
_message = _message.replace(',', ' ')
_message = _message.replace('\n', ' ')
except TypeError:
_message = _message.replace(b',', b' ')
_message = _message.replace(b'\n', b' ')
_message = utils.to_unicode(_message)
_header = utils.to_unicode(_header)
xbmc.executebuiltin("Notification(%s, %s, %d, %s)" % (_header, _message, time_milliseconds, _image))
def open_settings(self):
self._xbmc_addon.openSettings()
@staticmethod
def refresh_container():
xbmc.executebuiltin("Container.Refresh")
@staticmethod
def set_home_window_property(property_id, value):
property_id = 'plugin.video.youtube-' + property_id
xbmcgui.Window(10000).setProperty(property_id, value)
@staticmethod
def get_home_window_property(property_id):
property_id = 'plugin.video.youtube-' + property_id
return xbmcgui.Window(10000).getProperty(property_id) or None
@staticmethod
def clear_home_window_property(property_id):
property_id = 'plugin.video.youtube-' + property_id
xbmcgui.Window(10000).clearProperty(property_id)
| gpl-2.0 | 5,859,240,278,206,949,000 | 33.261745 | 108 | 0.610382 | false |
fernan9/LANGEBIO-Internship | BLAST_rbh_run.py | 1 | 3987 | #! /usr/bin/env python
""" DESCRIPTION
"""
import glob, sys, csv
from tabulate import tabulate
from Bio.Blast.Applications import NcbiblastpCommandline
"""--- FUNCTIONS ---"""
def carga_csv(file_name):
""" creates a list of lists from a csv file """
tabla = []
cr = csv.reader(open(file_name,"rb"))
for row in cr:
tabla.append(row)
return tabla
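# Hedged sketch of the expected CSV layout (organism names and file names are
# hypothetical; columns follow the tabulate headers used below:
# Organism, Code, Genome File, Database folder):
#
# Zea mays,Zm,Zea_mays.faa,db_Zm
# Oryza sativa,Os,Oryza_sativa.faa,db_Os
#
# carga_csv('organisms.csv') then returns one inner list per row, e.g.
# [['Zea mays', 'Zm', 'Zea_mays.faa', 'db_Zm'], ['Oryza sativa', 'Os', 'Oryza_sativa.faa', 'db_Os']]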
def crea_comparacion(tabla_ref, estructura = 'star', comparacion = 'bi'):
""" creates comparisons lists (code) depending on arguments """
lista = []
tabla = list(tabla_ref)
if estructura == 'star':
nodo = tabla.pop()
for organismo in tabla:
lista.append([nodo[1],organismo[1]])
if comparacion == 'bi':
lista.append([organismo[1], nodo[1]])
else:
comps = estructura.split(',')
for comp in comps:
pareja = comp.split('-')
query = tabla[int(pareja[0])][1]
db = tabla[int(pareja[1])][1]
lista.append([query, db])
if comparacion == 'bi':
lista.append([db, query])
return lista
def imprime_comparacion(listas):
""" prints the comparison as a readable format"""
print 'COMPARISONS\n-----------\n'
for lista in listas:
print lista[0] + ' --> ' + lista[1]
print '\n'
def imprime_referencia(claves):
""" prints the comparison as a readable format"""
print 'REFERENCE\n---------'
n = 0
for key, val in claves.items():
print n, '. ', key, '\t', val
n=n+1
print '\n'
def crea_diccionario(tabla):
""" creates a dictionary of code:organism"""
diccionario={}
for row in tabla:
diccionario[row[1]]=row[0]
return diccionario
"""--- PROGRAM BODY ---"""
print '----------------\nBLAST EVALUATION\n----------------'
blast_eval = 1e-05
comparison_list = []
# load csv file
nombre_csv = raw_input('Please enter the CSV file name: ')
organismos = carga_csv(nombre_csv)
referencia = crea_diccionario(organismos)
comparison_list = crea_comparacion(organismos)
# present csv data
print '\nCSV data\n--------'
print tabulate(organismos, headers=["Organism","Code", "Genome File", "Database folder"]) + '\n'
# present options: blast parameters, comparison parameters, run
while 1:
imprime_referencia(referencia)
imprime_comparacion(comparison_list)
print 'CHOOSE AN OPTION\n----------------\n1) Comparisons\n2) Run\n3) Quit'
user_in = raw_input('Option: ')
if user_in == '1':
imprime_referencia(referencia)
print ('Please enter the comparisons using the organism index.\n' +
'Format: "-" between indices; "," between comparisons; no spaces.\n')
nueva_comparacion = raw_input('Comparisons: ')
print 'Choose "bi" for bidirectional or "uni" for unidirectional; no quotation marks.'
tipo_comparacion = raw_input('Direction: ')
comparison_list = crea_comparacion(organismos, nueva_comparacion, tipo_comparacion)
elif user_in == '2':
blast_eval = raw_input('\nPlease write the desired E value for BLAST runs; 1e-5 suggested.\nE_value: ')
print '\nBLAST+ commands to be runned...\n'
break
elif user_in == '3': quit()
else: print ('Incorrect option, try again.\n')
# create commands for comparisons
comandos = []
for pair in comparison_list:
nombre = referencia[pair[0]].split()
comandos.append([(nombre[0]+'_'+nombre[1]+'.faa'), ('db_'+pair[1]+'/db_'+pair[1]), (pair[0]+'_'+pair[1]+'.xml')])
print tabulate(comandos, headers=["Genome file","Database", "Product file"]) + '\n'
raw_input('Press ENTER to continue')
# run commands, inform data created
for comando in comandos:
blastp_cline = NcbiblastpCommandline(query=comando[0], db=comando[1], evalue=blast_eval ,outfmt=5, out=comando[2])
print 'File ' + comando[2] + ' is currently in progess...'
stdout, stderr = blastp_cline()
print 'WORK COMPLETED\n--------------'
| gpl-2.0 | -2,226,153,138,521,786,600 | 34.598214 | 118 | 0.610986 | false |
CajetanP/code-learning | Python/Learning/Language/ifelse.py | 1 | 1729 | people = 30
cars = 40
trucks = 15
if cars > people:
print("Take the cars.")
elif cars < people:
print("Don't take the cars!")
else:
print("We can't decide ;/")
if trucks > cars:
print("Too many trucks!")
elif trucks < cars:
print("Maybe we could take the trucks?")
else:
print("We can't decide.")
if people > trucks:
print("Let's just take the trucks!")
else:
print("Let's stay home then :v")
"""
and
or
not
!!Prints from python 2, don't copy this shit :v
print "You enter a dark room with two doors. Do you go through door #1 or door #2?"
door = raw_input("> ")
if door == "1":
print "There's a giant bear here eating a cheese cake. What do you do?"
print "1. Take the cake."
print "2. Scream at the bear."
bear = raw_input("> ")
if bear == "1":
print "The bear eats your face off. Good job!"
elif bear == "2":
print "The bear eats your legs off. Good job!"
else:
print "Well, doing %s is probably better. Bear runs away." % bear
elif door == "2":
print "You stare into the endless abyss at Cthulhu's retina."
print "1. Blueberries."
print "2. Yellow jacket clothespins."
print "3. Understanding revolvers yelling melodies."
insanity = raw_input("> ")
if insanity == "1" or insanity == "2":
print "Your body survives powered by a mind of jello. Good job!"
else:
print "The insanity rots your eyes into a pool of muck. Good job!"
else:
print "You stumble around and fall on a knife and die. Good job!"
#How do I tell if a number is between a range of numbers?
# You have two options: Use 0 < x < 10 or 1 <= x < 10, which is classic notation, or use x in range(1, 10).
"""
| mit | 6,353,056,969,983,072,000 | 24.80597 | 109 | 0.622325 | false |
brayden2544/Mystuff-final | account/views/reset_password.py | 1 | 1223 | from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from account import models as amod
from . import templater
import datetime
def process_request(request):
'''Creates a reset password form to email a unique link to the user'''
user = amod.User.objects.get(email=request.urlparams[0])
key = request.urlparams[1]
now = datetime.datetime.utcnow()
exp_date = user.password_reset_date.replace(tzinfo=None)
if key != user.password_reset_key or now > exp_date:
return HttpResponseRedirect('/account/password_reset_invalid')
form = PasswordForm()
if request.method == 'POST':
form = PasswordForm(request.POST)
if form.is_valid():
password = form.cleaned_data['password']
print(user)
print(password)
user.set_password(password)
user.save()
return HttpResponseRedirect('/account/password_reset/')
tvars = {
'form': form,
}
return templater.render_to_response(request, 'reset_password.html', tvars)
class PasswordForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput)
| apache-2.0 | -6,399,470,654,394,678,000 | 30.358974 | 77 | 0.670482 | false |
rootio/rootio_web | alembic/env.py | 1 | 2308 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
import os,sys
parentdir = os.path.abspath(os.path.join('.', '.'))
sys.path.insert(0,parentdir)
from rootio.app import create_app
from rootio.extensions import db
app = create_app()
config.set_main_option("sqlalchemy.url", app.config["SQLALCHEMY_DATABASE_URI"])
target_metadata = db.Model.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
compare_type=True
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| agpl-3.0 | -2,669,275,381,323,216,400 | 27.146341 | 79 | 0.687608 | false |
sam-roth/Keypad | keypad/qt/qt_util.py | 1 | 2494 |
from PyQt4.Qt import *
from collections import namedtuple
import contextlib
from ..core.key import SimpleKeySequence
from ..core.color import Color
from ..abstract.textview import KeyEvent
from ..core.responder import Responder
from .. import api
import abc
import math
def set_tab_order(parent, widgets):
for first, second in zip(widgets, widgets[1:]):
parent.setTabOrder(first, second)
def qsizef_ceil(size):
'''
Converts a QSizeF to a QSize, rounding up.
:type size: PyQt4.Qt.QSizeF
'''
return QSize(
math.ceil(size.width()),
math.ceil(size.height())
)
class CloseEvent(object):
def __init__(self):
self.is_intercepted = False
def intercept(self):
self.is_intercepted = True
def marshal_key_event(event):
return KeyEvent(
key=SimpleKeySequence(
modifiers=event.modifiers() & ~Qt.KeypadModifier,
keycode=event.key()
),
text=event.text().replace('\r', '\n')
)
def to_q_key_sequence(key_seq):
return QKeySequence(key_seq.keycode | key_seq.modifiers)
def to_q_color(color):
if isinstance(color, QColor):
return color
r,g,b,a = Color.from_hex(color)
return QColor.fromRgb(r,g,b,a)
@contextlib.contextmanager
def ending(painter):
try:
yield painter
finally:
painter.end()
@contextlib.contextmanager
def restoring(painter):
try:
painter.save()
yield painter
finally:
painter.restore()
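# Hedged usage sketch: both painter helpers guarantee cleanup even if drawing raises.
# For example (widget is any QPaintDevice; names are illustrative):
#
# with ending(QPainter(widget)) as p:
#     with restoring(p):
#         p.setPen(to_q_color('#ff0000'))
#         p.drawLine(0, 0, 10, 10)
#     # pen/brush state restored here; the painter is ended when the outer block exits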
def qcolor_marshaller(attrname):
def fget(self):
# QColor::name() actually returns an HTML-style hex string like
# #AABBCC.
color = getattr(self, attrname)
return Color.from_rgb(color.red(),
color.green(),
color.blue(),
color.alpha())
def fset(self, value):
setattr(self, attrname, to_q_color(value))
return property(fget, fset)
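# Hedged usage sketch: qcolor_marshaller builds a property that exposes a stored QColor
# attribute as a keypad Color (attribute and class names below are hypothetical):
#
# class Style:
#     def __init__(self):
#         self._q_bg = QColor.fromRgb(255, 255, 255, 255)
#     bg_color = qcolor_marshaller('_q_bg')
#
# Style().bg_color returns a Color built from the QColor's RGBA channels, and assigning
# a hex string or Color stores it back as a QColor.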
class ABCWithQtMeta(pyqtWrapperType, abc.ABCMeta):
pass
class AutoresponderMixin:
@property
def next_responders(self):
pw = self.parentWidget()
while pw is not None and not isinstance(pw, Responder):
pw = pw.parentWidget()
if pw is not None and isinstance(pw, Responder):
return [pw] + super().next_responders
else:
return super().next_responders
class Autoresponder(AutoresponderMixin, Responder):
pass
| gpl-3.0 | -4,094,260,488,308,843,500 | 20.135593 | 71 | 0.615477 | false |
nextoa/comb | comb/slot.py | 1 | 2027 | # -*- coding: utf-8 -*-
class Slot(object):
"""
To use comb, you should create a python module file. We name it a *slot*.
A legal slot must be named 'Slot' in your module file and it must contain at least these four methods:
* `initialize`
initialize resources, e.g. a database handle
* `__enter__`
get the next data to process; you can fetch one or more items
* `slot`
user custom code
* `__exit__`
called when the slot is finished
A minimal example slot is sketched in the comments below.
"""
def __init__(self, combd):
"""Don't override this method unless you know what you're doing.
"""
self.threads_num = combd.threads_num
self.sleep = combd.sleep
self.sleep_max = combd.sleep_max
self.debug = combd.debug
self.combd = combd
self.initialize()
def initialize(self):
"""Hook for subclass initialization.
This block is execute before thread initial
Example::
class UserSlot(Slot):
def initialize(self):
self.threads_num = 10
def slot(self, result):
...
"""
pass
def __enter__(self):
"""You **MUST** return False when there is no data left to process.
The return value will be passed to `Slot.slot`
"""
print("You should override the __enter__ method in a subclass")
return False
def __exit__(self, exc_type, exc_val, exc_tb):
"""Called when the slot is done.
"""
print("You should override the __exit__ method in a subclass")
pass
def slot(self, msg):
"""
Add your custom code here.
For example, look at:
* `comb.demo.list`
* `comb.demo.mongo`
* `comb.demo.redis`
"""
pass
# @staticmethod
# def options():
# """
# replace this method if you want add user options
# :return:
# """
# return ()
# pass
| mit | -5,057,463,884,052,601,000 | 19.474747 | 102 | 0.51258 | false |
4ndreas/ROSCoffeButler | src/arbotix_python/src/arbotix_python/diff_controller.py | 1 | 10710 | #!/usr/bin/env python
"""
diff_controller.py - controller for a differential drive
Copyright (c) 2010-2011 Vanadium Labs LLC. All right reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Vanadium Labs LLC nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import rospy
from math import sin,cos,pi
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from diagnostic_msgs.msg import *
from tf.broadcaster import TransformBroadcaster
from ax12 import *
from controllers import *
from struct import unpack
class DiffController(Controller):
""" Controller to handle movement & odometry feedback for a differential
drive mobile base. """
def __init__(self, device, name):
Controller.__init__(self, device, name)
self.pause = True
self.last_cmd = rospy.Time.now()
# parameters: rates and geometry
self.rate = rospy.get_param('~controllers/'+name+'/rate',10.0)
self.timeout = rospy.get_param('~controllers/'+name+'/timeout',1.0)
self.t_delta = rospy.Duration(1.0/self.rate)
self.t_next = rospy.Time.now() + self.t_delta
self.ticks_meter = float(rospy.get_param('~controllers/'+name+'/ticks_meter'))
self.base_width = float(rospy.get_param('~controllers/'+name+'/base_width'))
self.base_frame_id = rospy.get_param('~controllers/'+name+'/base_frame_id', 'base_link')
self.odom_frame_id = rospy.get_param('~controllers/'+name+'/odom_frame_id', 'odom')
# parameters: PID
self.Kp = rospy.get_param('~controllers/'+name+'/Kp', 5)
self.Kd = rospy.get_param('~controllers/'+name+'/Kd', 1)
self.Ki = rospy.get_param('~controllers/'+name+'/Ki', 0)
self.Ko = rospy.get_param('~controllers/'+name+'/Ko', 50)
# parameters: acceleration
self.accel_limit = rospy.get_param('~controllers/'+name+'/accel_limit', 0.1)
self.max_accel = int(self.accel_limit*self.ticks_meter/self.rate)
# output for joint states publisher
self.joint_names = ["base_l_wheel_joint","base_r_wheel_joint"]
self.joint_positions = [0,0]
self.joint_velocities = [0,0]
# internal data
self.v_left = 0 # current setpoint velocity
self.v_right = 0
self.v_des_left = 0 # cmd_vel setpoint
self.v_des_right = 0
self.enc_left = None # encoder readings
self.enc_right = None
self.x = 0 # position in xy plane
self.y = 0
self.th = 0
self.dx = 0 # speeds in x/rotation
self.dr = 0
self.then = rospy.Time.now() # time for determining dx/dy
# subscriptions
rospy.Subscriber("cmd_vel", Twist, self.cmdVelCb)
self.odomPub = rospy.Publisher("odom", Odometry, queue_size=5)
self.odomBroadcaster = TransformBroadcaster()
rospy.loginfo("Started DiffController ("+name+"). Geometry: " + str(self.base_width) + "m wide, " + str(self.ticks_meter) + " ticks/m.")
def startup(self):
if not self.fake:
self.setup(self.Kp,self.Kd,self.Ki,self.Ko)
def update(self):
now = rospy.Time.now()
if now > self.t_next:
elapsed = now - self.then
self.then = now
elapsed = elapsed.to_sec()
if self.fake:
x = cos(self.th)*self.dx*elapsed
y = -sin(self.th)*self.dx*elapsed
self.x += cos(self.th)*self.dx*elapsed
self.y += sin(self.th)*self.dx*elapsed
self.th += self.dr*elapsed
else:
# read encoders
try:
left, right = self.status()
except Exception as e:
rospy.logerr("Could not update encoders: " + str(e))
return
rospy.logdebug("Encoders: " + str(left) +","+ str(right))
# calculate odometry
if self.enc_left == None:
d_left = 0
d_right = 0
else:
d_left = (left - self.enc_left)/self.ticks_meter
d_right = (right - self.enc_right)/self.ticks_meter
self.enc_left = left
self.enc_right = right
d = (d_left+d_right)/2
th = (d_right-d_left)/self.base_width
self.dx = d / elapsed
self.dr = th / elapsed
if (d != 0):
x = cos(th)*d
y = -sin(th)*d
self.x = self.x + (cos(self.th)*x - sin(self.th)*y)
self.y = self.y + (sin(self.th)*x + cos(self.th)*y)
if (th != 0):
self.th = self.th + th
# publish or perish
quaternion = Quaternion()
quaternion.x = 0.0
quaternion.y = 0.0
quaternion.z = sin(self.th/2)
quaternion.w = cos(self.th/2)
self.odomBroadcaster.sendTransform(
(self.x, self.y, 0),
(quaternion.x, quaternion.y, quaternion.z, quaternion.w),
rospy.Time.now(),
self.base_frame_id,
self.odom_frame_id
)
odom = Odometry()
odom.header.stamp = now
odom.header.frame_id = self.odom_frame_id
odom.pose.pose.position.x = self.x
odom.pose.pose.position.y = self.y
odom.pose.pose.position.z = 0
odom.pose.pose.orientation = quaternion
odom.child_frame_id = self.base_frame_id
odom.twist.twist.linear.x = self.dx
odom.twist.twist.linear.y = 0
odom.twist.twist.angular.z = self.dr
self.odomPub.publish(odom)
if now > (self.last_cmd + rospy.Duration(self.timeout)):
self.v_des_left = 0
self.v_des_right = 0
# update motors
if not self.fake:
if self.v_left < self.v_des_left:
self.v_left += self.max_accel
if self.v_left > self.v_des_left:
self.v_left = self.v_des_left
else:
self.v_left -= self.max_accel
if self.v_left < self.v_des_left:
self.v_left = self.v_des_left
if self.v_right < self.v_des_right:
self.v_right += self.max_accel
if self.v_right > self.v_des_right:
self.v_right = self.v_des_right
else:
self.v_right -= self.max_accel
if self.v_right < self.v_des_right:
self.v_right = self.v_des_right
self.write(self.v_left, self.v_right)
self.t_next = now + self.t_delta
def shutdown(self):
if not self.fake:
self.write(0,0)
def cmdVelCb(self,req):
""" Handle movement requests. """
self.last_cmd = rospy.Time.now()
if self.fake:
self.dx = req.linear.x # m/s
self.dr = req.angular.z # rad/s
else:
# set motor speeds in ticks per 1/30s
self.v_des_left = int( ((req.linear.x - (req.angular.z * self.base_width/2.0)) * self.ticks_meter) / 30.0)
self.v_des_right = int( ((req.linear.x + (req.angular.z * self.base_width/2.0)) * self.ticks_meter) / 30.0)
def getDiagnostics(self):
""" Get a diagnostics status. """
msg = DiagnosticStatus()
msg.name = self.name
msg.level = DiagnosticStatus.OK
msg.message = "OK"
if not self.fake:
msg.values.append(KeyValue("Left", str(self.enc_left)))
msg.values.append(KeyValue("Right", str(self.enc_right)))
msg.values.append(KeyValue("dX", str(self.dx)))
msg.values.append(KeyValue("dR", str(self.dr)))
return msg
###
### Controller Specification:
###
### setup: Kp, Kd, Ki, Ko (all unsigned char)
###
### write: left_speed, right_speed (2-byte signed, ticks per frame)
###
### status: left_enc, right_enc (4-byte signed)
###
def setup(self, kp, kd, ki, ko):
success = self.device.execute(253, AX_CONTROL_SETUP, [10, kp, kd, ki, ko])
def write(self, left, right):
""" Send a closed-loop speed. Base PID loop runs at 30Hz, these values
are therefore in ticks per 1/30 second. """
left = left&0xffff
right = right&0xffff
success = self.device.execute(253, AX_CONTROL_WRITE, [10, left%256, left>>8, right%256, right>>8])
def status(self):
""" read 32-bit (signed) encoder values. """
values = self.device.execute(253, AX_CONTROL_STAT, [10])
left_values = "".join([chr(k) for k in values[0:4] ])
right_values = "".join([chr(k) for k in values[4:] ])
try:
left = unpack('=l',left_values)[0]
right = unpack('=l',right_values)[0]
return [left, right]
except:
return None
| mit | 5,647,903,849,542,350,000 | 40.034483 | 144 | 0.556489 | false |
juancarlospaco/css-html-js-minify | css_html_js_minify/js_minifier.py | 1 | 6476 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""JavaScript Minifier functions for CSS-HTML-JS-Minify."""
import re
from io import StringIO # pure-Python StringIO supports unicode.
from .css_minifier import condense_semicolons
__all__ = ('js_minify', )
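# Hedged usage sketch: js_minify is the module's public entry point; it takes JavaScript
# source as a string and returns the minified string, e.g.
#
# minified = js_minify("var answer = 42;  // the answer\nalert( answer );")
#
# (it also prints a note that ES6/ES7 support is unmaintained -- see js_minify below)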
def remove_commented_lines(js):
"""Force remove commented out lines from Javascript."""
result = ""
for line in js.splitlines():
line = re.sub(r"/\*.*\*/", "", line)  # (/*COMMENT */)
line = re.sub(r"//.*", "", line)  # (//COMMENT)
result += '\n'+line
return result
def simple_replacer_js(js):
"""Force strip simple replacements from Javascript."""
return condense_semicolons(js.replace("debugger;", ";").replace(
";}", "}").replace("; ", ";").replace(" ;", ";").rstrip("\n;"))
def js_minify_keep_comments(js):
"""Return a minified version of the Javascript string."""
ins, outs = StringIO(js), StringIO()
JavascriptMinify(ins, outs).minify()
return force_single_line_js(outs.getvalue())
def force_single_line_js(js):
"""Force Javascript to a single line, even if need to add semicolon."""
return ";".join(js.splitlines()) if len(js.splitlines()) > 1 else js
class JavascriptMinify(object):
"""Minify an input stream of Javascript, writing to an output stream."""
def __init__(self, instream=None, outstream=None):
"""Init class."""
self.ins, self.outs = instream, outstream
def minify(self, instream=None, outstream=None):
"""Minify Javascript using StringIO."""
if instream and outstream:
self.ins, self.outs = instream, outstream
write, read = self.outs.write, self.ins.read
space_strings = ("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$\\")
starters, enders = '{[(+-', '}])+-"\''
newlinestart_strings = starters + space_strings
newlineend_strings = enders + space_strings
do_newline, do_space = False, False
doing_single_comment, doing_multi_comment = False, False
previous_before_comment, in_quote = '', ''
in_re, quote_buf = False, []
previous = read(1)
next1 = read(1)
if previous == '/':
if next1 == '/':
doing_single_comment = True
elif next1 == '*':
doing_multi_comment = True
else:
write(previous)
elif not previous:
return
elif previous >= '!':
if previous in "'\"":
in_quote = previous
write(previous)
previous_non_space = previous
else:
previous_non_space = ' '
if not next1:
return
while True:
next2 = read(1)
if not next2:
last = next1.strip()
conditional_1 = (doing_single_comment or doing_multi_comment)
if not conditional_1 and last not in ('', '/'):
write(last)
break
if doing_multi_comment:
if next1 == '*' and next2 == '/':
doing_multi_comment = False
next2 = read(1)
elif doing_single_comment:
if next1 in '\r\n':
doing_single_comment = False
while next2 in '\r\n':
next2 = read(1)
if not next2:
break
if previous_before_comment in ')}]':
do_newline = True
elif previous_before_comment in space_strings:
write('\n')
elif in_quote:
quote_buf.append(next1)
if next1 == in_quote:
numslashes = 0
for c in reversed(quote_buf[:-1]):
if c != '\\':
break
else:
numslashes += 1
if numslashes % 2 == 0:
in_quote = ''
write(''.join(quote_buf))
elif next1 in '\r\n':
conditional_2 = previous_non_space in newlineend_strings
if conditional_2 or previous_non_space > '~':
while 1:
if next2 < '!':
next2 = read(1)
if not next2:
break
else:
conditional_3 = next2 in newlinestart_strings
if conditional_3 or next2 > '~' or next2 == '/':
do_newline = True
break
elif next1 < '!' and not in_re:
conditional_4 = next2 in space_strings or next2 > '~'
conditional_5 = previous_non_space in space_strings
conditional_6 = previous_non_space > '~'
if (conditional_5 or conditional_6) and (conditional_4):
do_space = True
elif next1 == '/':
if in_re:
if previous != '\\':
in_re = False
write('/')
elif next2 == '/':
doing_single_comment = True
previous_before_comment = previous_non_space
elif next2 == '*':
doing_multi_comment = True
else:
in_re = previous_non_space in '(,=:[?!&|'
write('/')
else:
if do_space:
do_space = False
write(' ')
if do_newline:
write('\n')
do_newline = False
write(next1)
if not in_re and next1 in "'\"":
in_quote = next1
quote_buf = []
previous = next1
next1 = next2
if previous >= '!':
previous_non_space = previous
def js_minify(js):
"""Minify a JavaScript string."""
print("""Future JavaScript support is orphan and not supported!.
If you want to make ES6,ES7 work feel free to send pull requests.""")
js = remove_commented_lines(js)
js = js_minify_keep_comments(js)
return js.strip()
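
# --- Illustrative usage sketch (added; not part of the original module). ---
# It assumes the package layout of this repo, so relative imports resolve when
# run as ``python -m css_html_js_minify.js_minifier``. The snippet below is a
# hypothetical input used only to show what js_minify() produces.
if __name__ == '__main__':
    _sample_js = (
        "// a line comment\n"
        "function add(a, b) {\n"
        "    return a + b;  /* a block comment */\n"
        "}\n"
        "add(1, 2);\n"
    )
    # js_minify() strips comments first, then collapses the code to one line.
    print(js_minify(_sample_js))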
| lgpl-3.0 | 1,878,974,442,023,061,800 | 35.178771 | 79 | 0.468345 | false |
YYWen0o0/python-frame-django | tests/admin_filters/tests.py | 2 | 41331 | from __future__ import unicode_literals
import datetime
from django.contrib.admin import (site, ModelAdmin, SimpleListFilter,
BooleanFieldListFilter, AllValuesFieldListFilter, RelatedOnlyFieldListFilter)
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, RequestFactory, override_settings
from django.utils.encoding import force_text
from django.utils import six
from .models import Book, Department, Employee
def select_by(dictlist, key, value):
return [x for x in dictlist if x[key] == value][0]
class DecadeListFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (
('the 80s', "the 1980's"),
('the 90s', "the 1990's"),
('the 00s', "the 2000's"),
('other', "other decades"),
)
def queryset(self, request, queryset):
decade = self.value()
if decade == 'the 80s':
return queryset.filter(year__gte=1980, year__lte=1989)
if decade == 'the 90s':
return queryset.filter(year__gte=1990, year__lte=1999)
if decade == 'the 00s':
return queryset.filter(year__gte=2000, year__lte=2009)
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
title = 'publication decade'
parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
pass
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
def queryset(self, request, queryset):
raise 1 / 0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
qs = model_admin.get_queryset(request)
if qs.filter(year__gte=1980, year__lte=1989).exists():
yield ('the 80s', "the 1980's")
if qs.filter(year__gte=1990, year__lte=1999).exists():
yield ('the 90s', "the 1990's")
if qs.filter(year__gte=2000, year__lte=2009).exists():
yield ('the 00s', "the 2000's")
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
title = 'publication decade'
    parameter_name = 'decade__in'  # Ends with '__in'
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
title = 'publication decade'
    parameter_name = 'decade__isnull'  # Ends with '__isnull'
class DepartmentListFilterLookupWithNonStringValue(SimpleListFilter):
title = 'department'
parameter_name = 'department'
def lookups(self, request, model_admin):
return sorted(set([
(employee.department.id, # Intentionally not a string (Refs #19318)
employee.department.code)
for employee in model_admin.get_queryset(request).all()
]))
def queryset(self, request, queryset):
if self.value():
return queryset.filter(department__id=self.value())
class DepartmentListFilterLookupWithUnderscoredParameter(DepartmentListFilterLookupWithNonStringValue):
parameter_name = 'department__whatever'
class DepartmentListFilterLookupWithDynamicValue(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
if self.value() == 'the 80s':
return (('the 90s', "the 1990's"),)
elif self.value() == 'the 90s':
return (('the 80s', "the 1980's"),)
else:
return (('the 80s', "the 1980's"), ('the 90s', "the 1990's"),)
class CustomUserAdmin(UserAdmin):
list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
ordering = ('-id',)
class BookAdminWithTupleBooleanFilter(BookAdmin):
list_filter = ('year', 'author', 'contributors', ('is_best_seller', BooleanFieldListFilter), 'date_registered', 'no')
class BookAdminWithUnderscoreLookupAndTuple(BookAdmin):
list_filter = ('year', ('author__email', AllValuesFieldListFilter), 'contributors', 'is_best_seller', 'date_registered', 'no')
class BookAdminRelatedOnlyFilter(ModelAdmin):
list_filter = (
'year', 'is_best_seller', 'date_registered', 'no',
('author', RelatedOnlyFieldListFilter),
('contributors', RelatedOnlyFieldListFilter),
)
ordering = ('-id',)
class DecadeFilterBookAdmin(ModelAdmin):
list_filter = ('author', DecadeListFilterWithTitleAndParameter)
ordering = ('-id',)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
list_display = ['name', 'department']
list_filter = ['department']
class DepartmentFilterEmployeeAdmin(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithNonStringValue, ]
class DepartmentFilterUnderscoredEmployeeAdmin(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithUnderscoredParameter, ]
class DepartmentFilterDynamicValueBookAdmin(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithDynamicValue, ]
class ListFiltersTests(TestCase):
def setUp(self):
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.one_week_ago = self.today - datetime.timedelta(days=7)
if self.today.month == 12:
self.next_month = self.today.replace(year=self.today.year + 1, month=1, day=1)
else:
self.next_month = self.today.replace(month=self.today.month + 1, day=1)
self.next_year = self.today.replace(year=self.today.year + 1, month=1, day=1)
self.request_factory = RequestFactory()
# Users
self.alfred = User.objects.create_user('alfred', '[email protected]')
self.bob = User.objects.create_user('bob', '[email protected]')
self.lisa = User.objects.create_user('lisa', '[email protected]')
# Books
self.djangonaut_book = Book.objects.create(title='Djangonaut: an art of living', year=2009, author=self.alfred, is_best_seller=True, date_registered=self.today)
self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred, is_best_seller=False, no=207)
self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob, is_best_seller=None, date_registered=self.today, no=103)
self.gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002, is_best_seller=True, date_registered=self.one_week_ago)
self.gipsy_book.contributors = [self.bob, self.lisa]
self.gipsy_book.save()
# Departments
self.dev = Department.objects.create(code='DEV', description='Development')
self.design = Department.objects.create(code='DSN', description='Design')
# Employees
self.john = Employee.objects.create(name='John Blue', department=self.dev)
self.jack = Employee.objects.create(name='Jack Red', department=self.design)
def get_changelist(self, request, model, modeladmin):
return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin)
def test_datefieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'date_registered__gte': self.today,
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Today")
self.assertEqual(choice['selected'], True)
self.assertEqual(
choice['query_string'],
'?date_registered__gte=%s&date_registered__lt=%s' % (
self.today,
self.tomorrow,
)
)
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
'date_registered__lt': self.next_month})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
# In case one week ago is in the same month.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This month")
self.assertEqual(choice['selected'], True)
self.assertEqual(
choice['query_string'],
'?date_registered__gte=%s&date_registered__lt=%s' % (
self.today.replace(day=1),
self.next_month,
)
)
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
'date_registered__lt': self.next_year})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
if self.today.year == self.one_week_ago.year:
# In case one week ago is in the same year.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This year")
self.assertEqual(choice['selected'], True)
self.assertEqual(
choice['query_string'],
'?date_registered__gte=%s&date_registered__lt=%s' % (
self.today.replace(month=1, day=1),
self.next_year,
)
)
request = self.request_factory.get('/', {
'date_registered__gte': str(self.one_week_ago),
'date_registered__lt': str(self.tomorrow),
})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
self.assertEqual(choice['selected'], True)
self.assertEqual(
choice['query_string'],
'?date_registered__gte=%s&date_registered__lt=%s' % (
str(self.one_week_ago),
str(self.tomorrow),
)
)
@override_settings(USE_TZ=True)
def test_datefieldlistfilter_with_time_zone_support(self):
# Regression for #17830
self.test_datefieldlistfilter()
def test_allvaluesfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'year__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
request = self.request_factory.get('/', {'year': '2002'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_relatedfieldlistfilter_foreignkey(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure that all users are present in the author's list filter
filterspec = changelist.get_filters(request)[0][1]
expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
self.assertEqual(filterspec.lookup_choices, expected)
request = self.request_factory.get('/', {'author__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.gipsy_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_relatedfieldlistfilter_manytomany(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure that all users are present in the contrib's list filter
filterspec = changelist.get_filters(request)[0][2]
expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
self.assertEqual(filterspec.lookup_choices, expected)
request = self.request_factory.get('/', {'contributors__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choice = select_by(filterspec.choices(changelist), "display", "bob")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
modeladmin = CustomUserAdmin(User, site)
# FK relationship -----
request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.lisa])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
# M2M relationship -----
request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.alfred])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
def test_relatedonlyfieldlistfilter_foreignkey(self):
modeladmin = BookAdminRelatedOnlyFilter(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure that only actual authors are present in author's list filter
filterspec = changelist.get_filters(request)[0][4]
expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob')]
self.assertEqual(filterspec.lookup_choices, expected)
def test_relatedonlyfieldlistfilter_manytomany(self):
modeladmin = BookAdminRelatedOnlyFilter(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure that only actual contributors are present in contrib's list filter
filterspec = changelist.get_filters(request)[0][5]
expected = [(self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
self.assertEqual(filterspec.lookup_choices, expected)
def test_booleanfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def verify_booleanfieldlistfilter(self, modeladmin):
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'is_best_seller__exact': 0})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "No")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')
request = self.request_factory.get('/', {'is_best_seller__exact': 1})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Yes")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')
request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Unknown")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_fieldlistfilter_underscorelookup_tuple(self):
"""
        Ensure ('fieldpath', ClassName) lookups pass lookup_allowed checks
        when fieldpath contains a double underscore in its value.
Refs #19182
"""
modeladmin = BookAdminWithUnderscoreLookupAndTuple(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'author__email': '[email protected]'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book, self.djangonaut_book])
def test_simplelistfilter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
# Make sure that the first option is 'All' ---------------------------
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
# Look for books in the 1980s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'the 1980\'s')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')
# Look for books in the 1990s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')
# Look for books in the 2000s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')
# Combine multiple filters -------------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.djangonaut_book])
# Make sure the correct choices are selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
def test_listfilter_without_title(self):
"""
Any filter must define a title.
"""
modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_without_parameter(self):
"""
Any SimpleListFilter must define a parameter_name.
"""
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_none_returning_lookups(self):
"""
        A SimpleListFilter's lookups() method may return None; doing so
        disables the filter completely.
"""
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
"""
Ensure that when a filter's queryset method fails, it fails loudly and
the corresponding exception doesn't get swallowed.
Refs #17828.
"""
modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
request = self.request_factory.get('/', {})
self.assertRaises(ZeroDivisionError, self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_queryset_based_lookups(self):
modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'the 1990\'s')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')
self.assertEqual(choices[2]['display'], 'the 2000\'s')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
"""
        Ensure that list_filter works with two-character-long field names.
Refs #16080.
"""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'no': '207'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'number')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
"""
Ensure that a SimpleListFilter's parameter name is not mistaken for a
model field if it ends with '__isnull' or '__in'.
Refs #17091.
"""
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get('/', {'decade__in': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')
# When it ends with '__isnull' ---------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_lookup_with_non_string_value(self):
"""
        Ensure choices are marked as selected when using non-string values
        for lookups in SimpleListFilters.
Refs #19318
"""
modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {'department': self.john.pk})
changelist = self.get_changelist(request, Employee, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'DEV')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department=%s' % self.john.pk)
def test_lookup_with_non_string_value_underscored(self):
"""
        Ensure SimpleListFilter lookups pass lookup_allowed checks when the
        parameter_name attribute contains a double underscore.
Refs #19182
"""
modeladmin = DepartmentFilterUnderscoredEmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {'department__whatever': self.john.pk})
changelist = self.get_changelist(request, Employee, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'DEV')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__whatever=%s' % self.john.pk)
def test_fk_with_to_field(self):
"""
Ensure that a filter on a FK respects the FK's to_field attribute.
Refs #17972.
"""
modeladmin = EmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.jack, self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], False)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
def test_lookup_with_dynamic_value(self):
"""
Ensure SimpleListFilter can access self.value() inside the lookup.
"""
modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)
def _test_choices(request, expected_displays):
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = tuple(c['display'] for c in filterspec.choices(changelist))
self.assertEqual(choices, expected_displays)
_test_choices(self.request_factory.get('/', {}),
("All", "the 1980's", "the 1990's"))
_test_choices(self.request_factory.get('/', {'publication-decade': 'the 80s'}),
("All", "the 1990's"))
_test_choices(self.request_factory.get('/', {'publication-decade': 'the 90s'}),
("All", "the 1980's"))
| bsd-3-clause | -9,038,131,155,378,770,000 | 43.925 | 168 | 0.656916 | false |
DataDog/integrations-core | vertica/datadog_checks/vertica/vertica.py | 1 | 28443 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import division
import logging
import ssl
from collections import OrderedDict, defaultdict
from datetime import datetime
from itertools import chain
import vertica_python as vertica
from six import iteritems
from vertica_python.vertica.column import timestamp_tz_parse
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from datadog_checks.base.utils.common import exclude_undefined_keys
from datadog_checks.base.utils.containers import iter_unique
from . import views
from .utils import kilobytes_to_bytes, node_state_to_service_check
# Python 3 only
PROTOCOL_TLS_CLIENT = getattr(ssl, 'PROTOCOL_TLS_CLIENT', ssl.PROTOCOL_TLS)
class VerticaCheck(AgentCheck):
__NAMESPACE__ = 'vertica'
SERVICE_CHECK_CONNECT = 'can_connect'
SERVICE_CHECK_NODE_STATE = 'node_state'
# This remapper is used to support legacy Vertica integration config values
TLS_CONFIG_REMAPPER = {
'cert': {'name': 'tls_cert'},
'private_key': {'name': 'tls_private_key'},
'ca_cert': {'name': 'tls_ca_cert'},
'validate_hostname': {'name': 'tls_validate_hostname'},
}
def __init__(self, name, init_config, instances):
super(VerticaCheck, self).__init__(name, init_config, instances)
self._server = self.instance.get('server', 'localhost')
self._port = int(self.instance.get('port', 5433))
self._username = self.instance.get('username')
self._db = self.instance.get('db', self._username)
self._password = self.instance.get('password', '')
self._backup_servers = [
(bs.get('server', self._server), int(bs.get('port', self._port)))
for bs in self.instance.get('backup_servers', [])
]
self._connection_load_balance = is_affirmative(self.instance.get('connection_load_balance', False))
self._timeout = float(self.instance.get('timeout', 10))
self._tags = self.instance.get('tags', [])
self._client_lib_log_level = self.instance.get('client_lib_log_level', self._get_default_client_lib_log_level())
# If `tls_verify` is explicitly set to true, set `use_tls` to true (for legacy support)
# `tls_verify` used to do what `use_tls` does now
self._tls_verify = is_affirmative(self.instance.get('tls_verify'))
self._use_tls = is_affirmative(self.instance.get('use_tls', False))
if self._tls_verify and not self._use_tls:
self._use_tls = True
custom_queries = self.instance.get('custom_queries', [])
use_global_custom_queries = self.instance.get('use_global_custom_queries', True)
# Handle overrides
if use_global_custom_queries == 'extend':
custom_queries.extend(self.init_config.get('global_custom_queries', []))
elif 'global_custom_queries' in self.init_config and is_affirmative(use_global_custom_queries):
custom_queries = self.init_config.get('global_custom_queries', [])
# Deduplicate
self._custom_queries = list(iter_unique(custom_queries))
# Add global database tag
self._tags.append('db:{}'.format(self._db))
# We'll connect on the first check run
self._connection = None
# Cache database results for re-use among disparate functions
self._view = defaultdict(list)
self._metric_groups = []
self.check_initializations.append(self.parse_metric_groups)
def _get_default_client_lib_log_level(self):
if self.log.logger.getEffectiveLevel() <= logging.DEBUG:
# Automatically collect library logs for debug flares.
return logging.DEBUG
# Default to no library logs, since they're too verbose even at the INFO level.
return None
def _connect(self):
if self._connection is None:
connection = self.get_connection()
if connection is None:
return
self._connection = connection
elif self._connection_load_balance or self._connection.closed():
self._connection.reset_connection()
def check(self, _):
self._connect()
# The order of queries is important as some results are cached for later re-use
try:
for method in self._metric_groups:
method()
self.query_version()
self.query_custom()
finally:
self._view.clear()
def query_licenses(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/CATALOG/LICENSES.htm
for db_license in self.iter_rows(views.Licenses):
self._view[views.Licenses].append(db_license)
tags = ['license_type:{}'.format(db_license['licensetype'])]
tags.extend(self._tags)
expiration = db_license['end_date']
if expiration and expiration != 'Perpetual':
expiration = timestamp_tz_parse(expiration)
seconds_until_expiration = (expiration - datetime.now(tz=expiration.tzinfo)).total_seconds()
else:
seconds_until_expiration = -1
self.gauge('license.expiration', seconds_until_expiration, tags=tags)
def query_license_audits(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/CATALOG/LICENSE_AUDITS.htm
for license_audit in self.iter_rows(views.LicenseAudits):
last_audit = license_audit['audit_start_timestamp']
if last_audit:
seconds_since_last_audit = (datetime.now(tz=last_audit.tzinfo) - last_audit).total_seconds()
else:
seconds_since_last_audit = -1
self.gauge('license.latest_audit', seconds_since_last_audit, tags=self._tags)
size = int(license_audit['license_size_bytes'])
used = int(license_audit['database_size_bytes'])
self.gauge('license.size', size, tags=self._tags)
self.gauge('license.used', used, tags=self._tags)
self.gauge('license.usable', size - used, tags=self._tags)
self.gauge('license.utilized', used / size * 100, tags=self._tags)
def query_system(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/MONITOR/SYSTEM.htm
        # There will only be 1 system row
for system in self.iter_rows(views.System):
total_nodes = system['node_count']
self.gauge('node.total', total_nodes, tags=self._tags)
self.gauge('node.down', system['node_down_count'], tags=self._tags)
            # It is possible for there to be no restriction
allowed_nodes = self._view[views.Licenses][0]['node_restriction']
if allowed_nodes is not None:
self.gauge('node.allowed', allowed_nodes, tags=self._tags)
self.gauge('node.available', allowed_nodes - total_nodes, tags=self._tags)
self.gauge('ksafety.current', system['current_fault_tolerance'], tags=self._tags)
self.gauge('ksafety.intended', system['designed_fault_tolerance'], tags=self._tags)
self.gauge('epoch.ahm', system['ahm_epoch'], tags=self._tags)
self.gauge('epoch.current', system['current_epoch'], tags=self._tags)
self.gauge('epoch.last_good', system['last_good_epoch'], tags=self._tags)
def query_nodes(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/CATALOG/NODES.htm
for node in self.iter_rows(views.Nodes):
tags = ['node_name:{}'.format(node['node_name'])]
tags.extend(self._tags)
node_state = node['node_state']
self.service_check(
self.SERVICE_CHECK_NODE_STATE, node_state_to_service_check(node_state), message=node_state, tags=tags
)
def query_projections(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/CATALOG/PROJECTIONS.htm
total_projections = 0
unsegmented_projections = 0
unsafe_projections = 0
for projection in self.iter_rows(views.Projections):
total_projections += 1
if not projection['is_segmented']:
unsegmented_projections += 1
if not projection['is_up_to_date']:
unsafe_projections += 1
self.gauge('projection.total', total_projections, tags=self._tags)
self.gauge('projection.unsegmented', unsegmented_projections, tags=self._tags)
if total_projections:
unsegmented_percent = unsegmented_projections / total_projections * 100
else:
unsegmented_percent = 0
self.gauge('projection.unsegmented_percent', unsegmented_percent, tags=self._tags)
self.gauge('projection.unsafe', unsafe_projections, tags=self._tags)
if total_projections:
unsafe_percent = unsafe_projections / total_projections * 100
else:
unsafe_percent = 0
self.gauge('projection.unsafe_percent', unsafe_percent, tags=self._tags)
def query_projection_storage(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/MONITOR/PROJECTION_STORAGE.htm
projection_data = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: {'ros_count': 0, 'rows_ros': 0, 'rows_wos': 0, 'used_ros': 0, 'used_wos': 0}
)
)
)
for ps in self.iter_rows(views.ProjectionStorage):
projection = projection_data[ps['node_name']][ps['anchor_table_name']][ps['projection_name']]
projection['ros_count'] += ps['ros_count']
projection['rows_ros'] += ps['ros_row_count']
projection['rows_wos'] += ps['wos_row_count']
projection['used_ros'] += ps['ros_used_bytes']
projection['used_wos'] += ps['wos_used_bytes']
total_rows_ros = 0
total_rows_wos = 0
total_used_ros = 0
total_used_wos = 0
# My understanding is that nodes have multiple tables, which in turn can have multiple projections
for node, tables in iteritems(projection_data):
node_tags = ['node_name:{}'.format(node)]
node_tags.extend(self._tags)
node_rows_ros = 0
node_rows_wos = 0
node_used_ros = 0
node_used_wos = 0
for table, projections in iteritems(tables):
table_tags = ['table_name:{}'.format(table)]
table_tags.extend(node_tags)
table_rows_ros = 0
table_rows_wos = 0
table_used_ros = 0
table_used_wos = 0
for projection, data in iteritems(projections):
projection_tags = ['projection_name:{}'.format(projection)]
projection_tags.extend(table_tags)
projection_rows_ros = data['rows_ros']
projection_rows_wos = data['rows_wos']
projection_used_ros = data['used_ros']
projection_used_wos = data['used_wos']
self.gauge('projection.ros.containers', data['ros_count'], tags=projection_tags)
self.gauge('projection.row.ros', projection_rows_ros, tags=projection_tags)
self.gauge('projection.row.wos', projection_rows_wos, tags=projection_tags)
self.gauge('projection.row.total', projection_rows_ros + projection_rows_wos, tags=projection_tags)
self.gauge('projection.disk.used.ros', projection_used_ros, tags=projection_tags)
self.gauge('projection.disk.used.wos', projection_used_wos, tags=projection_tags)
self.gauge('projection.disk.used', projection_used_ros + projection_used_wos, tags=projection_tags)
table_rows_ros += projection_rows_ros
table_rows_wos += projection_rows_wos
table_used_ros += projection_used_ros
table_used_wos += projection_used_wos
self.gauge('table.row.ros', table_rows_ros, tags=table_tags)
self.gauge('table.row.wos', table_rows_wos, tags=table_tags)
self.gauge('table.row.total', table_rows_ros + table_rows_wos, tags=table_tags)
self.gauge('table.disk.used.ros', table_used_ros, tags=table_tags)
self.gauge('table.disk.used.wos', table_used_wos, tags=table_tags)
self.gauge('table.disk.used', table_used_ros + table_used_wos, tags=table_tags)
node_rows_ros += table_rows_ros
node_rows_wos += table_rows_wos
node_used_ros += table_used_ros
node_used_wos += table_used_wos
self.gauge('node.row.ros', node_rows_ros, tags=node_tags)
self.gauge('node.row.wos', node_rows_wos, tags=node_tags)
self.gauge('node.row.total', node_rows_ros + node_rows_wos, tags=node_tags)
self.gauge('node.disk.used.ros', node_used_ros, tags=node_tags)
self.gauge('node.disk.used.wos', node_used_wos, tags=node_tags)
self.gauge('node.disk.used', node_used_ros + node_used_wos, tags=node_tags)
total_rows_ros += node_rows_ros
total_rows_wos += node_rows_wos
total_used_ros += node_used_ros
total_used_wos += node_used_wos
self.gauge('row.ros', total_rows_ros, tags=self._tags)
self.gauge('row.wos', total_rows_wos, tags=self._tags)
self.gauge('row.total', total_rows_ros + total_rows_wos, tags=self._tags)
self.gauge('disk.used.ros', total_used_ros, tags=self._tags)
self.gauge('disk.used.wos', total_used_wos, tags=self._tags)
self.gauge('disk.used', total_used_ros + total_used_wos, tags=self._tags)
def query_storage_containers(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/MONITOR/STORAGE_CONTAINERS.htm
container_data = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: {'delete_vectors': 0})))
for sc in self.iter_rows(views.StorageContainers):
container = container_data[sc['node_name']][sc['projection_name']][sc['storage_type'].lower()]
container['delete_vectors'] += sc['delete_vector_count']
total_delete_vectors = 0
for node, projections in iteritems(container_data):
node_tags = ['node_name:{}'.format(node)]
node_tags.extend(self._tags)
node_delete_vectors = 0
for projection, containers in iteritems(projections):
projection_tags = ['projection_name:{}'.format(projection)]
projection_tags.extend(node_tags)
projection_delete_vectors = 0
for container_type, data in iteritems(containers):
container_tags = ['container_type:{}'.format(container_type)]
container_tags.extend(projection_tags)
container_type_delete_vectors = data['delete_vectors']
self.gauge('projection.delete_vectors', container_type_delete_vectors, tags=container_tags)
projection_delete_vectors += container_type_delete_vectors
node_delete_vectors += projection_delete_vectors
self.gauge('node.delete_vectors', node_delete_vectors, tags=node_tags)
total_delete_vectors += node_delete_vectors
self.gauge('delete_vectors', total_delete_vectors, tags=self._tags)
def query_host_resources(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/MONITOR/HOST_RESOURCES.htm
for host in self.iter_rows(views.HostResources):
tags = ['host_name:{}'.format(host['host_name'])]
tags.extend(self._tags)
self.gauge('processor.cpu', host['processor_count'], tags=tags)
self.gauge('processor.core', host['processor_core_count'], tags=tags)
self.gauge('file.max', host['open_files_limit'], tags=tags)
self.gauge('file.open', host['opened_file_count'], tags=tags)
self.gauge('socket.open', host['opened_socket_count'], tags=tags)
self.gauge('thread.max', host['threads_limit'], tags=tags)
# Memory
total = host['total_memory_bytes']
usable = host['total_memory_free_bytes']
used = total - usable
self.gauge('memory.total', total, tags=tags)
self.gauge('memory.usable', usable, tags=tags)
self.gauge('memory.used', used, tags=tags)
if total:
utilized = used / total * 100
else:
utilized = 0
self.gauge('memory.utilized', utilized, tags=tags)
# Swap
total = host['total_swap_memory_bytes']
usable = host['total_swap_memory_free_bytes']
used = total - usable
self.gauge('memory.swap.total', total, tags=tags)
self.gauge('memory.swap.usable', usable, tags=tags)
self.gauge('memory.swap.used', used, tags=tags)
if total:
utilized = used / total * 100
else:
utilized = 0
self.gauge('memory.swap.utilized', utilized, tags=tags)
def query_query_metrics(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/MONITOR/QUERY_METRICS.htm
for node in self.iter_rows(views.QueryMetrics):
tags = ['node_name:{}'.format(node['node_name'])]
tags.extend(self._tags)
self.gauge('connection.active', node['active_user_session_count'], tags=tags)
self.monotonic_count('connection.total', node['total_user_session_count'], tags=tags)
self.gauge('query.active', node['running_query_count'], tags=tags)
self.monotonic_count('query.total', node['executed_query_count'], tags=tags)
def query_resource_pool_status(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/MONITOR/RESOURCE_POOL_STATUS.htm
for pool in self.iter_rows(views.ResourcePoolStatus):
tags = ['node_name:{}'.format(pool['node_name']), 'pool_name:{}'.format(pool['pool_name'])]
tags.extend(self._tags)
self.gauge(
'resource_pool.memory.borrowed', kilobytes_to_bytes(pool['general_memory_borrowed_kb']), tags=tags
)
self.gauge('resource_pool.memory.max', kilobytes_to_bytes(pool['max_memory_size_kb']), tags=tags)
self.gauge('resource_pool.memory.used', kilobytes_to_bytes(pool['memory_inuse_kb']), tags=tags)
self.gauge('resource_pool.query.running', pool['running_query_count'], tags=tags)
def query_disk_storage(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/MONITOR/DISK_STORAGE.htm
for storage in self.iter_rows(views.DiskStorage):
tags = [
'node_name:{}'.format(storage['node_name']),
'storage_status:{}'.format(storage['storage_status']),
'storage_usage:{}'.format(storage['storage_usage']),
]
tags.extend(self._tags)
# Space
block_size = storage['disk_block_size_bytes']
usable = block_size * storage['disk_space_free_blocks']
used = block_size * storage['disk_space_used_blocks']
total = usable + used
self.gauge('storage.size', total, tags=tags)
self.gauge('storage.usable', usable, tags=tags)
self.gauge('storage.used', used, tags=tags)
if total:
utilized = used / total * 100
else:
utilized = 0
self.gauge('storage.utilized', utilized, tags=tags)
# Latency
latency = storage['latency']
self.gauge('storage.latency', latency, tags=tags)
if latency:
latency_reciprocal = 1 / latency
else:
latency_reciprocal = 0
# Throughput
throughput = storage['throughput']
self.gauge('storage.throughput', throughput, tags=tags)
if throughput:
throughput_reciprocal = 1 / throughput
else:
throughput_reciprocal = 0
# Time to read 1 MiB
self.gauge('storage.speed', latency_reciprocal + throughput_reciprocal, tags=tags)
def query_resource_usage(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/SQLReferenceManual/SystemTables/MONITOR/RESOURCE_USAGE.htm
for node in self.iter_rows(views.ResourceUsage):
tags = ['node_name:{}'.format(node['node_name'])]
tags.extend(self._tags)
self.gauge('node.resource_requests', node['request_count'], tags=tags)
self.gauge('thread.active', node['active_thread_count'], tags=tags)
@AgentCheck.metadata_entrypoint
def query_version(self):
# https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/AdministratorsGuide/Diagnostics/DeterminingYourVersionOfVertica.htm
for v in self.iter_rows(views.Version):
version = v['version'].replace('Vertica Analytic Database v', '')
# Force the last part to represent the build part of semver
version = version.replace('-', '+', 1)
self.set_metadata('version', version)
def query_custom(self):
for custom_query in self._custom_queries:
query = custom_query.get('query')
if not query: # no cov
self.log.error('Custom query field `query` is required')
continue
columns = custom_query.get('columns')
if not columns: # no cov
self.log.error('Custom query field `columns` is required')
continue
self.log.debug('Running custom query for Vertica')
cursor = self._connection.cursor()
cursor.execute(query)
rows = cursor.iterate()
# Trigger query execution
try:
first_row = next(rows)
except Exception as e: # no cov
self.log.error('Error executing custom query: %s', e)
continue
for row in chain((first_row,), rows):
if not row: # no cov
self.log.debug('Custom query returned an empty result')
continue
if len(columns) != len(row): # no cov
self.log.error('Custom query result expected %s columns, got %s', len(columns), len(row))
continue
metric_info = []
query_tags = list(self._tags)
query_tags.extend(custom_query.get('tags', []))
for column, value in zip(columns, row):
# Columns can be ignored via configuration.
if not column: # no cov
continue
name = column.get('name')
if not name: # no cov
self.log.error('Column field `name` is required')
break
column_type = column.get('type')
if not column_type: # no cov
self.log.error('Column field `type` is required for column `%s`', name)
break
if column_type == 'tag':
query_tags.append('{}:{}'.format(name, value))
else:
if not hasattr(self, column_type):
self.log.error('Invalid submission method `%s` for metric column `%s`', column_type, name)
break
try:
metric_info.append((name, float(value), column_type))
except (ValueError, TypeError): # no cov
self.log.error('Non-numeric value `%s` for metric column `%s`', value, name)
break
# Only submit metrics if there were absolutely no errors - all or nothing.
else:
for info in metric_info:
metric, value, method = info
getattr(self, method)(metric, value, tags=query_tags)
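
    # Illustrative example (added; not part of the original check): a
    # `custom_queries` entry in the instance config that the loop above would
    # consume. The query, metric name, and tag values below are hypothetical.
    #
    #   custom_queries:
    #     - query: SELECT node_name, count(*) FROM v_monitor.sessions GROUP BY node_name
    #       columns:
    #         - name: node_name
    #           type: tag
    #         - name: sessions.connected
    #           type: gauge
    #       tags:
    #         - cluster:primary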
def get_connection(self):
connection_options = {
'database': self._db,
'host': self._server,
'port': self._port,
'user': self._username,
'password': self._password,
'backup_server_node': self._backup_servers,
'connection_load_balance': self._connection_load_balance,
'connection_timeout': self._timeout,
}
if self._client_lib_log_level:
connection_options['log_level'] = self._client_lib_log_level
# log_path is required by the vertica client in order to enable logging;
# when log_path is set to '', vertica won't log to a file,
# but we still get logs via the parent root logger
connection_options['log_path'] = ''
if self._use_tls:
tls_context = self.get_tls_context()
connection_options['ssl'] = tls_context
try:
connection = vertica.connect(**exclude_undefined_keys(connection_options))
except Exception as e:
self.log.error('Unable to connect to database `%s` as user `%s`: %s', self._db, self._username, e)
self.service_check(self.SERVICE_CHECK_CONNECT, self.CRITICAL, tags=self._tags)
else:
self.service_check(self.SERVICE_CHECK_CONNECT, self.OK, tags=self._tags)
return connection
def iter_rows(self, view):
cursor = self._connection.cursor('dict')
cursor.execute(view.query)
for row in cursor.iterate():
yield row
def parse_metric_groups(self):
# If you create a new function, please add this to `default_metric_groups` below and
# the config file (under `metric_groups`).
default_metric_groups = OrderedDict(
(
('licenses', self.query_licenses),
('license_audits', self.query_license_audits),
('system', self.query_system),
('nodes', self.query_nodes),
('projections', self.query_projections),
('projection_storage', self.query_projection_storage),
('storage_containers', self.query_storage_containers),
('host_resources', self.query_host_resources),
('query_metrics', self.query_query_metrics),
('resource_pool_status', self.query_resource_pool_status),
('disk_storage', self.query_disk_storage),
('resource_usage', self.query_resource_usage),
)
)
metric_groups = self.instance.get('metric_groups') or list(default_metric_groups)
# Ensure all metric groups are valid
invalid_groups = []
for group in metric_groups:
if group not in default_metric_groups:
invalid_groups.append(group)
if invalid_groups:
raise ConfigurationError(
'Invalid metric_groups found in vertica conf.yaml: {}'.format(', '.join(invalid_groups))
)
# License query needs to be run before getting system
if 'system' in metric_groups and 'licenses' not in metric_groups:
self.log.debug('Detected `system` metric group; adding `licenses` to metric_groups.')
metric_groups.insert(0, 'licenses')
self._metric_groups.extend(
default_metric_groups[group] for group in default_metric_groups if group in metric_groups
)
| bsd-3-clause | 2,484,851,219,268,113,000 | 43.442188 | 135 | 0.589143 | false |
ofafa/pynotes | 2015.4.14.forum.py | 1 | 12428 | class panel:
def 瞿志豪(Michel):
Nasdaq
def vicent:
b84; 華楊; board director of EasyCard (悠遊卡)
def Bruce:
EMBA95; founded a startup in Silicon Valley, Cayman holding structure
def 司徒嘉恆(Seetoo):
b84; NTU economics -> law abroad; 創拓 law firm
Startup fundraising and so on, examined from the back end, up to the IPO or M&A stage
Web 1.0 era: built an online media venture right out of university; the failure was partly tied to the legal factors of the time
def 詹德弘(Joseph):
b80; GigaMedia, 華威, 之初; has seen Taiwanese-style and US-style structures, both large and small
In the past few years the old framework was torn down, but the new one has not come up yet
def ???:
The common question: why is the legal environment so unfriendly?
In the end companies usually have to go to the Cayman Islands
Helps the SME Administration improve the legal side of doing business
SME Administration regulatory adjustment meetings
It is not that it cannot be done, but that it is not being done
def 羅智光律師:
not here yet
def panel:
Jack: Sunflower Movement -> youth advisory board -> came to understand government; the government should build the ecosystem
prob1:
Technology (sweat-equity) shares? Are they taxed?
Stock options: not flexible enough
Preferred shares: the rights attached to the shares cannot be negotiated, so investors cannot protect their interests; not good in the long run
Convertible corporate bonds: cannot be used when the two sides have no consensus on valuation, and cannot be issued while the company is unlisted
Par value: now relaxed down to NT$1, but whether that is flexible enough remains to be discussed
Closely held companies (valuation of intangible assets): before a public offering this is between the founding team and the investors, so a moderate loosening should be possible
Mr. 瞿:
The securities capital-gains tax amendment shut Taiwan's doors
For startup valuation Taiwan used to have its own homegrown rule: sell old shares for new ones, using the sale proceeds to subscribe to the company's new shares
Since the capital-gains tax was levied that route has been closed, so people look at breaking out with stock options and the like instead
After the change, many practices may conflict with the pre-existing law currently in force
The problem is that the administrative agencies, the MOEA and the FSC, lock many things down very tightly
Preferred shares are actually not restricted in the Company Act; in theory preferred-share terms could be agreed under the current Act
But the MOEA imposes many restrictions, e.g. one preferred share cannot convert into several common shares, and conversions are often shot down by the administrative agencies
The Company Act itself does not restrict stock options much, but once a company goes public it faces many regulators; e.g. stock options cannot be granted to advisors, outside counsel, part-time employees and so on. If options cannot be used as a resource they are hard to leverage
The FSC further rules that a single option issuance cannot exceed 10% and the total cannot exceed 15%, a serious constraint
There are plenty of workarounds, but they waste far too much time
Convertible debt: likewise not regulated at the statutory level, mostly constrained by administrative orders
conclusion: really, all it takes is for the regulators to relax the restrictions in their administrative orders
Vicent:
We once submitted the convertible-preferred-share piece; it got through when first filed, but was shot down when we actually tried to do it
Retained-earnings conversion ??
Some of it can be handled by contracting under the Civil Code
Listing rules set a minimum capital, which makes it hard for small-capital but high-market-value companies to list
IPO losses can only be recognized in the current year??? Completely lost here, argh
Bruce:
The Taiwan-US difference: industrial age vs information age
1) Shares: securities == land, plant, ...etc; capital carries a par value and converts directly into tangible assets
Information age: a security == a carrier of value; the first investment is priced by agreement between founder and investor
pre-money vs post-money: prototype <-> pre-money;
Knowledge workers: no tangible assets, what gets burned is brainpower; founders must beg investors to negotiate at a premium, e.g. NT$3M -> NT$20 per share
So founders get beaten down by the capital market; after one or two rounds they may hold only 10-20%
2) conclusion: only by dropping industrial-age thinking can we plug into the information age
Mr. 司徒:
An underground MBA web magazine; first to put written NBA reviews online (1997), in junior year; after a few years traffic grew and it started drawing attention
In the 1999-2000 era, Article 128 of the Company Act required seven promoters for a company limited by shares, plus a minimum paid-in capital
The capital-verification rules are a sham: just pool the money, let the accountant glance at it and issue a report
Back then scraping together NT$1 million was hard, so there was no quick way to get a legal entity in place to negotiate with investors
The ending was joining someone else's internet company as an employee
paper company: guys look, we can really create something!
The people contributing the technology were foreigners, selling it into Taiwan; but under the foreign-investment statute you need accountants to review the business plan, etc.
Nearly every startup that came to Taiwan wanting to contribute technology in kind found it impossible; in the end it has to be done in two layers through cash: the technology contributor first sells the technology, then uses the proceeds to buy equity, which means two layers of tax...
Closely held companies: park that, come back to it later
Mr.詹:
Paid-in capital -> a concept meant to protect creditors and transaction safety, which ends up jamming everything downstream (insistence on the three capital principles)
Fragmented regulation: whatever comes to mind gets written into the Company Act; small companies' problems are never considered
Favoring foreigners: foreigners get the better deal, e.g. the Company Act lets companies with foreign ownership above 40% escape the restrictions on technology shares
Mr.??:
Is the Company Act fragmented? It is mostly principle-level rules; under the constitutional principle of legal reservation, "if the law does not provide for it, you may not do it"
Who exactly are the administrative agencies protecting? The government treats consumers as idiots and founders as idiots
Company autonomy? Shareholder agreement -> self-governance written directly into the articles of incorporation
Shares can carry a personal element: above a certain ownership threshold some behaviour needs restricting
Shares were never a purely objective form; they differ with the company's form and its industry
Eliminate the difference between debt and equity
The high-speed rail's preferred shares are really debt!
The Company Act only polices form, never substance
A loss-making company cannot list, yet what matters for a tech company is its growth
The government decides the company is loss-making, therefore you are not allowed to invest? What?
The three capital principles: a historical burden; once the capital goes in it is just a historical artifact XDDD
羅律師:
Took over the Company Act amendment project more than 10 years ago, plus the Asia-Pacific Regional Operations Center and the NDC's innovation and entrepreneurship rules, all attempts to adjust things from inside the public sector
The Business Mergers and Acquisitions Act -> conflicts with the Company Act, so it can only be pushed gradually as a compromise
Over 10-plus years the Company Act has evolved with very limited results; things can only be handled with workarounds
The biggest deception is a company's paid-in capital
The state of the assets and the net asset value are what is real; paid-in capital is a way of deceiving the public
The ignorant should be educated, not protected
Company law is a competitive market: if you do not compete you get overtaken
Heavier restrictions just make enforcement easier; the regulators know all about nominee shareholders, but as things stand people have to take detours
Golden shares -> shares of unequal value, reserved for the technical team; progress pushing this at the NDC is still very slow
邵老師:
Workarounds do not always succeed, and they burn a great deal of energy, time and imagination
Capital verification is hugely controversial, yet we spend a whole class teaching it while other countries do not need to!
YC -> SAFE, the simple agreement for future equity
How do we lay the same basic foundation everyone else builds on?
The momentum for reform gets worn away step by step; very few people are left on the runway now
Taiwan is very friendly to foreign capital, yet for foreign investors almost nothing has been improved
panel:
It is unfair to most people; Taiwan's environment exposes founders and legal professionals to very high risk. Do you even know how to go to arbitration in Hong Kong?
Taiwan's capital market used to be world-famous, and favourable to small caps
Once that road up front was cut off, the big law firms picked up plenty of business
Bruce:
The law is designed to prevent abuse rather than to enable; back in 1999 the people in charge of the law opened back doors for themselves to exploit
Forcing an industrial-age system to become an information-age one may open a lot of loopholes
Think about it from outside the system: like the UK, Taiwan has plenty of outlying islands, we could give it a try there XDDDDDD
Vicent:
The tax problem XDDD
Taipei City would lose its tax revenue
It ends up as a compromise plan rather than the best plan
Separate operations from company incorporation
詹S:
Interpretive letters: "the experts shot it down" -> who are the experts? We won't tell you XDDD
司徒S:
Entrenched habits that never change are the real problem
The Company Act has been amended three times in five years, and still nothing actually moves
A law without predictability and stability easily creates chaos, so space for self-governance has to be set aside
We have to find a way out from under the historical baggage
羅S:
Article 157: sometimes the interpretive letters override the statute itself
What gets amended is rarely what practice actually needs; the amendments miss the mark badly
M&A: "limited to limited companies and companies limited by shares" -> just make it "limited to companies with legal personality" and that would be enough!!!
When they do want to amend, they can amend recklessly XDDD
e.g. the four pay-raise bills <-> contradict the idea that shareholders are the owners
QA:
Q: This country does not want to forgo tax revenue, but what startups need is to survive
To open a path for startups we should set aside the precedents written for big companies
A: The SME Administration is already making corrections: the value is assessed once the shares are received, and the tax is levied on the gain when they are first traded at a profit
A: Investors bear asymmetric risk; because the structures are inflexible, capital and founders end up in opposition, and tools such as liquidation or capital reduction work against the investor
A: The trend of RSUs replacing options; nobody can wait for the law to be amended XDDD, the most direct approach is simply to carve out something new
A: The system and the rules of the game differ too much from international practice; foreigners find Taiwanese companies too much trouble, and even when they want to invest they add a regulatory discount on top
A: Listing may require a capital increase; since amending the law takes so long, open a gap with external measures first and then amend step by step
A: Taiwan simply lacks the infrastructure a securities market should have; "company autonomy" here means you must operate according to my concept
'Company autonomy in Taiwan means you operate by my rules'
Company ticker 5266: they even had to produce a company-rules checklist XDDD, this is not allowed, that is not allowed...
A: Punishment for economic crime is extremely light; it is a paradise for asset stripping. Around 03-04 the rule that companies above NT$200 million must go public was suddenly scrapped, so nobody knows what the company is actually doing; the CEO mortgaged the plant and wired the money offshore, claiming it was to stock up on materials, but what materials does a software company need
On "mandatory public offering": paid-in capital always ends up at one-hundred-and-some million NT, never over NT$200 million -> respect company autonomy and drop the mandate
The three capital principles and the NT$10 par value are simply absurd; the people who wrote the law had no concept of corporate finance
Is it the administrative officials or the legislators who govern? Piles of rules sit in implementing regulations while the Company Act itself is an extremely hollow framework
The closely-held-company regime looks like it was designed for hobbyist companies
Free economic pilot zones -> a test bed for one country, two systems
This Company Act does quite a lot of damage over there
The legal system needs disruptive innovation before a breakthrough is possible
The Company Act is a basic law; written badly it disrupts every application built on it. The foundation has to move more often to keep up with the times!
After listing the government somehow does no supervision or control; accountants can do far too much, and with enough money anything can be arranged
Why can't there be publicly appointed auditors, rotated regularly? The amounts stripped out of Taiwanese companies are hard to believe
Before ROC year 90 (2001) capital verification was handled by the government; later, short of manpower, it was handed to the accounting firms
The company is in Samoa, move it out quickly XDD
Top-down change needs visionary guidance
The problem of excessive administrative power
Equity crowdfunding -> a pile of "impossible"s
| cc0-1.0 | -4,045,274,517,115,713,000 | 33.045977 | 125 | 0.626772 | false |
garibaldu/boundary-seekers | Boundary Hunter Ideas/TensorFlow/RBF-BH-Network.py | 1 | 6690 | import tensorflow as tf
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import random
import math
np.random.seed(1234)
random.seed(1234)
plt.switch_backend("TkAgg")
def plotScatter(points, color):
xs = [x[0] for x in points]
ys = [y[1] for y in points]
plt.scatter(xs, ys, c=color)
def plot_weights(weights, center, color):
plot_centroid(center)
n = np.array([weights[0] * center[0] + weights[1] * center[1],
-weights[0],
-weights[1]])
byas = -1 * n[0]/n[2]
Xcoef = -1 * n[1]/n[2]
plt.plot([-1.0, 1.0], [-1*Xcoef + byas, Xcoef + byas], '{}-'.format(color))
print("B: " + str(byas))
print("XCoef: " + str(Xcoef))
def plot_centroid(centroid):
plt.plot(centroid[0], centroid[1], markersize=10, marker='x', color='g', mew=5)
def plot_incorrect(point):
plt.plot(point[0], point[1], markersize=5, marker='x', color='r', mew=5)
def generateChevronData():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x >= y and x <= -y:
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generate_split_data():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x < 25 and x > -25 :
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generate_clumps():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, int(totalPoints/2.0)):
x = random.randint(xBounds[0], 0)
y = random.randint(yBounds[0], 0)
if -x - 30 < y:
points.append([x/50.0,y/50.0])
targets.append(1.0)
else:
points.append([x/50.0,y/50.0])
targets.append(0.0)
for i in range(0, int(totalPoints/2.0)):
x = random.randint(0, xBounds[1])
y = random.randint(0, yBounds[1])
if -x + 30 > y:
points.append([x/50.0,y/50.0])
targets.append(1.0)
else:
points.append([x/50.0,y/50.0])
targets.append(0.0)
return np.array(points), np.array(targets)
def generate_rectangle_data():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if np.abs(x) < 15 and np.abs(y) < 15 :
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def sigmoid(phi):
return 1.0/(1.0 + tf.exp(-phi))
points, out = generate_rectangle_data()  # alternatives: generateChevronData(), generate_clumps(), generate_split_data()
in_size = 2
out_size = 1
num_centroids = 1
num_outputs = 1
inputs = tf.placeholder('float64', [in_size])
targets = tf.placeholder('float64', [out_size])
centroids = tf.Variable(np.random.uniform(low=-1.0, high=1.0, size=(num_centroids, in_size)))
betas = tf.Variable(np.repeat(1.0, num_centroids))
hidden_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(num_centroids, in_size)))
output_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(num_outputs, num_centroids + 1)))
input_by_plane = lambda x: tf.subtract(inputs, x)
transformed_by_points = tf.map_fn(input_by_plane, centroids)
# Peform Computation
prob = tf.reduce_sum(tf.multiply(transformed_by_points, hidden_weights), 1)
square_diff = lambda c: tf.reduce_sum(tf.pow(tf.subtract(inputs, c), 2.0))
g = tf.exp(-1.0 * tf.multiply(betas, tf.map_fn(square_diff, centroids)))
hidden_out = sigmoid(tf.multiply(g, prob))#tf.add(0.5 * (1 - g), tf.multiply(g, prob))
#gated = tf.multiply(g, prob)
#hidden_out = sigmoid(gated)
hidden_out_prime = tf.concat([[1.0], hidden_out], 0)
output = sigmoid(tf.matmul(tf.transpose(tf.expand_dims(hidden_out_prime, 1)), tf.transpose(output_weights)))
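# In short, each hidden unit above owns a centroid c, a boundary w and a width
# beta, and for an input x it emits sigmoid( exp(-beta * ||x - c||^2) * (w . (x - c)) ):
# a local hyperplane decision that fades towards a neutral 0.5 far away from its
# centroid. The output layer then mixes these gated "boundary hunters" linearly
# (plus a bias term from the prepended 1.0) before the final sigmoid.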
errors = tf.pow(tf.subtract(tf.expand_dims(targets, 1), output), 2.0)
error = tf.reduce_sum(errors)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(error)
clip_op_betas = tf.assign(betas, tf.clip_by_value(betas, 0, np.infty))
model = tf.global_variables_initializer()
with tf.Session() as session:
session.run(model)
for e in range(10000):
for d in range(len(points)):
session.run(train_op, feed_dict={inputs: points[d], targets: [out[d]]})
session.run(clip_op_betas)
if e % 10 == 0:
err = 0
for d in range(len(points)):
err += session.run(error, feed_dict={inputs: points[d], targets: [out[d]]})
#print(session.run(prob, feed_dict={inputs: points[d], targets: [out[d]]}))
#print(session.run(g, feed_dict={inputs: points[d], targets: [out[d]]}))
print(err)
print(session.run(betas))
incorrect = []
for d in range(len(points)):
o = session.run(output, feed_dict={inputs: points[d], targets: [out[d]]})
if not int(round(o[0,0])) == out[d]:
incorrect.append(points[d])
centroids = session.run(centroids)
betas = session.run(betas)
boundarys = session.run(hidden_weights)
# Plot points on graph
c1 = []
c2 = []
for i in range(0, len(points)):
if out[i] == 0:
c1.append(points[i])
else:
c2.append(points[i])
print("Type 0: ", len(c1))
print("Type 1: ", len(c2))
plotScatter(c1,'y')
plotScatter(c2, 'b')
for centroid in centroids:
plot_centroid(centroid)
for i in range(len(boundarys)):
plot_weights(boundarys[i], centroids[i], 'g')
#for plane in boundarys:
# plot_weights(boundarys, 'g')
for point in incorrect:
plot_incorrect(point)
#plot_weights(final_gate, 'g')
plt.gca().set_aspect('equal')
plt.xlim(xmin=-1.5, xmax=1.5)
plt.ylim(ymin=-1.5, ymax=1.5)
plt.show()
| mit | -5,844,633,788,650,325,000 | 26.991632 | 108 | 0.582511 | false |
hamdyaea/Daylight-GNU-Linux | Version4/usr/share/daylight/PySpeaking-GUI/speak.py | 1 | 1687 | # Developer : Hamdy Abou El Anein
import os
import sys
from easygui import *
print("IMPORTANT\n\nThis software work only if google_speech is installed on the system. To install it go to this link please : https://pypi.python.org/pypi/google_speech/\n\n")
def language():
global lang
msg = "What's the language do you want to make PySpeaking speak ?"
title = "PySpeaking-GUI"
choices = ["English", "French", "German", "Spanish","Japanese","Chinese","Italian","Arabic", "Russian"]
choice = choicebox(msg, title, choices)
if choice == "English":
lang = ' en '
textToSpeak()
elif choice == "French":
lang = ' fr '
textToSpeak()
elif choice == "German":
lang = ' de '
textToSpeak()
elif choice == "Spanish":
lang = ' es '
textToSpeak()
elif choice == "Japanese":
lang = ' ja '
textToSpeak()
elif choice == "Chinese":
lang = ' zh-CN '
textToSpeak()
elif choice == "Italian":
lang = ' it '
textToSpeak()
elif choice == "Arabic":
lang = ' ar '
textToSpeak()
elif choice == "Russian":
lang = ' ru '
textToSpeak()
else:
sys.exit(0)
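# Note: the if/elif chain above could be collapsed into a lookup table. A minimal
# sketch (behaviour assumed equivalent; kept as a comment so nothing here runs):
#
# LANG_CODES = {"English": " en ", "French": " fr ", "German": " de ",
#               "Spanish": " es ", "Japanese": " ja ", "Chinese": " zh-CN ",
#               "Italian": " it ", "Arabic": " ar ", "Russian": " ru "}
# lang = LANG_CODES.get(choice)
# if lang is None:
#     sys.exit(0)
# textToSpeak()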
def textToSpeak():
global fieldValues
msg = "Enter the text to speak"
title = "Enter the text to speak"
fieldNames = ["Text to speak"]
fieldValues = []
fieldValues = multenterbox(msg, title, fieldNames)
fieldValues[0]
speak()
def speak():
global lang, fieldValues
textValue = "google_speech -l" +str(lang) +str(" \"")+str(fieldValues[0].replace("'","\'"))+str("\"")
os.system(textValue)
language() | gpl-3.0 | 3,731,881,394,284,948,500 | 24.575758 | 177 | 0.578542 | false |
dubvulture/pyku | pyku/digit_classifier.py | 1 | 4575 | # coding=utf-8
import os
import cv2
import numpy as np
from .utils import DSIZE, TRAIN_DATA
class DigitClassifier(object):
@staticmethod
def _feature(image):
"""
It's faster but still accurate enough with DSIZE = 14.
~0.9983 precision and recall
:param image:
:return: raw pixels as feature vector
"""
image = cv2.resize(image, None, fx=DSIZE/28, fy=DSIZE/28,
interpolation=cv2.INTER_LINEAR)
ret = image.astype(np.float32) / 255
return ret.ravel()
@staticmethod
def _zoning(image):
"""
It works better with DSIZE = 28
~0.9967 precision and recall
:param image:
:return: #pixels/area ratio of each zone (7x7) as feature vector
"""
zones = []
for i in range(0, 28, 7):
for j in range(0, 28, 7):
roi = image[i:i+7, j:j+7]
val = (np.sum(roi)/255) / 49.
zones.append(val)
return np.array(zones, np.float32)
def __init__(self,
saved_model=None,
train_folder=None,
feature=_feature.__func__):
"""
:param saved_model: optional saved train set and labels as .npz
:param train_folder: optional custom train data to process
:param feature: feature function - compatible with saved_model
"""
self.feature = feature
if train_folder is not None:
self.train_set, self.train_labels, self.model = \
self.create_model(train_folder)
else:
if cv2.__version__[0] == '2':
self.model = cv2.KNearest()
else:
self.model = cv2.ml.KNearest_create()
if saved_model is None:
saved_model = TRAIN_DATA+'raw_pixel_data.npz'
with np.load(saved_model) as data:
self.train_set = data['train_set']
self.train_labels = data['train_labels']
if cv2.__version__[0] == '2':
self.model.train(self.train_set, self.train_labels)
else:
self.model.train(self.train_set, cv2.ml.ROW_SAMPLE,
self.train_labels)
def create_model(self, train_folder):
"""
Return the training set, its labels and the trained model
:param train_folder: folder where to retrieve data
:return: (train_set, train_labels, trained_model)
"""
digits = []
labels = []
for n in range(1, 10):
folder = train_folder + str(n)
samples = [pic for pic in os.listdir(folder)
if os.path.isfile(os.path.join(folder, pic))]
for sample in samples:
image = cv2.imread(os.path.join(folder, sample))
# Expecting black on white
image = 255 - cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, image = cv2.threshold(image, 0, 255,
cv2.THRESH_BINARY + cv2.THRESH_OTSU)
feat = self.feature(image)
digits.append(feat)
labels.append(n)
digits = np.array(digits, np.float32)
labels = np.array(labels, np.float32)
if cv2.__version__[0] == '2':
model = cv2.KNearest()
model.train(digits, labels)
else:
model = cv2.ml.KNearest_create()
model.train(digits, cv2.ml.ROW_SAMPLE, labels)
return digits, labels, model
def classify(self, image):
"""
Given a 28x28 image, returns an array representing the 2 highest
probable prediction
:param image:
:return: array of 2 highest prob-digit tuples
"""
if cv2.__version__[0] == '2':
res = self.model.find_nearest(np.array([self.feature(image)]), k=11)
else:
res = self.model.findNearest(np.array([self.feature(image)]), k=11)
hist = np.histogram(res[2], bins=9, range=(1, 10), normed=True)[0]
zipped = sorted(zip(hist, np.arange(1, 10)), reverse=True)
return np.array(zipped[:2])
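# Rough usage sketch (file name and preprocessing are assumptions for
# illustration, not part of this class): load a digit image, invert it to
# white-on-black as in create_model(), resize to 28x28, then classify:
#
# classifier = DigitClassifier()
# img = 255 - cv2.cvtColor(cv2.imread('digit.png'), cv2.COLOR_BGR2GRAY)
# print(classifier.classify(cv2.resize(img, (28, 28))))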
def save_training(self, filename):
"""
Save traning set and labels of current model
:param filename: filename of new data.npz, it will be saved in 'train/'
"""
np.savez(os.path.join(TRAIN_DATA, filename),
train_set=self.train_set,
train_labels=self.train_labels) | gpl-3.0 | -8,652,264,273,431,171,000 | 35.903226 | 80 | 0.532022 | false |
jorgecarleitao/pt_law_parser | tests/test_full_doc.py | 1 | 3393 | from tests.test_basic import TestDocument
class TestDocStart(TestDocument):
"""
Test case intended to show that summary pages of a document are correctly
ignored.
"""
def test_107190_page_1(self):
file_name = 'tests/samples/107190.pdf'
self._run_test(file_name, [0])
self.assertEqual(0, len(self.device.titles))
self.assertEqual(0, len(self.device.result))
def test_113604_page_1(self):
file_name = 'tests/samples/113604.pdf'
self._run_test(file_name, [0])
self.assertEqual(0, len(self.device.titles))
self.assertEqual(0, len(self.device.result))
def test_113604_page_2(self):
file_name = 'tests/samples/113604.pdf'
self._run_test(file_name, [1])
self.assertEqual(0, len(self.device.titles))
self.assertEqual(0, len(self.device.result))
def test_130252(self):
file_name = 'tests/samples/130252.pdf'
self._run_test(file_name, [0])
self.assertEqual(self.device.result, [])
def test_131287(self):
file_name = 'tests/samples/131287.pdf'
self._run_test(file_name, [0])
self.assertEqual(self.device.result, [])
def test_131371(self):
file_name = 'tests/samples/131371.pdf'
self._run_test(file_name, [0, 1])
self.assertEqual(self.device.result, [])
def test_131783(self):
file_name = 'tests/samples/131783.pdf'
self._run_test(file_name, [0])
self.assertEqual(self.device.result, [])
def test_131869(self):
file_name = 'tests/samples/131869.pdf'
self._run_test(file_name, [0])
self.assertEqual(self.device.result, [])
def test_137070(self):
"""
Document from 2014.
"""
file_name = 'tests/samples/137070.pdf'
self._run_test(file_name, [0, 1, 2, 3])
self.assertEqual(self.device.result, [])
class TestDocEnd(TestDocument):
"""
Tests that we correctly ignore the content of the last pages.
"""
def test_130252(self):
file_name = 'tests/samples/130252.pdf'
self._run_test(file_name, [18, 19])
self.assertEqual(u'<p>O presente',
self.device.result[-1].as_html()[:13])
def test_131783(self):
file_name = 'tests/samples/131783.pdf'
self._run_test(file_name, [3])
# the document contains only 4 paragraphs.
self.assertEqual(4, len(self.device.result))
def test_133880(self):
file_name = 'tests/samples/133880.pdf'
self._run_test(file_name, [15])
self.assertEqual(0, len(self.device.titles))
self.assertTrue(self.device.result[-1].as_html().startswith('<table>'))
def test_107190_empty_page(self):
file_name = 'tests/samples/107190.pdf'
self._run_test(file_name, [14])
self.assertEqual(0, len(self.device.titles))
self.assertEqual(0, len(self.device.result))
def test_107190(self):
file_name = 'tests/samples/107190.pdf'
self._run_test(file_name, [15])
self.assertEqual(0, len(self.device.titles))
self.assertEqual(0, len(self.device.result))
def test_137056(self):
file_name = 'tests/samples/137056.pdf'
self._run_test(file_name, [5])
self.assertEqual(0, len(self.device.titles))
self.assertEqual(0, len(self.device.result))
| mit | 585,957,194,112,291,200 | 28.763158 | 79 | 0.605659 | false |
darrencheng0817/AlgorithmLearning | Python/leetcode/MergeKLists.py | 1 | 1442 |
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
from heapq import heappop, heapreplace, heapify
res = ListNode(0)
p = res
# Store (value, list index, node) so ties between equal values are broken by
# the index instead of comparing ListNode objects, which Python 3 cannot order.
h = [(n.val, i, n) for i, n in enumerate(lists) if n]
heapify(h)
while h:
value, i, minNode = h[0]
p.next = minNode
if not minNode.next:
heappop(h)
else:
heapreplace(h, (minNode.next.val, i, minNode.next))
p = p.next
return res.next
def mergeKLists2(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
from heapq import heappush, heappop, heapify
h = []
res = ListNode(0)
p = res
# Same tie-breaking trick as above: push (value, index, node) tuples.
for i, n in enumerate(lists):
if n:
h.append((n.val, i, n))
heapify(h)
while h:
value, i, minNode = heappop(h)
p.next = minNode
if minNode.next:
heappush(h, (minNode.next.val, i, minNode.next))
p = p.next
return res.next
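# Both variants repeatedly take the smallest current head among the k lists, so
# for N total nodes they run in O(N log k) time with O(k) extra heap space; the
# list index kept in the heap tuples only breaks ties between equal values so
# that ListNode objects never have to be compared directly.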
so=Solution()
l1=ListNode(3)
l1.next=ListNode(5)
l1.next.next=ListNode(6)
l2=ListNode(7)
l2.next=ListNode(9)
input=[l1,l2]
res=so.mergeKLists2(input)
while res:
print(res.val)
res=res.next
| mit | -99,459,296,128,431,540 | 22.639344 | 65 | 0.511789 | false |
pkill-nine/qutebrowser | tests/unit/completion/test_models.py | 1 | 24554 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Ryan Roden-Corrent (rcorre) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for completion models."""
import collections
from datetime import datetime
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.completion.models import miscmodels, urlmodel, configmodel
from qutebrowser.config import sections, value
from qutebrowser.utils import objreg
from qutebrowser.browser import history
def _check_completions(model, expected):
"""Check that a model contains the expected items in any order.
Args:
expected: A dict of form
{
CategoryName: [(name, desc, misc), ...],
CategoryName: [(name, desc, misc), ...],
...
}
"""
actual = {}
assert model.rowCount() == len(expected)
for i in range(0, model.rowCount()):
catidx = model.index(i, 0)
catname = model.data(catidx)
actual[catname] = []
for j in range(model.rowCount(catidx)):
name = model.data(model.index(j, 0, parent=catidx))
desc = model.data(model.index(j, 1, parent=catidx))
misc = model.data(model.index(j, 2, parent=catidx))
actual[catname].append((name, desc, misc))
assert actual == expected
# sanity-check the column_widths
assert len(model.column_widths) == 3
assert sum(model.column_widths) == 100
def _patch_cmdutils(monkeypatch, stubs, symbol):
"""Patch the cmdutils module to provide fake commands."""
cmd_utils = stubs.FakeCmdUtils({
'stop': stubs.FakeCommand(name='stop', desc='stop qutebrowser'),
'drop': stubs.FakeCommand(name='drop', desc='drop all user data'),
'roll': stubs.FakeCommand(name='roll', desc='never gonna give you up'),
'hide': stubs.FakeCommand(name='hide', hide=True),
'depr': stubs.FakeCommand(name='depr', deprecated=True),
})
monkeypatch.setattr(symbol, cmd_utils)
def _patch_configdata(monkeypatch, stubs, symbol):
"""Patch the configdata module to provide fake data."""
data = collections.OrderedDict([
('general', sections.KeyValue(
('time',
value.SettingValue(stubs.FakeConfigType('fast', 'slow'),
default='slow'),
'Is an illusion.\n\nLunchtime doubly so.'),
('volume',
value.SettingValue(stubs.FakeConfigType('0', '11'),
default='11'),
'Goes to 11'))),
('ui', sections.KeyValue(
('gesture',
value.SettingValue(stubs.FakeConfigType(('on', 'off')),
default='off'),
'Waggle your hands to control qutebrowser'),
('mind',
value.SettingValue(stubs.FakeConfigType(('on', 'off')),
default='off'),
'Enable mind-control ui (experimental)'),
('voice',
value.SettingValue(stubs.FakeConfigType(('on', 'off')),
default='off'),
'Whether to respond to voice commands'))),
('searchengines', sections.ValueList(
stubs.FakeConfigType(), stubs.FakeConfigType(),
('DEFAULT', 'https://duckduckgo.com/?q={}'),
)),
])
monkeypatch.setattr(symbol, data)
def _patch_config_section_desc(monkeypatch, stubs, symbol):
"""Patch the configdata module to provide fake SECTION_DESC."""
section_desc = {
'general': 'General/miscellaneous options.',
'ui': 'General options related to the user interface.',
'searchengines': 'Definitions of search engines ...',
}
monkeypatch.setattr(symbol, section_desc)
@pytest.fixture
def quickmarks(quickmark_manager_stub):
"""Pre-populate the quickmark-manager stub with some quickmarks."""
quickmark_manager_stub.marks = collections.OrderedDict([
('aw', 'https://wiki.archlinux.org'),
('wiki', 'https://wikipedia.org'),
('ddg', 'https://duckduckgo.com'),
])
return quickmark_manager_stub
@pytest.fixture
def bookmarks(bookmark_manager_stub):
"""Pre-populate the bookmark-manager stub with some quickmarks."""
bookmark_manager_stub.marks = collections.OrderedDict([
('https://github.com', 'GitHub'),
('https://python.org', 'Welcome to Python.org'),
('http://qutebrowser.org', 'qutebrowser | qutebrowser'),
])
return bookmark_manager_stub
@pytest.fixture
def web_history(init_sql, stubs, config_stub):
"""Fixture which provides a web-history object."""
config_stub.data['completion'] = {'timestamp-format': '%Y-%m-%d',
'web-history-max-items': -1}
stub = history.WebHistory()
objreg.register('web-history', stub)
yield stub
objreg.delete('web-history')
@pytest.fixture
def web_history_populated(web_history):
"""Pre-populate the web-history database."""
web_history.add_url(
url=QUrl('http://qutebrowser.org'),
title='qutebrowser',
atime=datetime(2015, 9, 5).timestamp()
)
web_history.add_url(
url=QUrl('https://python.org'),
title='Welcome to Python.org',
atime=datetime(2016, 3, 8).timestamp()
)
web_history.add_url(
url=QUrl('https://github.com'),
title='https://github.com',
atime=datetime(2016, 5, 1).timestamp()
)
return web_history
def test_command_completion(qtmodeltester, monkeypatch, stubs, config_stub,
key_config_stub):
"""Test the results of command completion.
Validates that:
- only non-hidden and non-deprecated commands are included
- the command description is shown in the desc column
- the binding (if any) is shown in the misc column
- aliases are included
"""
_patch_cmdutils(monkeypatch, stubs,
'qutebrowser.completion.models.miscmodels.cmdutils')
config_stub.data['aliases'] = {'rock': 'roll'}
key_config_stub.set_bindings_for('normal', {'s': 'stop',
'rr': 'roll',
'ro': 'rock'})
model = miscmodels.command()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
"Commands": [
('drop', 'drop all user data', ''),
('rock', "Alias for 'roll'", 'ro'),
('roll', 'never gonna give you up', 'rr'),
('stop', 'stop qutebrowser', 's'),
]
})
def test_help_completion(qtmodeltester, monkeypatch, stubs, key_config_stub):
"""Test the results of command completion.
Validates that:
- only non-deprecated commands are included
- the command description is shown in the desc column
- the binding (if any) is shown in the misc column
- aliases are included
- only the first line of a multiline description is shown
"""
module = 'qutebrowser.completion.models.miscmodels'
key_config_stub.set_bindings_for('normal', {'s': 'stop', 'rr': 'roll'})
_patch_cmdutils(monkeypatch, stubs, module + '.cmdutils')
_patch_configdata(monkeypatch, stubs, module + '.configdata.DATA')
model = miscmodels.helptopic()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
"Commands": [
(':drop', 'drop all user data', ''),
(':hide', '', ''),
(':roll', 'never gonna give you up', 'rr'),
(':stop', 'stop qutebrowser', 's'),
],
"Settings": [
('general->time', 'Is an illusion.', None),
('general->volume', 'Goes to 11', None),
('searchengines->DEFAULT', '', None),
('ui->gesture', 'Waggle your hands to control qutebrowser', None),
('ui->mind', 'Enable mind-control ui (experimental)', None),
('ui->voice', 'Whether to respond to voice commands', None),
]
})
def test_quickmark_completion(qtmodeltester, quickmarks):
"""Test the results of quickmark completion."""
model = miscmodels.quickmark()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
"Quickmarks": [
('aw', 'https://wiki.archlinux.org', None),
('wiki', 'https://wikipedia.org', None),
('ddg', 'https://duckduckgo.com', None),
]
})
@pytest.mark.parametrize('row, removed', [
(0, 'aw'),
(1, 'wiki'),
(2, 'ddg'),
])
def test_quickmark_completion_delete(qtmodeltester, quickmarks, row, removed):
"""Test deleting a quickmark from the quickmark completion model."""
model = miscmodels.quickmark()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
parent = model.index(0, 0)
idx = model.index(row, 0, parent)
before = set(quickmarks.marks.keys())
model.delete_cur_item(idx)
after = set(quickmarks.marks.keys())
assert before.difference(after) == {removed}
def test_bookmark_completion(qtmodeltester, bookmarks):
"""Test the results of bookmark completion."""
model = miscmodels.bookmark()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
"Bookmarks": [
('https://github.com', 'GitHub', None),
('https://python.org', 'Welcome to Python.org', None),
('http://qutebrowser.org', 'qutebrowser | qutebrowser', None),
]
})
@pytest.mark.parametrize('row, removed', [
(0, 'https://github.com'),
(1, 'https://python.org'),
(2, 'http://qutebrowser.org'),
])
def test_bookmark_completion_delete(qtmodeltester, bookmarks, row, removed):
"""Test deleting a quickmark from the quickmark completion model."""
model = miscmodels.bookmark()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
parent = model.index(0, 0)
idx = model.index(row, 0, parent)
before = set(bookmarks.marks.keys())
model.delete_cur_item(idx)
after = set(bookmarks.marks.keys())
assert before.difference(after) == {removed}
def test_url_completion(qtmodeltester, web_history_populated,
quickmarks, bookmarks):
"""Test the results of url completion.
Verify that:
- quickmarks, bookmarks, and urls are included
- entries are sorted by access time
- only the most recent entry is included for each url
"""
model = urlmodel.url()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
"Quickmarks": [
('https://wiki.archlinux.org', 'aw', None),
('https://wikipedia.org', 'wiki', None),
('https://duckduckgo.com', 'ddg', None),
],
"Bookmarks": [
('https://github.com', 'GitHub', None),
('https://python.org', 'Welcome to Python.org', None),
('http://qutebrowser.org', 'qutebrowser | qutebrowser', None),
],
"History": [
('https://github.com', 'https://github.com', '2016-05-01'),
('https://python.org', 'Welcome to Python.org', '2016-03-08'),
('http://qutebrowser.org', 'qutebrowser', '2015-09-05'),
],
})
@pytest.mark.parametrize('url, title, pattern, rowcount', [
('example.com', 'Site Title', '', 1),
('example.com', 'Site Title', 'ex', 1),
('example.com', 'Site Title', 'am', 1),
('example.com', 'Site Title', 'com', 1),
('example.com', 'Site Title', 'ex com', 1),
('example.com', 'Site Title', 'com ex', 0),
('example.com', 'Site Title', 'ex foo', 0),
('example.com', 'Site Title', 'foo com', 0),
('example.com', 'Site Title', 'exm', 0),
('example.com', 'Site Title', 'Si Ti', 1),
('example.com', 'Site Title', 'Ti Si', 0),
('example.com', '', 'foo', 0),
('foo_bar', '', '_', 1),
('foobar', '', '_', 0),
('foo%bar', '', '%', 1),
('foobar', '', '%', 0),
])
def test_url_completion_pattern(web_history, quickmark_manager_stub,
bookmark_manager_stub, url, title, pattern,
rowcount):
"""Test that url completion filters by url and title."""
web_history.add_url(QUrl(url), title)
model = urlmodel.url()
model.set_pattern(pattern)
# 2, 0 is History
assert model.rowCount(model.index(2, 0)) == rowcount
def test_url_completion_delete_bookmark(qtmodeltester, bookmarks,
web_history, quickmarks):
"""Test deleting a bookmark from the url completion model."""
model = urlmodel.url()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
parent = model.index(1, 0)
idx = model.index(1, 0, parent)
# sanity checks
assert model.data(parent) == "Bookmarks"
assert model.data(idx) == 'https://python.org'
assert 'https://github.com' in bookmarks.marks
len_before = len(bookmarks.marks)
model.delete_cur_item(idx)
assert 'https://python.org' not in bookmarks.marks
assert len_before == len(bookmarks.marks) + 1
def test_url_completion_delete_quickmark(qtmodeltester,
quickmarks, web_history, bookmarks,
qtbot):
"""Test deleting a bookmark from the url completion model."""
model = urlmodel.url()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
parent = model.index(0, 0)
idx = model.index(0, 0, parent)
# sanity checks
assert model.data(parent) == "Quickmarks"
assert model.data(idx) == 'https://wiki.archlinux.org'
assert 'ddg' in quickmarks.marks
len_before = len(quickmarks.marks)
model.delete_cur_item(idx)
assert 'aw' not in quickmarks.marks
assert len_before == len(quickmarks.marks) + 1
def test_url_completion_delete_history(qtmodeltester,
web_history_populated,
quickmarks, bookmarks):
"""Test deleting a history entry."""
model = urlmodel.url()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
parent = model.index(2, 0)
idx = model.index(1, 0, parent)
# sanity checks
assert model.data(parent) == "History"
assert model.data(idx) == 'https://python.org'
assert 'https://python.org' in web_history_populated
model.delete_cur_item(idx)
assert 'https://python.org' not in web_history_populated
def test_url_completion_zero_limit(config_stub, web_history, quickmarks,
bookmarks):
"""Make sure there's no history if the limit was set to zero."""
config_stub.data['completion']['web-history-max-items'] = 0
model = urlmodel.url()
model.set_pattern('')
category = model.index(2, 0) # "History" normally
assert model.data(category) is None
def test_session_completion(qtmodeltester, session_manager_stub):
session_manager_stub.sessions = ['default', '1', '2']
model = miscmodels.session()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
"Sessions": [('default', None, None),
('1', None, None),
('2', None, None)]
})
def test_tab_completion(qtmodeltester, fake_web_tab, app_stub, win_registry,
tabbed_browser_stubs):
tabbed_browser_stubs[0].tabs = [
fake_web_tab(QUrl('https://github.com'), 'GitHub', 0),
fake_web_tab(QUrl('https://wikipedia.org'), 'Wikipedia', 1),
fake_web_tab(QUrl('https://duckduckgo.com'), 'DuckDuckGo', 2),
]
tabbed_browser_stubs[1].tabs = [
fake_web_tab(QUrl('https://wiki.archlinux.org'), 'ArchWiki', 0),
]
model = miscmodels.buffer()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
'0': [
('0/1', 'https://github.com', 'GitHub'),
('0/2', 'https://wikipedia.org', 'Wikipedia'),
('0/3', 'https://duckduckgo.com', 'DuckDuckGo')
],
'1': [
('1/1', 'https://wiki.archlinux.org', 'ArchWiki'),
]
})
def test_tab_completion_delete(qtmodeltester, fake_web_tab, app_stub,
win_registry, tabbed_browser_stubs):
"""Verify closing a tab by deleting it from the completion widget."""
tabbed_browser_stubs[0].tabs = [
fake_web_tab(QUrl('https://github.com'), 'GitHub', 0),
fake_web_tab(QUrl('https://wikipedia.org'), 'Wikipedia', 1),
fake_web_tab(QUrl('https://duckduckgo.com'), 'DuckDuckGo', 2)
]
tabbed_browser_stubs[1].tabs = [
fake_web_tab(QUrl('https://wiki.archlinux.org'), 'ArchWiki', 0),
]
model = miscmodels.buffer()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
parent = model.index(0, 0)
idx = model.index(1, 0, parent)
# sanity checks
assert model.data(parent) == "0"
assert model.data(idx) == '0/2'
model.delete_cur_item(idx)
actual = [tab.url() for tab in tabbed_browser_stubs[0].tabs]
assert actual == [QUrl('https://github.com'),
QUrl('https://duckduckgo.com')]
def test_setting_section_completion(qtmodeltester, monkeypatch, stubs):
module = 'qutebrowser.completion.models.configmodel'
_patch_configdata(monkeypatch, stubs, module + '.configdata.DATA')
_patch_config_section_desc(monkeypatch, stubs,
module + '.configdata.SECTION_DESC')
model = configmodel.section()
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
"Sections": [
('general', 'General/miscellaneous options.', None),
('searchengines', 'Definitions of search engines ...', None),
('ui', 'General options related to the user interface.', None),
]
})
def test_setting_option_completion(qtmodeltester, monkeypatch, stubs,
config_stub):
module = 'qutebrowser.completion.models.configmodel'
_patch_configdata(monkeypatch, stubs, module + '.configdata.DATA')
config_stub.data = {'ui': {'gesture': 'off',
'mind': 'on',
'voice': 'sometimes'}}
model = configmodel.option('ui')
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
"ui": [
('gesture', 'Waggle your hands to control qutebrowser', 'off'),
('mind', 'Enable mind-control ui (experimental)', 'on'),
('voice', 'Whether to respond to voice commands', 'sometimes'),
]
})
def test_setting_option_completion_empty(monkeypatch, stubs, config_stub):
module = 'qutebrowser.completion.models.configmodel'
_patch_configdata(monkeypatch, stubs, module + '.configdata.DATA')
assert configmodel.option('typo') is None
def test_setting_option_completion_valuelist(qtmodeltester, monkeypatch, stubs,
config_stub):
module = 'qutebrowser.completion.models.configmodel'
_patch_configdata(monkeypatch, stubs, module + '.configdata.DATA')
config_stub.data = {
'searchengines': {
'DEFAULT': 'https://duckduckgo.com/?q={}'
}
}
model = configmodel.option('searchengines')
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
'searchengines': [('DEFAULT', '', 'https://duckduckgo.com/?q={}')]
})
def test_setting_value_completion(qtmodeltester, monkeypatch, stubs,
config_stub):
module = 'qutebrowser.completion.models.configmodel'
_patch_configdata(monkeypatch, stubs, module + '.configdata.DATA')
config_stub.data = {'general': {'volume': '0'}}
model = configmodel.value('general', 'volume')
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
"Current/Default": [
('0', 'Current value', None),
('11', 'Default value', None),
],
"Completions": [
('0', '', None),
('11', '', None),
]
})
def test_setting_value_completion_empty(monkeypatch, stubs, config_stub):
module = 'qutebrowser.completion.models.configmodel'
_patch_configdata(monkeypatch, stubs, module + '.configdata.DATA')
config_stub.data = {'general': {}}
assert configmodel.value('general', 'typo') is None
def test_bind_completion(qtmodeltester, monkeypatch, stubs, config_stub,
key_config_stub):
"""Test the results of keybinding command completion.
Validates that:
- only non-hidden and non-deprecated commands are included
- the command description is shown in the desc column
- the binding (if any) is shown in the misc column
- aliases are included
"""
_patch_cmdutils(monkeypatch, stubs,
'qutebrowser.completion.models.miscmodels.cmdutils')
config_stub.data['aliases'] = {'rock': 'roll'}
key_config_stub.set_bindings_for('normal', {'s': 'stop now',
'rr': 'roll',
'ro': 'rock'})
model = miscmodels.bind('s')
model.set_pattern('')
qtmodeltester.data_display_may_return_none = True
qtmodeltester.check(model)
_check_completions(model, {
"Current": [
('stop now', 'stop qutebrowser', 's'),
],
"Commands": [
('drop', 'drop all user data', ''),
('hide', '', ''),
('rock', "Alias for 'roll'", 'ro'),
('roll', 'never gonna give you up', 'rr'),
('stop', 'stop qutebrowser', ''),
]
})
def test_url_completion_benchmark(benchmark,
quickmark_manager_stub,
bookmark_manager_stub,
web_history):
"""Benchmark url completion."""
r = range(100000)
entries = {
'last_atime': list(r),
'url': ['http://example.com/{}'.format(i) for i in r],
'title': ['title{}'.format(i) for i in r]
}
web_history.completion.insert_batch(entries)
quickmark_manager_stub.marks = collections.OrderedDict([
('title{}'.format(i), 'example.com/{}'.format(i))
for i in range(1000)])
bookmark_manager_stub.marks = collections.OrderedDict([
('example.com/{}'.format(i), 'title{}'.format(i))
for i in range(1000)])
def bench():
model = urlmodel.url()
model.set_pattern('')
model.set_pattern('e')
model.set_pattern('ex')
model.set_pattern('ex ')
model.set_pattern('ex 1')
model.set_pattern('ex 12')
model.set_pattern('ex 123')
benchmark(bench)
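# Note: the `benchmark` fixture used above comes from the pytest-benchmark
# plugin; assuming it is installed, this benchmark alone can be run with e.g.
# pytest tests/unit/completion/test_models.py -k benchmark --benchmark-only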
| gpl-3.0 | -5,333,383,868,739,639,000 | 35.002933 | 79 | 0.590332 | false |
captainsafia/agate | agate/columns/base.py | 1 | 3431 | #!/usr/bin/env python
from collections import Mapping, Sequence
try:
from collections import OrderedDict
except ImportError: #pragma: no cover
from ordereddict import OrderedDict
import six
from agate.exceptions import ColumnDoesNotExistError
from agate.utils import memoize
class ColumnMapping(Mapping):
"""
Proxy access to :class:`Column` instances for :class:`.Table`.
:param table: :class:`.Table`.
"""
def __init__(self, table):
self._table = table
def __getitem__(self, k):
try:
i = self._table._column_names.index(k)
except ValueError:
raise ColumnDoesNotExistError(k)
return self._table._get_column(i)
def __iter__(self):
return ColumnIterator(self._table)
@memoize
def __len__(self):
return len(self._table._column_names)
class ColumnIterator(six.Iterator):
"""
Iterator over :class:`Column` instances within a :class:`.Table`.
:param table: :class:`.Table`.
"""
def __init__(self, table):
self._table = table
self._i = 0
def __next__(self):
try:
self._table._column_names[self._i]
except IndexError:
raise StopIteration
column = self._table._get_column(self._i)
self._i += 1
return column
class Column(Sequence):
"""
Proxy access to column data. Instances of :class:`Column` should
not be constructed directly. They are created by :class:`.Table`
instances.
:param table: The table that contains this column.
:param index: The index of this column in the table.
"""
def __init__(self, table, index):
self._table = table
self._index = index
def __unicode__(self):
data = self.get_data()
sample = ', '.join(six.text_type(d) for d in data[:5])
if len(data) > 5:
sample = '%s, ...' % sample
sample = '(%s)' % sample
return '<agate.columns.%s: %s>' % (self.__class__.__name__, sample)
def __str__(self):
return str(self.__unicode__())
def __getitem__(self, j):
return self.get_data()[j]
@memoize
def __len__(self):
return len(self.get_data())
def __eq__(self, other):
"""
Ensure equality test with lists works.
"""
return self.get_data() == other
def __ne__(self, other):
"""
Ensure inequality test with lists works.
"""
return not self.__eq__(other)
@memoize
def get_data(self):
"""
Get the data contained in this column as a :class:`tuple`.
"""
return tuple(r[self._index] for r in self._table._data)
@memoize
def get_data_without_nulls(self):
"""
Get the data contained in this column with any null values removed.
"""
return tuple(d for d in self.get_data() if d is not None)
@memoize
def get_data_sorted(self):
"""
Get the data contained in this column sorted.
"""
return sorted(self.get_data())
@memoize
def has_nulls(self):
"""
Returns `True` if this column contains null values.
"""
return None in self.get_data()
def aggregate(self, aggregation):
"""
Apply a :class:`.Aggregation` to this column and return the result.
"""
return aggregation.run(self)
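# Illustrative use of this proxy (the aggregation name below is a placeholder
# for whatever Aggregation subclass is available, not a guaranteed API):
#
# amounts = table.columns['amount']
# if not amounts.has_nulls():
#     total = amounts.aggregate(some_sum_aggregation)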
| mit | -8,266,430,875,048,023,000 | 23.507143 | 75 | 0.568347 | false |
Pulgama/supriya | supriya/nonrealtime/Moment.py | 1 | 1816 | from supriya.nonrealtime.SessionObject import SessionObject
class Moment(SessionObject):
"""
A moment-in-time referencing a singleton non-realtime state.
::
>>> import supriya.nonrealtime
>>> session = supriya.nonrealtime.Session()
>>> moment = session.at(10.5)
"""
### CLASS VARIABLES ###
__documentation_section__ = "Session Objects"
__slots__ = ("_offset", "_propagate", "_session", "_state")
### INITIALIZER ###
def __init__(self, session, offset, state, propagate=True):
SessionObject.__init__(self, session)
self._offset = offset
self._state = state
self._propagate = bool(propagate)
### SPECIAL METHODS ###
def __enter__(self):
self.session.active_moments.append(self)
if self.propagate:
self.session._apply_transitions(self.state.offset)
return self
def __eq__(self, expr):
if not isinstance(expr, type(self)):
return False
if expr.session is not self.session:
return False
return expr.offset == self.offset
def __exit__(self, exc_type, exc_value, traceback):
self.session.active_moments.pop()
if self.propagate:
self.session._apply_transitions(self.state.offset)
def __lt__(self, expr):
if not isinstance(expr, type(self)) or expr.session is not self.session:
raise ValueError(expr)
return self.offset < expr.offset
def __repr__(self):
return "<{} @{!r}>".format(type(self).__name__, self.offset)
### PUBLIC PROPERTIES ###
@property
def offset(self):
return self._offset
@property
def propagate(self):
return self._propagate
@property
def state(self):
return self._state
| mit | -4,129,431,815,755,473,400 | 24.942857 | 80 | 0.590859 | false |
dennisss/sympy | sympy/functions/special/tests/test_error_functions.py | 2 | 24070 | from sympy import (
symbols, expand, expand_func, nan, oo, Float, conjugate, diff,
re, im, Abs, O, factorial, exp_polar, polar_lift, gruntz, limit,
Symbol, I, integrate, S,
sqrt, sin, cos, sinh, cosh, exp, log, pi, EulerGamma,
erf, erfc, erfi, erf2, erfinv, erfcinv, erf2inv,
gamma, uppergamma, loggamma,
Ei, expint, E1, li, Li, Si, Ci, Shi, Chi,
fresnels, fresnelc,
hyper, meijerg)
from sympy.functions.special.error_functions import _erfs, _eis
from sympy.core.function import ArgumentIndexError
from sympy.utilities.pytest import raises
x, y, z = symbols('x,y,z')
w = Symbol("w", real=True)
n = Symbol("n", integer=True)
def test_erf():
assert erf(nan) == nan
assert erf(oo) == 1
assert erf(-oo) == -1
assert erf(0) == 0
assert erf(I*oo) == oo*I
assert erf(-I*oo) == -oo*I
assert erf(-2) == -erf(2)
assert erf(-x*y) == -erf(x*y)
assert erf(-x - y) == -erf(x + y)
assert erf(erfinv(x)) == x
assert erf(erfcinv(x)) == 1 - x
assert erf(erf2inv(0, x)) == x
assert erf(erf2inv(0, erf(erfcinv(1 - erf(erfinv(x)))))) == x
assert erf(I).is_real is False
assert erf(0).is_real is True
assert conjugate(erf(z)) == erf(conjugate(z))
assert erf(x).as_leading_term(x) == 2*x/sqrt(pi)
assert erf(1/x).as_leading_term(x) == erf(1/x)
assert erf(z).rewrite('uppergamma') == sqrt(z**2)*erf(sqrt(z**2))/z
assert erf(z).rewrite('erfc') == S.One - erfc(z)
assert erf(z).rewrite('erfi') == -I*erfi(I*z)
assert erf(z).rewrite('fresnels') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erf(z).rewrite('fresnelc') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erf(z).rewrite('hyper') == 2*z*hyper([S.Half], [3*S.Half], -z**2)/sqrt(pi)
assert erf(z).rewrite('meijerg') == z*meijerg([S.Half], [], [0], [-S.Half], z**2)/sqrt(pi)
assert erf(z).rewrite('expint') == sqrt(z**2)/z - z*expint(S.Half, z**2)/sqrt(S.Pi)
assert limit(exp(x)*exp(x**2)*(erf(x + 1/exp(x)) - erf(x)), x, oo) == \
2/sqrt(pi)
assert limit((1 - erf(z))*exp(z**2)*z, z, oo) == 1/sqrt(pi)
assert limit((1 - erf(x))*exp(x**2)*sqrt(pi)*x, x, oo) == 1
assert limit(((1 - erf(x))*exp(x**2)*sqrt(pi)*x - 1)*2*x**2, x, oo) == -1
assert erf(x).as_real_imag() == \
((erf(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
erf(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
I*(erf(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
erf(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
raises(ArgumentIndexError, lambda: erf(x).fdiff(2))
def test_erf_series():
assert erf(x).series(x, 0, 7) == 2*x/sqrt(pi) - \
2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
def test_erf_evalf():
assert abs( erf(Float(2.0)) - 0.995322265 ) < 1E-8 # XXX
def test__erfs():
assert _erfs(z).diff(z) == -2/sqrt(S.Pi) + 2*z*_erfs(z)
assert _erfs(1/z).series(z) == \
z/sqrt(pi) - z**3/(2*sqrt(pi)) + 3*z**5/(4*sqrt(pi)) + O(z**6)
assert expand(erf(z).rewrite('tractable').diff(z).rewrite('intractable')) \
== erf(z).diff(z)
assert _erfs(z).rewrite("intractable") == (-erf(z) + 1)*exp(z**2)
def test_erfc():
assert erfc(nan) == nan
assert erfc(oo) == 0
assert erfc(-oo) == 2
assert erfc(0) == 1
assert erfc(I*oo) == -oo*I
assert erfc(-I*oo) == oo*I
assert erfc(-x) == S(2) - erfc(x)
assert erfc(erfcinv(x)) == x
assert erfc(I).is_real is False
assert erfc(0).is_real is True
assert conjugate(erfc(z)) == erfc(conjugate(z))
assert erfc(x).as_leading_term(x) == S.One
assert erfc(1/x).as_leading_term(x) == erfc(1/x)
assert erfc(z).rewrite('erf') == 1 - erf(z)
assert erfc(z).rewrite('erfi') == 1 + I*erfi(I*z)
assert erfc(z).rewrite('fresnels') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erfc(z).rewrite('fresnelc') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erfc(z).rewrite('hyper') == 1 - 2*z*hyper([S.Half], [3*S.Half], -z**2)/sqrt(pi)
assert erfc(z).rewrite('meijerg') == 1 - z*meijerg([S.Half], [], [0], [-S.Half], z**2)/sqrt(pi)
assert erfc(z).rewrite('uppergamma') == 1 - sqrt(z**2)*erf(sqrt(z**2))/z
assert erfc(z).rewrite('expint') == S.One - sqrt(z**2)/z + z*expint(S.Half, z**2)/sqrt(S.Pi)
assert erfc(x).as_real_imag() == \
((erfc(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
erfc(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
I*(erfc(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
erfc(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
raises(ArgumentIndexError, lambda: erfc(x).fdiff(2))
def test_erfc_series():
assert erfc(x).series(x, 0, 7) == 1 - 2*x/sqrt(pi) + \
2*x**3/3/sqrt(pi) - x**5/5/sqrt(pi) + O(x**7)
def test_erfc_evalf():
assert abs( erfc(Float(2.0)) - 0.00467773 ) < 1E-8 # XXX
def test_erfi():
assert erfi(nan) == nan
assert erfi(oo) == S.Infinity
assert erfi(-oo) == S.NegativeInfinity
assert erfi(0) == S.Zero
assert erfi(I*oo) == I
assert erfi(-I*oo) == -I
assert erfi(-x) == -erfi(x)
assert erfi(I*erfinv(x)) == I*x
assert erfi(I*erfcinv(x)) == I*(1 - x)
assert erfi(I*erf2inv(0, x)) == I*x
assert erfi(I).is_real is False
assert erfi(0).is_real is True
assert conjugate(erfi(z)) == erfi(conjugate(z))
assert erfi(z).rewrite('erf') == -I*erf(I*z)
assert erfi(z).rewrite('erfc') == I*erfc(I*z) - I
assert erfi(z).rewrite('fresnels') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
I*fresnels(z*(1 + I)/sqrt(pi)))
assert erfi(z).rewrite('fresnelc') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
I*fresnels(z*(1 + I)/sqrt(pi)))
assert erfi(z).rewrite('hyper') == 2*z*hyper([S.Half], [3*S.Half], z**2)/sqrt(pi)
assert erfi(z).rewrite('meijerg') == z*meijerg([S.Half], [], [0], [-S.Half], -z**2)/sqrt(pi)
assert erfi(z).rewrite('uppergamma') == (sqrt(-z**2)/z*(uppergamma(S.Half,
-z**2)/sqrt(S.Pi) - S.One))
assert erfi(z).rewrite('expint') == sqrt(-z**2)/z - z*expint(S.Half, -z**2)/sqrt(S.Pi)
assert erfi(x).as_real_imag() == \
((erfi(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
erfi(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
I*(erfi(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
erfi(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
raises(ArgumentIndexError, lambda: erfi(x).fdiff(2))
def test_erfi_series():
assert erfi(x).series(x, 0, 7) == 2*x/sqrt(pi) + \
2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
def test_erfi_evalf():
assert abs( erfi(Float(2.0)) - 18.5648024145756 ) < 1E-13 # XXX
def test_erf2():
assert erf2(0, 0) == S.Zero
assert erf2(x, x) == S.Zero
assert erf2(nan, 0) == nan
assert erf2(-oo, y) == erf(y) + 1
assert erf2( oo, y) == erf(y) - 1
assert erf2( x, oo) == 1 - erf(x)
assert erf2( x,-oo) == -1 - erf(x)
assert erf2(x, erf2inv(x, y)) == y
assert erf2(-x, -y) == -erf2(x,y)
assert erf2(-x, y) == erf(y) + erf(x)
assert erf2( x, -y) == -erf(y) - erf(x)
assert erf2(x, y).rewrite('fresnels') == erf(y).rewrite(fresnels)-erf(x).rewrite(fresnels)
assert erf2(x, y).rewrite('fresnelc') == erf(y).rewrite(fresnelc)-erf(x).rewrite(fresnelc)
assert erf2(x, y).rewrite('hyper') == erf(y).rewrite(hyper)-erf(x).rewrite(hyper)
assert erf2(x, y).rewrite('meijerg') == erf(y).rewrite(meijerg)-erf(x).rewrite(meijerg)
assert erf2(x, y).rewrite('uppergamma') == erf(y).rewrite(uppergamma) - erf(x).rewrite(uppergamma)
assert erf2(x, y).rewrite('expint') == erf(y).rewrite(expint)-erf(x).rewrite(expint)
assert erf2(I, 0).is_real is False
assert erf2(0, 0).is_real is True
#assert conjugate(erf2(x, y)) == erf2(conjugate(x), conjugate(y))
assert erf2(x, y).rewrite('erf') == erf(y) - erf(x)
assert erf2(x, y).rewrite('erfc') == erfc(x) - erfc(y)
assert erf2(x, y).rewrite('erfi') == I*(erfi(I*x) - erfi(I*y))
raises(ArgumentIndexError, lambda: erfi(x).fdiff(3))
def test_erfinv():
assert erfinv(0) == 0
assert erfinv(1) == S.Infinity
assert erfinv(nan) == S.NaN
assert erfinv(erf(w)) == w
assert erfinv(erf(-w)) == -w
assert erfinv(x).diff() == sqrt(pi)*exp(erfinv(x)**2)/2
assert erfinv(z).rewrite('erfcinv') == erfcinv(1-z)
def test_erfinv_evalf():
assert abs( erfinv(Float(0.2)) - 0.179143454621292 ) < 1E-13
def test_erfcinv():
assert erfcinv(1) == 0
assert erfcinv(0) == S.Infinity
assert erfcinv(nan) == S.NaN
assert erfcinv(x).diff() == -sqrt(pi)*exp(erfcinv(x)**2)/2
assert erfcinv(z).rewrite('erfinv') == erfinv(1-z)
def test_erf2inv():
assert erf2inv(0, 0) == S.Zero
assert erf2inv(0, 1) == S.Infinity
assert erf2inv(1, 0) == S.One
assert erf2inv(0, y) == erfinv(y)
assert erf2inv(oo,y) == erfcinv(-y)
assert erf2inv(x, y).diff(x) == exp(-x**2 + erf2inv(x, y)**2)
assert erf2inv(x, y).diff(y) == sqrt(pi)*exp(erf2inv(x, y)**2)/2
# NOTE we multiply by exp_polar(I*pi) and need this to be on the principal
# branch, hence take x in the lower half plane (d=0).
def mytn(expr1, expr2, expr3, x, d=0):
from sympy.utilities.randtest import test_numerically, random_complex_number
subs = {}
for a in expr1.free_symbols:
if a != x:
subs[a] = random_complex_number()
return expr2 == expr3 and test_numerically(expr1.subs(subs),
expr2.subs(subs), x, d=d)
def mytd(expr1, expr2, x):
from sympy.utilities.randtest import test_derivative_numerically, \
random_complex_number
subs = {}
for a in expr1.free_symbols:
if a != x:
subs[a] = random_complex_number()
return expr1.diff(x) == expr2 and test_derivative_numerically(expr1.subs(subs), x)
def tn_branch(func, s=None):
from sympy import I, pi, exp_polar
from random import uniform
def fn(x):
if s is None:
return func(x)
return func(s, x)
c = uniform(1, 5)
expr = fn(c*exp_polar(I*pi)) - fn(c*exp_polar(-I*pi))
eps = 1e-15
expr2 = fn(-c + eps*I) - fn(-c - eps*I)
return abs(expr.n() - expr2.n()).n() < 1e-10
def test_ei():
pos = Symbol('p', positive=True)
neg = Symbol('n', negative=True)
assert Ei(-pos) == Ei(polar_lift(-1)*pos) - I*pi
assert Ei(neg) == Ei(polar_lift(neg)) - I*pi
assert tn_branch(Ei)
assert mytd(Ei(x), exp(x)/x, x)
assert mytn(Ei(x), Ei(x).rewrite(uppergamma),
-uppergamma(0, x*polar_lift(-1)) - I*pi, x)
assert mytn(Ei(x), Ei(x).rewrite(expint),
-expint(1, x*polar_lift(-1)) - I*pi, x)
assert Ei(x).rewrite(expint).rewrite(Ei) == Ei(x)
assert Ei(x*exp_polar(2*I*pi)) == Ei(x) + 2*I*pi
assert Ei(x*exp_polar(-2*I*pi)) == Ei(x) - 2*I*pi
assert mytn(Ei(x), Ei(x).rewrite(Shi), Chi(x) + Shi(x), x)
assert mytn(Ei(x*polar_lift(I)), Ei(x*polar_lift(I)).rewrite(Si),
Ci(x) + I*Si(x) + I*pi/2, x)
assert Ei(log(x)).rewrite(li) == li(x)
assert Ei(2*log(x)).rewrite(li) == li(x**2)
assert gruntz(Ei(x+exp(-x))*exp(-x)*x, x, oo) == 1
assert Ei(x).series(x) == EulerGamma + log(x) + x + x**2/4 + \
x**3/18 + x**4/96 + x**5/600 + O(x**6)
def test_expint():
assert mytn(expint(x, y), expint(x, y).rewrite(uppergamma),
y**(x - 1)*uppergamma(1 - x, y), x)
assert mytd(
expint(x, y), -y**(x - 1)*meijerg([], [1, 1], [0, 0, 1 - x], [], y), x)
assert mytd(expint(x, y), -expint(x - 1, y), y)
assert mytn(expint(1, x), expint(1, x).rewrite(Ei),
-Ei(x*polar_lift(-1)) + I*pi, x)
assert expint(-4, x) == exp(-x)/x + 4*exp(-x)/x**2 + 12*exp(-x)/x**3 \
+ 24*exp(-x)/x**4 + 24*exp(-x)/x**5
assert expint(-S(3)/2, x) == \
exp(-x)/x + 3*exp(-x)/(2*x**2) - 3*sqrt(pi)*erf(sqrt(x))/(4*x**S('5/2')) \
+ 3*sqrt(pi)/(4*x**S('5/2'))
assert tn_branch(expint, 1)
assert tn_branch(expint, 2)
assert tn_branch(expint, 3)
assert tn_branch(expint, 1.7)
assert tn_branch(expint, pi)
assert expint(y, x*exp_polar(2*I*pi)) == \
x**(y - 1)*(exp(2*I*pi*y) - 1)*gamma(-y + 1) + expint(y, x)
assert expint(y, x*exp_polar(-2*I*pi)) == \
x**(y - 1)*(exp(-2*I*pi*y) - 1)*gamma(-y + 1) + expint(y, x)
assert expint(2, x*exp_polar(2*I*pi)) == 2*I*pi*x + expint(2, x)
assert expint(2, x*exp_polar(-2*I*pi)) == -2*I*pi*x + expint(2, x)
assert expint(1, x).rewrite(Ei).rewrite(expint) == expint(1, x)
assert mytn(E1(x), E1(x).rewrite(Shi), Shi(x) - Chi(x), x)
assert mytn(E1(polar_lift(I)*x), E1(polar_lift(I)*x).rewrite(Si),
-Ci(x) + I*Si(x) - I*pi/2, x)
assert mytn(expint(2, x), expint(2, x).rewrite(Ei).rewrite(expint),
-x*E1(x) + exp(-x), x)
assert mytn(expint(3, x), expint(3, x).rewrite(Ei).rewrite(expint),
x**2*E1(x)/2 + (1 - x)*exp(-x)/2, x)
assert expint(S(3)/2, z).nseries(z) == \
2 + 2*z - z**2/3 + z**3/15 - z**4/84 + z**5/540 - \
2*sqrt(pi)*sqrt(z) + O(z**6)
assert E1(z).series(z) == -EulerGamma - log(z) + z - \
z**2/4 + z**3/18 - z**4/96 + z**5/600 + O(z**6)
assert expint(4, z).series(z) == S(1)/3 - z/2 + z**2/2 + \
z**3*(log(z)/6 - S(11)/36 + EulerGamma/6) - z**4/24 + \
z**5/240 + O(z**6)
def test__eis():
assert _eis(z).diff(z) == -_eis(z) + 1/z
assert _eis(1/z).series(z) == \
z + z**2 + 2*z**3 + 6*z**4 + 24*z**5 + O(z**6)
assert Ei(z).rewrite('tractable') == exp(z)*_eis(z)
assert li(z).rewrite('tractable') == z*_eis(log(z))
assert _eis(z).rewrite('intractable') == exp(-z)*Ei(z)
assert expand(li(z).rewrite('tractable').diff(z).rewrite('intractable')) \
== li(z).diff(z)
assert expand(Ei(z).rewrite('tractable').diff(z).rewrite('intractable')) \
== Ei(z).diff(z)
assert _eis(z).series(z, n=3) == EulerGamma + log(z) + z*(-log(z) - \
EulerGamma + 1) + z**2*(log(z)/2 - S(3)/4 + EulerGamma/2) + O(z**3*log(z))
def tn_arg(func):
def test(arg, e1, e2):
from random import uniform
v = uniform(1, 5)
v1 = func(arg*x).subs(x, v).n()
v2 = func(e1*v + e2*1e-15).n()
return abs(v1 - v2).n() < 1e-10
return test(exp_polar(I*pi/2), I, 1) and \
test(exp_polar(-I*pi/2), -I, 1) and \
test(exp_polar(I*pi), -1, I) and \
test(exp_polar(-I*pi), -1, -I)
def test_li():
z = Symbol("z")
zr = Symbol("z", real=True)
zp = Symbol("z", positive=True)
zn = Symbol("z", negative=True)
assert li(0) == 0
assert li(1) == -oo
assert li(oo) == oo
assert isinstance(li(z), li)
assert diff(li(z), z) == 1/log(z)
assert conjugate(li(z)) == li(conjugate(z))
assert conjugate(li(-zr)) == li(-zr)
assert conjugate(li(-zp)) == conjugate(li(-zp))
assert conjugate(li(zn)) == conjugate(li(zn))
assert li(z).rewrite(Li) == Li(z) + li(2)
assert li(z).rewrite(Ei) == Ei(log(z))
assert li(z).rewrite(uppergamma) == (-log(1/log(z))/2 - log(-log(z)) +
log(log(z))/2 - expint(1, -log(z)))
assert li(z).rewrite(Si) == (-log(I*log(z)) - log(1/log(z))/2 +
log(log(z))/2 + Ci(I*log(z)) + Shi(log(z)))
assert li(z).rewrite(Ci) == (-log(I*log(z)) - log(1/log(z))/2 +
log(log(z))/2 + Ci(I*log(z)) + Shi(log(z)))
assert li(z).rewrite(Shi) == (-log(1/log(z))/2 + log(log(z))/2 +
Chi(log(z)) - Shi(log(z)))
assert li(z).rewrite(Chi) == (-log(1/log(z))/2 + log(log(z))/2 +
Chi(log(z)) - Shi(log(z)))
assert li(z).rewrite(hyper) ==(log(z)*hyper((1, 1), (2, 2), log(z)) -
log(1/log(z))/2 + log(log(z))/2 + EulerGamma)
assert li(z).rewrite(meijerg) == (-log(1/log(z))/2 - log(-log(z)) + log(log(z))/2 -
meijerg(((), (1,)), ((0, 0), ()), -log(z)))
assert gruntz(1/li(z), z, oo) == 0
def test_Li():
assert Li(2) == 0
assert Li(oo) == oo
assert isinstance(Li(z), Li)
assert diff(Li(z), z) == 1/log(z)
assert gruntz(1/Li(z), z, oo) == 0
assert Li(z).rewrite(li) == li(z) - li(2)
def test_si():
assert Si(I*x) == I*Shi(x)
assert Shi(I*x) == I*Si(x)
assert Si(-I*x) == -I*Shi(x)
assert Shi(-I*x) == -I*Si(x)
assert Si(-x) == -Si(x)
assert Shi(-x) == -Shi(x)
assert Si(exp_polar(2*pi*I)*x) == Si(x)
assert Si(exp_polar(-2*pi*I)*x) == Si(x)
assert Shi(exp_polar(2*pi*I)*x) == Shi(x)
assert Shi(exp_polar(-2*pi*I)*x) == Shi(x)
assert Si(oo) == pi/2
assert Si(-oo) == -pi/2
assert Shi(oo) == oo
assert Shi(-oo) == -oo
assert mytd(Si(x), sin(x)/x, x)
assert mytd(Shi(x), sinh(x)/x, x)
assert mytn(Si(x), Si(x).rewrite(Ei),
-I*(-Ei(x*exp_polar(-I*pi/2))/2
+ Ei(x*exp_polar(I*pi/2))/2 - I*pi) + pi/2, x)
assert mytn(Si(x), Si(x).rewrite(expint),
-I*(-expint(1, x*exp_polar(-I*pi/2))/2 +
expint(1, x*exp_polar(I*pi/2))/2) + pi/2, x)
assert mytn(Shi(x), Shi(x).rewrite(Ei),
Ei(x)/2 - Ei(x*exp_polar(I*pi))/2 + I*pi/2, x)
assert mytn(Shi(x), Shi(x).rewrite(expint),
expint(1, x)/2 - expint(1, x*exp_polar(I*pi))/2 - I*pi/2, x)
assert tn_arg(Si)
assert tn_arg(Shi)
assert Si(x).nseries(x, n=8) == \
x - x**3/18 + x**5/600 - x**7/35280 + O(x**9)
assert Shi(x).nseries(x, n=8) == \
x + x**3/18 + x**5/600 + x**7/35280 + O(x**9)
assert Si(sin(x)).nseries(x, n=5) == x - 2*x**3/9 + 17*x**5/450 + O(x**6)
assert Si(x).nseries(x, 1, n=3) == \
Si(1) + (x - 1)*sin(1) + (x - 1)**2*(-sin(1)/2 + cos(1)/2) + O((x - 1)**3, (x, 1))
def test_ci():
m1 = exp_polar(I*pi)
m1_ = exp_polar(-I*pi)
pI = exp_polar(I*pi/2)
mI = exp_polar(-I*pi/2)
assert Ci(m1*x) == Ci(x) + I*pi
assert Ci(m1_*x) == Ci(x) - I*pi
assert Ci(pI*x) == Chi(x) + I*pi/2
assert Ci(mI*x) == Chi(x) - I*pi/2
assert Chi(m1*x) == Chi(x) + I*pi
assert Chi(m1_*x) == Chi(x) - I*pi
assert Chi(pI*x) == Ci(x) + I*pi/2
assert Chi(mI*x) == Ci(x) - I*pi/2
assert Ci(exp_polar(2*I*pi)*x) == Ci(x) + 2*I*pi
assert Chi(exp_polar(-2*I*pi)*x) == Chi(x) - 2*I*pi
assert Chi(exp_polar(2*I*pi)*x) == Chi(x) + 2*I*pi
assert Ci(exp_polar(-2*I*pi)*x) == Ci(x) - 2*I*pi
assert Ci(oo) == 0
assert Ci(-oo) == I*pi
assert Chi(oo) == oo
assert Chi(-oo) == oo
assert mytd(Ci(x), cos(x)/x, x)
assert mytd(Chi(x), cosh(x)/x, x)
assert mytn(Ci(x), Ci(x).rewrite(Ei),
Ei(x*exp_polar(-I*pi/2))/2 + Ei(x*exp_polar(I*pi/2))/2, x)
assert mytn(Chi(x), Chi(x).rewrite(Ei),
Ei(x)/2 + Ei(x*exp_polar(I*pi))/2 - I*pi/2, x)
assert tn_arg(Ci)
assert tn_arg(Chi)
from sympy import O, EulerGamma, log, limit
assert Ci(x).nseries(x, n=4) == \
EulerGamma + log(x) - x**2/4 + x**4/96 + O(x**5)
assert Chi(x).nseries(x, n=4) == \
EulerGamma + log(x) + x**2/4 + x**4/96 + O(x**5)
assert limit(log(x) - Ci(2*x), x, 0) == -log(2) - EulerGamma
def test_fresnel():
assert fresnels(0) == 0
assert fresnels(oo) == S.Half
assert fresnels(-oo) == -S.Half
assert fresnels(z) == fresnels(z)
assert fresnels(-z) == -fresnels(z)
assert fresnels(I*z) == -I*fresnels(z)
assert fresnels(-I*z) == I*fresnels(z)
assert conjugate(fresnels(z)) == fresnels(conjugate(z))
assert fresnels(z).diff(z) == sin(pi*z**2/2)
assert fresnels(z).rewrite(erf) == (S.One + I)/4 * (
erf((S.One + I)/2*sqrt(pi)*z) - I*erf((S.One - I)/2*sqrt(pi)*z))
assert fresnels(z).rewrite(hyper) == \
pi*z**3/6 * hyper([S(3)/4], [S(3)/2, S(7)/4], -pi**2*z**4/16)
assert fresnels(z).series(z, n=15) == \
pi*z**3/6 - pi**3*z**7/336 + pi**5*z**11/42240 + O(z**15)
assert fresnels(w).is_real is True
assert fresnels(z).as_real_imag() == \
((fresnels(re(z) - I*re(z)*Abs(im(z))/Abs(re(z)))/2 +
fresnels(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))/2,
I*(fresnels(re(z) - I*re(z)*Abs(im(z))/Abs(re(z))) -
fresnels(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))) *
re(z)*Abs(im(z))/(2*im(z)*Abs(re(z)))))
assert fresnels(2 + 3*I).as_real_imag() == (
fresnels(2 + 3*I)/2 + fresnels(2 - 3*I)/2,
I*(fresnels(2 - 3*I) - fresnels(2 + 3*I))/2
)
assert expand_func(integrate(fresnels(z), z)) == \
z*fresnels(z) + cos(pi*z**2/2)/pi
assert fresnels(z).rewrite(meijerg) == sqrt(2)*pi*z**(S(9)/4) * \
meijerg(((), (1,)), ((S(3)/4,),
(S(1)/4, 0)), -pi**2*z**4/16)/(2*(-z)**(S(3)/4)*(z**2)**(S(3)/4))
assert fresnelc(0) == 0
assert fresnelc(oo) == S.Half
assert fresnelc(-oo) == -S.Half
assert fresnelc(z) == fresnelc(z)
assert fresnelc(-z) == -fresnelc(z)
assert fresnelc(I*z) == I*fresnelc(z)
assert fresnelc(-I*z) == -I*fresnelc(z)
assert conjugate(fresnelc(z)) == fresnelc(conjugate(z))
assert fresnelc(z).diff(z) == cos(pi*z**2/2)
assert fresnelc(z).rewrite(erf) == (S.One - I)/4 * (
erf((S.One + I)/2*sqrt(pi)*z) + I*erf((S.One - I)/2*sqrt(pi)*z))
assert fresnelc(z).rewrite(hyper) == \
z * hyper([S.One/4], [S.One/2, S(5)/4], -pi**2*z**4/16)
assert fresnelc(z).series(z, n=15) == \
z - pi**2*z**5/40 + pi**4*z**9/3456 - pi**6*z**13/599040 + O(z**15)
# issue 6510
assert fresnels(z).series(z, S.Infinity) == \
(-1/(pi**2*z**3) + O(z**(-6), (z, oo)))*sin(pi*z**2/2) + \
(3/(pi**3*z**5) - 1/(pi*z) + O(z**(-6), (z, oo)))*cos(pi*z**2/2) + S.Half
assert fresnelc(z).series(z, S.Infinity) == \
(-1/(pi**2*z**3) + O(z**(-6), (z, oo)))*cos(pi*z**2/2) + \
(-3/(pi**3*z**5) + 1/(pi*z) + O(z**(-6), (z, oo)))*sin(pi*z**2/2) + S.Half
assert fresnels(1/z).series(z) == \
(-z**3/pi**2 + O(z**6))*sin(pi/(2*z**2)) + (-z/pi + 3*z**5/pi**3 + \
O(z**6))*cos(pi/(2*z**2)) + S.Half
assert fresnelc(1/z).series(z) == \
(-z**3/pi**2 + O(z**6))*cos(pi/(2*z**2)) + (z/pi - 3*z**5/pi**3 + \
O(z**6))*sin(pi/(2*z**2)) + S.Half
assert fresnelc(w).is_real is True
assert fresnelc(z).as_real_imag() == \
((fresnelc(re(z) - I*re(z)*Abs(im(z))/Abs(re(z)))/2 +
fresnelc(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))/2,
I*(fresnelc(re(z) - I*re(z)*Abs(im(z))/Abs(re(z))) -
fresnelc(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))) *
re(z)*Abs(im(z))/(2*im(z)*Abs(re(z)))))
assert fresnelc(2 + 3*I).as_real_imag() == (
fresnelc(2 - 3*I)/2 + fresnelc(2 + 3*I)/2,
I*(fresnelc(2 - 3*I) - fresnelc(2 + 3*I))/2
)
assert expand_func(integrate(fresnelc(z), z)) == \
z*fresnelc(z) - sin(pi*z**2/2)/pi
assert fresnelc(z).rewrite(meijerg) == sqrt(2)*pi*z**(S(3)/4) * \
meijerg(((), (1,)), ((S(1)/4,),
(S(3)/4, 0)), -pi**2*z**4/16)/(2*(-z)**(S(1)/4)*(z**2)**(S(1)/4))
from sympy.utilities.randtest import test_numerically
test_numerically(re(fresnels(z)), fresnels(z).as_real_imag()[0], z)
test_numerically(im(fresnels(z)), fresnels(z).as_real_imag()[1], z)
test_numerically(fresnels(z), fresnels(z).rewrite(hyper), z)
test_numerically(fresnels(z), fresnels(z).rewrite(meijerg), z)
test_numerically(re(fresnelc(z)), fresnelc(z).as_real_imag()[0], z)
test_numerically(im(fresnelc(z)), fresnelc(z).as_real_imag()[1], z)
test_numerically(fresnelc(z), fresnelc(z).rewrite(hyper), z)
test_numerically(fresnelc(z), fresnelc(z).rewrite(meijerg), z)
| bsd-3-clause | -8,657,577,822,490,563,000 | 34.712166 | 102 | 0.523889 | false |
kdebrab/pandas | pandas/core/resample.py | 1 | 52045 | from datetime import timedelta
import numpy as np
import warnings
import copy
from textwrap import dedent
import pandas as pd
from pandas.core.groupby.base import GroupByMixin
from pandas.core.groupby.ops import BinGrouper
from pandas.core.groupby.groupby import (
_GroupBy, GroupBy, groupby, _pipe_template
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.generic import SeriesGroupBy, PanelGroupBy
from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset, Tick, Day, delta_to_nanoseconds
from pandas.core.indexes.period import PeriodIndex
from pandas.errors import AbstractMethodError
import pandas.core.algorithms as algos
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas._libs import lib
from pandas._libs.tslibs import Timestamp, NaT
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.util._decorators import Appender, Substitution
from pandas.core.generic import _shared_docs
_shared_docs_kwargs = dict()
class Resampler(_GroupBy):
"""
Class for resampling datetimelike data, a groupby-like operation.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.resample(...) to use Resampler.
Parameters
----------
obj : pandas object
groupby : a TimeGrouper object
axis : int, default 0
kind : str or None
        'period', 'timestamp' to override default index treatment
Notes
-----
After resampling, see aggregate, apply, and transform functions.
Returns
-------
a Resampler of the appropriate type
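    Examples
    --------
    The usual entry point is ``.resample`` on a Series or DataFrame; the data
    below mirrors the aggregation example further down in this module:
    >>> s = pd.Series([1, 2, 3, 4, 5],
    ...               index=pd.date_range('20130101', periods=5, freq='s'))
    >>> s.resample('2s').sum()
    2013-01-01 00:00:00    3
    2013-01-01 00:00:02    7
    2013-01-01 00:00:04    5
    Freq: 2S, dtype: int64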
"""
# to the groupby descriptor
_attributes = ['freq', 'axis', 'closed', 'label', 'convention',
'loffset', 'base', 'kind']
def __init__(self, obj, groupby=None, axis=0, kind=None, **kwargs):
self.groupby = groupby
self.keys = None
self.sort = True
self.axis = axis
self.kind = kind
self.squeeze = False
self.group_keys = True
self.as_index = True
self.exclusions = set()
self.binner = None
self.grouper = None
if self.groupby is not None:
self.groupby._set_grouper(self._convert_obj(obj), sort=True)
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self.groupby, k))
for k in self._attributes if
getattr(self.groupby, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self.__class__.__name__,
attrs=', '.join(attrs))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self._attributes:
return getattr(self.groupby, attr)
if attr in self.obj:
return self[attr]
return object.__getattribute__(self, attr)
@property
def obj(self):
return self.groupby.obj
@property
def ax(self):
return self.groupby.ax
@property
def _typ(self):
""" masquerade for compat as a Series or a DataFrame """
if isinstance(self._selected_obj, pd.Series):
return 'series'
return 'dataframe'
@property
def _from_selection(self):
""" is the resampling from a DataFrame column or MultiIndex level """
# upsampling and PeriodIndex resampling do not work
        # with selection; this state is used to catch and raise an error
return (self.groupby is not None and
(self.groupby.key is not None or
self.groupby.level is not None))
def _convert_obj(self, obj):
"""
provide any conversions for the object in order to correctly handle
Parameters
----------
obj : the object to be resampled
Returns
-------
obj : converted object
"""
obj = obj._consolidate()
return obj
def _get_binner_for_time(self):
raise AbstractMethodError(self)
def _set_binner(self):
"""
setup our binners
cache these as we are an immutable object
"""
if self.binner is None:
self.binner, self.grouper = self._get_binner()
def _get_binner(self):
"""
create the BinGrouper, assume that self.set_grouper(obj)
has already been called
"""
binner, bins, binlabels = self._get_binner_for_time()
bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer)
return binner, bin_grouper
def _assure_grouper(self):
""" make sure that we are creating our binner & grouper """
self._set_binner()
@Substitution(klass='Resampler',
versionadded='.. versionadded:: 0.23.0',
examples="""
>>> df = pd.DataFrame({'A': [1, 2, 3, 4]},
... index=pd.date_range('2012-08-02', periods=4))
>>> df
A
2012-08-02 1
2012-08-03 2
2012-08-04 3
2012-08-05 4
To get the difference between each 2-day period's maximum and minimum value in
one pass, you can do
>>> df.resample('2D').pipe(lambda x: x.max() - x.min())
A
2012-08-02 1
2012-08-04 1""")
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return super(Resampler, self).pipe(func, *args, **kwargs)
_agg_doc = dedent("""
Examples
--------
>>> s = pd.Series([1,2,3,4,5],
index=pd.date_range('20130101', periods=5,freq='s'))
2013-01-01 00:00:00 1
2013-01-01 00:00:01 2
2013-01-01 00:00:02 3
2013-01-01 00:00:03 4
2013-01-01 00:00:04 5
Freq: S, dtype: int64
>>> r = s.resample('2s')
DatetimeIndexResampler [freq=<2 * Seconds>, axis=0, closed=left,
label=left, convention=start, base=0]
>>> r.agg(np.sum)
2013-01-01 00:00:00 3
2013-01-01 00:00:02 7
2013-01-01 00:00:04 5
Freq: 2S, dtype: int64
>>> r.agg(['sum','mean','max'])
sum mean max
2013-01-01 00:00:00 3 1.5 2
2013-01-01 00:00:02 7 3.5 4
2013-01-01 00:00:04 5 5.0 5
>>> r.agg({'result' : lambda x: x.mean() / x.std(),
'total' : np.sum})
total result
2013-01-01 00:00:00 3 2.121320
2013-01-01 00:00:02 7 4.949747
2013-01-01 00:00:04 5 NaN
See also
--------
pandas.DataFrame.groupby.aggregate
pandas.DataFrame.resample.transform
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='DataFrame',
versionadded='',
axis=''))
def aggregate(self, arg, *args, **kwargs):
self._set_binner()
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
result = self._groupby_and_aggregate(arg,
*args,
**kwargs)
result = self._apply_loffset(result)
return result
agg = aggregate
apply = aggregate
def transform(self, arg, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> resampled.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
return self._selected_obj.groupby(self.groupby).transform(
arg, *args, **kwargs)
def _downsample(self, f):
raise AbstractMethodError(self)
def _upsample(self, f, limit=None, fill_value=None):
raise AbstractMethodError(self)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
self._set_binner()
grouper = self.grouper
if subset is None:
subset = self.obj
grouped = groupby(subset, by=None, grouper=grouper, axis=self.axis)
# try the key selection
try:
return grouped[key]
except KeyError:
return grouped
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
""" re-evaluate the obj with a groupby aggregation """
if grouper is None:
self._set_binner()
grouper = self.grouper
obj = self._selected_obj
try:
grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)
except TypeError:
# panel grouper
grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis)
try:
if isinstance(obj, ABCDataFrame) and compat.callable(how):
# Check if the function is reducing or not.
result = grouped._aggregate_item_by_item(how, *args, **kwargs)
else:
result = grouped.aggregate(how, *args, **kwargs)
except Exception:
# we have a non-reducing function
# try to evaluate
result = grouped.apply(how, *args, **kwargs)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _apply_loffset(self, result):
"""
if loffset is set, offset the result index
This is NOT an idempotent routine, it will be applied
exactly once to the result.
Parameters
----------
result : Series or DataFrame
the result of resample
"""
needs_offset = (
isinstance(self.loffset, (DateOffset, timedelta)) and
isinstance(result.index, DatetimeIndex) and
len(result.index) > 0
)
if needs_offset:
result.index = result.index + self.loffset
self.loffset = None
return result
def _get_resampler_for_grouping(self, groupby, **kwargs):
""" return the correct class for resampling with groupby """
return self._resampler_for_grouping(self, groupby=groupby, **kwargs)
def _wrap_result(self, result):
""" potentially wrap any results """
if isinstance(result, ABCSeries) and self._selection is not None:
result.name = self._selection
if isinstance(result, ABCSeries) and result.empty:
obj = self.obj
result.index = obj.index._shallow_copy(freq=to_offset(self.freq))
result.name = getattr(obj, 'name', None)
return result
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
Returns
-------
an upsampled Series
See Also
--------
Series.fillna
DataFrame.fillna
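        Examples
        --------
        Forward filling while upsampling to 30-minute bins; this mirrors the
        ``fillna('pad')`` example elsewhere in this module:
        >>> s = pd.Series([1, 2, 3],
        ...               index=pd.date_range('20180101', periods=3, freq='h'))
        >>> s.resample('30min').pad()
        2018-01-01 00:00:00    1
        2018-01-01 00:30:00    1
        2018-01-01 01:00:00    2
        2018-01-01 01:30:00    2
        2018-01-01 02:00:00    3
        Freq: 30T, dtype: int64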
"""
return self._upsample('pad', limit=limit)
ffill = pad
def nearest(self, limit=None):
"""
Fill values with nearest neighbor starting from center
Parameters
----------
limit : integer, optional
limit of how many values to fill
.. versionadded:: 0.21.0
Returns
-------
an upsampled Series
See Also
--------
Series.fillna
DataFrame.fillna
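        Examples
        --------
        Nearest-neighbor filling while upsampling to 30-minute bins; this
        mirrors the ``fillna('nearest')`` example elsewhere in this module:
        >>> s = pd.Series([1, 2, 3],
        ...               index=pd.date_range('20180101', periods=3, freq='h'))
        >>> s.resample('30min').nearest()
        2018-01-01 00:00:00    1
        2018-01-01 00:30:00    2
        2018-01-01 01:00:00    2
        2018-01-01 01:30:00    3
        2018-01-01 02:00:00    3
        Freq: 30T, dtype: int64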
"""
return self._upsample('nearest', limit=limit)
def backfill(self, limit=None):
"""
Backward fill the new missing values in the resampled data.
In statistics, imputation is the process of replacing missing data with
substituted values [1]_. When resampling data, missing values may
appear (e.g., when the resampling frequency is higher than the original
frequency). The backward fill will replace NaN values that appeared in
the resampled data with the next value in the original sequence.
Missing values that existed in the original data will not be modified.
Parameters
----------
limit : integer, optional
Limit of how many values to fill.
Returns
-------
Series, DataFrame
An upsampled Series or DataFrame with backward filled NaN values.
See Also
--------
bfill : Alias of backfill.
fillna : Fill NaN values using the specified method, which can be
'backfill'.
nearest : Fill NaN values with nearest neighbor starting from center.
pad : Forward fill NaN values.
pandas.Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'backfill'.
pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the
specified method, which can be 'backfill'.
References
----------
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
Examples
--------
Resampling a Series:
>>> s = pd.Series([1, 2, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: H, dtype: int64
>>> s.resample('30min').backfill()
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('15min').backfill(limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
2018-01-01 01:15:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 01:45:00 3.0
2018-01-01 02:00:00 3.0
Freq: 15T, dtype: float64
Resampling a DataFrame that has missing values:
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
... index=pd.date_range('20180101', periods=3,
... freq='h'))
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
>>> df.resample('30min').backfill()
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
2018-01-01 01:00:00 NaN 3
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
>>> df.resample('15min').backfill(limit=2)
a b
2018-01-01 00:00:00 2.0 1.0
2018-01-01 00:15:00 NaN NaN
2018-01-01 00:30:00 NaN 3.0
2018-01-01 00:45:00 NaN 3.0
2018-01-01 01:00:00 NaN 3.0
2018-01-01 01:15:00 NaN NaN
2018-01-01 01:30:00 6.0 5.0
2018-01-01 01:45:00 6.0 5.0
2018-01-01 02:00:00 6.0 5.0
"""
return self._upsample('backfill', limit=limit)
bfill = backfill
def fillna(self, method, limit=None):
"""
Fill missing values introduced by upsampling.
In statistics, imputation is the process of replacing missing data with
substituted values [1]_. When resampling data, missing values may
appear (e.g., when the resampling frequency is higher than the original
frequency).
Missing values that existed in the original data will
not be modified.
Parameters
----------
method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}
Method to use for filling holes in resampled data
* 'pad' or 'ffill': use previous valid observation to fill gap
(forward fill).
* 'backfill' or 'bfill': use next valid observation to fill gap.
* 'nearest': use nearest valid observation to fill gap.
limit : integer, optional
Limit of how many consecutive missing values to fill.
Returns
-------
Series or DataFrame
An upsampled Series or DataFrame with missing values filled.
See Also
--------
backfill : Backward fill NaN values in the resampled data.
pad : Forward fill NaN values in the resampled data.
nearest : Fill NaN values in the resampled data
with nearest neighbor starting from center.
interpolate : Fill NaN values using interpolation.
pandas.Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'bfill' and 'ffill'.
pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the
specified method, which can be 'bfill' and 'ffill'.
Examples
--------
Resampling a Series:
>>> s = pd.Series([1, 2, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: H, dtype: int64
Without filling the missing values you get:
>>> s.resample("30min").asfreq()
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 2.0
2018-01-01 01:30:00 NaN
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> s.resample('30min').fillna("backfill")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('15min').fillna("backfill", limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
2018-01-01 01:15:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 01:45:00 3.0
2018-01-01 02:00:00 3.0
Freq: 15T, dtype: float64
>>> s.resample('30min').fillna("pad")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 1
2018-01-01 01:00:00 2
2018-01-01 01:30:00 2
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('30min').fillna("nearest")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
Missing values present before the upsampling are not affected.
>>> sm = pd.Series([1, None, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> sm
2018-01-01 00:00:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 02:00:00 3.0
Freq: H, dtype: float64
>>> sm.resample('30min').fillna('backfill')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> sm.resample('30min').fillna('pad')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 NaN
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> sm.resample('30min').fillna('nearest')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
DataFrame resampling is done column-wise. All the same options are
available.
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
... index=pd.date_range('20180101', periods=3,
... freq='h'))
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
>>> df.resample('30min').fillna("bfill")
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
2018-01-01 01:00:00 NaN 3
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
References
----------
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
"""
return self._upsample(method, limit=limit)
@Appender(_shared_docs['interpolate'] % _shared_docs_kwargs)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', limit_area=None,
downcast=None, **kwargs):
"""
Interpolate values according to different methods.
.. versionadded:: 0.18.1
"""
result = self._upsample(None)
return result.interpolate(method=method, axis=axis, limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast, **kwargs)
def asfreq(self, fill_value=None):
"""
return the values at the new freq,
essentially a reindex
Parameters
----------
fill_value: scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
.. versionadded:: 0.20.0
See Also
--------
Series.asfreq
DataFrame.asfreq
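        Examples
        --------
        Reindexing to 30-minute bins leaves the new slots as NaN; this mirrors
        the ``asfreq`` example shown in the ``fillna`` docstring above:
        >>> s = pd.Series([1, 2, 3],
        ...               index=pd.date_range('20180101', periods=3, freq='h'))
        >>> s.resample('30min').asfreq()
        2018-01-01 00:00:00    1.0
        2018-01-01 00:30:00    NaN
        2018-01-01 01:00:00    2.0
        2018-01-01 01:30:00    NaN
        2018-01-01 02:00:00    3.0
        Freq: 30T, dtype: float64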
"""
return self._upsample('asfreq', fill_value=fill_value)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_resampler_func('std', args, kwargs)
return self._downsample('std', ddof=ddof)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_resampler_func('var', args, kwargs)
return self._downsample('var', ddof=ddof)
@Appender(GroupBy.size.__doc__)
def size(self):
        # It's a special case as the higher level does return
        # a copy of 0-len objects. GH14962
result = self._downsample('size')
if not len(self.ax) and isinstance(self._selected_obj, ABCDataFrame):
result = pd.Series([], index=result.index, dtype='int64')
return result
# downsample methods
for method in ['sum', 'prod']:
def f(self, _method=method, min_count=0, *args, **kwargs):
nv.validate_resampler_func(_method, args, kwargs)
return self._downsample(_method, min_count=min_count)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# downsample methods
for method in ['min', 'max', 'first', 'last', 'mean', 'sem',
'median', 'ohlc']:
def f(self, _method=method, *args, **kwargs):
nv.validate_resampler_func(_method, args, kwargs)
return self._downsample(_method)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# groupby & aggregate methods
for method in ['count']:
def f(self, _method=method):
return self._downsample(_method)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# series only methods
for method in ['nunique']:
def f(self, _method=method):
return self._downsample(_method)
f.__doc__ = getattr(SeriesGroupBy, method).__doc__
setattr(Resampler, method, f)
def _maybe_process_deprecations(r, how=None, fill_method=None, limit=None):
""" potentially we might have a deprecation warning, show it
but call the appropriate methods anyhow """
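    # For example (hypothetical call), ``df.resample('2D', how='sum',
    # fill_method='ffill')`` ends up here and is re-expressed as
    # ``df.resample('2D').sum().ffill()``; the warnings below point users
    # at that replacement syntax.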
if how is not None:
# .resample(..., how='sum')
if isinstance(how, compat.string_types):
method = "{0}()".format(how)
# .resample(..., how=lambda x: ....)
else:
method = ".apply(<func>)"
# if we have both a how and fill_method, then show
# the following warning
if fill_method is None:
warnings.warn("how in .resample() is deprecated\n"
"the new syntax is "
".resample(...).{method}".format(
method=method),
FutureWarning, stacklevel=3)
r = r.aggregate(how)
if fill_method is not None:
# show the prior function call
method = '.' + method if how is not None else ''
args = "limit={0}".format(limit) if limit is not None else ""
warnings.warn("fill_method is deprecated to .resample()\n"
"the new syntax is .resample(...){method}"
".{fill_method}({args})".format(
method=method,
fill_method=fill_method,
args=args),
FutureWarning, stacklevel=3)
if how is not None:
r = getattr(r, fill_method)(limit=limit)
else:
r = r.aggregate(fill_method, limit=limit)
return r
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None)
groupby = kwargs.pop('groupby', None)
if parent is None:
parent = obj
# initialize our GroupByMixin object with
# the resampler attributes
for attr in self._attributes:
setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))
super(_GroupByMixin, self).__init__(None)
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
self.groupby = copy.copy(parent.groupby)
def _apply(self, f, **kwargs):
"""
dispatch to _upsample; we are stripping all of the _upsample kwargs and
performing the original function call on the grouped object
"""
def func(x):
x = self._shallow_copy(x, groupby=self.groupby)
if isinstance(f, compat.string_types):
return getattr(x, f)(**kwargs)
return x.apply(f, **kwargs)
result = self._groupby.apply(func)
return self._wrap_result(result)
_upsample = _apply
_downsample = _apply
_groupby_and_aggregate = _apply
class DatetimeIndexResampler(Resampler):
@property
def _resampler_for_grouping(self):
return DatetimeIndexResamplerGroupby
def _get_binner_for_time(self):
# this is how we are actually creating the bins
if self.kind == 'period':
return self.groupby._get_time_period_bins(self.ax)
return self.groupby._get_time_bins(self.ax)
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
self._set_binner()
how = self._is_cython_func(how) or how
ax = self.ax
obj = self._selected_obj
if not len(ax):
# reset to the new freq
obj = obj.copy()
obj.index.freq = self.freq
return obj
# do we have a regular frequency
if ax.freq is not None or ax.inferred_freq is not None:
if len(self.grouper.binlabels) > len(ax) and how is None:
# let's do an asfreq
return self.asfreq()
# we are downsampling
# we want to call the actual grouper method here
result = obj.groupby(
self.grouper, axis=self.axis).aggregate(how, **kwargs)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _adjust_binner_for_upsample(self, binner):
""" adjust our binner when upsampling """
if self.closed == 'right':
binner = binner[1:]
else:
binner = binner[:-1]
return binner
def _upsample(self, method, limit=None, fill_value=None):
"""
method : string {'backfill', 'bfill', 'pad',
'ffill', 'asfreq'} method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
fill_value : scalar, default None
Value to use for missing values
See also
--------
.fillna
"""
self._set_binner()
if self.axis:
raise AssertionError('axis must be 0')
if self._from_selection:
raise ValueError("Upsampling from level= or on= selection"
" is not supported, use .set_index(...)"
" to explicitly set index to"
" datetime-like")
ax = self.ax
obj = self._selected_obj
binner = self.binner
res_index = self._adjust_binner_for_upsample(binner)
# if we have the same frequency as our axis, then we are equal sampling
if limit is None and to_offset(ax.inferred_freq) == self.freq:
result = obj.copy()
result.index = res_index
else:
result = obj.reindex(res_index, method=method,
limit=limit, fill_value=fill_value)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _wrap_result(self, result):
result = super(DatetimeIndexResampler, self)._wrap_result(result)
# we may have a different kind that we were asked originally
# convert if needed
if self.kind == 'period' and not isinstance(result.index, PeriodIndex):
result.index = result.index.to_period(self.freq)
return result
class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler):
"""
Provides a resample of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return DatetimeIndexResampler
class PeriodIndexResampler(DatetimeIndexResampler):
@property
def _resampler_for_grouping(self):
return PeriodIndexResamplerGroupby
def _get_binner_for_time(self):
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._get_binner_for_time()
return self.groupby._get_period_bins(self.ax)
def _convert_obj(self, obj):
obj = super(PeriodIndexResampler, self)._convert_obj(obj)
if self._from_selection:
# see GH 14008, GH 12871
msg = ("Resampling from level= or on= selection"
" with a PeriodIndex is not currently supported,"
" use .set_index(...) to explicitly set index")
raise NotImplementedError(msg)
if self.loffset is not None:
# Cannot apply loffset/timedelta to PeriodIndex -> convert to
# timestamps
self.kind = 'timestamp'
# convert to timestamp
if self.kind == 'timestamp':
obj = obj.to_timestamp(how=self.convention)
return obj
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
# we may need to actually resample as if we are timestamps
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._downsample(how, **kwargs)
how = self._is_cython_func(how) or how
ax = self.ax
if is_subperiod(ax.freq, self.freq):
# Downsampling
return self._groupby_and_aggregate(how, grouper=self.grouper)
elif is_superperiod(ax.freq, self.freq):
if how == 'ohlc':
# GH #13083
# upsampling to subperiods is handled as an asfreq, which works
# for pure aggregating/reducing methods
# OHLC reduces along the time dimension, but creates multiple
# values for each period -> handle by _groupby_and_aggregate()
return self._groupby_and_aggregate(how, grouper=self.grouper)
return self.asfreq()
elif ax.freq == self.freq:
return self.asfreq()
raise IncompatibleFrequency(
'Frequency {} cannot be resampled to {}, as they are not '
'sub or super periods'.format(ax.freq, self.freq))
def _upsample(self, method, limit=None, fill_value=None):
"""
method : string {'backfill', 'bfill', 'pad', 'ffill'}
method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
fill_value : scalar, default None
Value to use for missing values
See also
--------
.fillna
"""
# we may need to actually resample as if we are timestamps
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._upsample(
method, limit=limit, fill_value=fill_value)
self._set_binner()
ax = self.ax
obj = self.obj
new_index = self.binner
# Start vs. end of period
memb = ax.asfreq(self.freq, how=self.convention)
# Get the fill indexer
indexer = memb.get_indexer(new_index, method=method, limit=limit)
return self._wrap_result(_take_new_index(
obj, indexer, new_index, axis=self.axis))
class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler):
"""
Provides a resample of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return PeriodIndexResampler
class TimedeltaIndexResampler(DatetimeIndexResampler):
@property
def _resampler_for_grouping(self):
return TimedeltaIndexResamplerGroupby
def _get_binner_for_time(self):
return self.groupby._get_time_delta_bins(self.ax)
def _adjust_binner_for_upsample(self, binner):
""" adjust our binner when upsampling """
ax = self.ax
if is_subperiod(ax.freq, self.freq):
# We are actually downsampling
# but are in the asfreq path
# GH 12926
if self.closed == 'right':
binner = binner[1:]
else:
binner = binner[:-1]
return binner
class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler):
"""
Provides a resample of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return TimedeltaIndexResampler
def resample(obj, kind=None, **kwds):
""" create a TimeGrouper and return our resampler """
tg = TimeGrouper(**kwds)
return tg._get_resampler(obj, kind=kind)
resample.__doc__ = Resampler.__doc__
def get_resampler_for_grouping(groupby, rule, how=None, fill_method=None,
limit=None, kind=None, **kwargs):
""" return our appropriate resampler when grouping as well """
# .resample uses 'on' similar to how .groupby uses 'key'
kwargs['key'] = kwargs.pop('on', None)
tg = TimeGrouper(freq=rule, **kwargs)
resampler = tg._get_resampler(groupby.obj, kind=kind)
r = resampler._get_resampler_for_grouping(groupby=groupby)
return _maybe_process_deprecations(r,
how=how,
fill_method=fill_method,
limit=limit)
class TimeGrouper(Grouper):
"""
Custom groupby class for time-interval grouping
Parameters
----------
freq : pandas date offset or offset alias for identifying bin edges
closed : closed end of interval; 'left' or 'right'
label : interval boundary to use for labeling; 'left' or 'right'
convention : {'start', 'end', 'e', 's'}
If axis is PeriodIndex
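    Examples
    --------
    A minimal sketch; ``df`` and the five-minute frequency are assumed for
    illustration only:
    >>> df.groupby(TimeGrouper(freq='5Min')).sum()  # doctest: +SKIP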
"""
_attributes = Grouper._attributes + ('closed', 'label', 'how',
'loffset', 'kind', 'convention',
'base')
def __init__(self, freq='Min', closed=None, label=None, how='mean',
axis=0, fill_method=None, limit=None, loffset=None,
kind=None, convention=None, base=0, **kwargs):
# Check for correctness of the keyword arguments which would
# otherwise silently use the default if misspelled
if label not in {None, 'left', 'right'}:
raise ValueError('Unsupported value {} for `label`'.format(label))
if closed not in {None, 'left', 'right'}:
raise ValueError('Unsupported value {} for `closed`'.format(
closed))
if convention not in {None, 'start', 'end', 'e', 's'}:
raise ValueError('Unsupported value {} for `convention`'
.format(convention))
freq = to_offset(freq)
end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'])
rule = freq.rule_code
if (rule in end_types or
('-' in rule and rule[:rule.find('-')] in end_types)):
if closed is None:
closed = 'right'
if label is None:
label = 'right'
else:
if closed is None:
closed = 'left'
if label is None:
label = 'left'
self.closed = closed
self.label = label
self.kind = kind
self.convention = convention or 'E'
self.convention = self.convention.lower()
if isinstance(loffset, compat.string_types):
loffset = to_offset(loffset)
self.loffset = loffset
self.how = how
self.fill_method = fill_method
self.limit = limit
self.base = base
# always sort time groupers
kwargs['sort'] = True
super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs)
def _get_resampler(self, obj, kind=None):
"""
return my resampler or raise if we have an invalid axis
Parameters
----------
obj : input object
kind : string, optional
'period','timestamp','timedelta' are valid
Returns
-------
a Resampler
Raises
------
TypeError if incompatible axis
"""
self._set_grouper(obj)
ax = self.ax
if isinstance(ax, DatetimeIndex):
return DatetimeIndexResampler(obj,
groupby=self,
kind=kind,
axis=self.axis)
elif isinstance(ax, PeriodIndex) or kind == 'period':
return PeriodIndexResampler(obj,
groupby=self,
kind=kind,
axis=self.axis)
elif isinstance(ax, TimedeltaIndex):
return TimedeltaIndexResampler(obj,
groupby=self,
axis=self.axis)
raise TypeError("Only valid with DatetimeIndex, "
"TimedeltaIndex or PeriodIndex, "
"but got an instance of %r" % type(ax).__name__)
def _get_grouper(self, obj, validate=True):
# create the resampler and return our binner
r = self._get_resampler(obj)
r._set_binner()
return r.binner, r.grouper, r.obj
def _get_time_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if len(ax) == 0:
binner = labels = DatetimeIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = ax.min(), ax.max()
first, last = _get_range_edges(first, last, self.freq,
closed=self.closed,
base=self.base)
tz = ax.tz
# GH #12037
        # use first/last directly instead of calling replace() on them
        # because replace() will swallow the nanosecond part;
        # thus the last bin may be slightly before the end if the end contains
        # a nanosecond part, which leads to a `Values falls after last bin` error
binner = labels = DatetimeIndex(freq=self.freq,
start=first,
end=last,
tz=tz,
name=ax.name)
# GH 15549
        # In the edge case of tz-aware resampling, the binner's last index can
        # be less than the last value in the data object; this happens because
        # of a DST time change
if len(binner) > 1 and binner[-1] < last:
extra_date_range = pd.date_range(binner[-1], last + self.freq,
freq=self.freq, tz=tz,
name=ax.name)
binner = labels = binner.append(extra_date_range[1:])
# a little hack
trimmed = False
if (len(binner) > 2 and binner[-2] == last and
self.closed == 'right'):
binner = binner[:-1]
trimmed = True
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(
ax_values, bin_edges, self.closed, hasnans=ax.hasnans)
if self.closed == 'right':
labels = binner
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
else:
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
if ax.hasnans:
binner = binner.insert(0, NaT)
labels = labels.insert(0, NaT)
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[:len(bins)]
return binner, bins, labels
def _adjust_bin_edges(self, binner, ax_values):
# Some hacks for > daily data, see #1471, #1458, #1483
bin_edges = binner.asi8
if self.freq != 'D' and is_superperiod(self.freq, 'D'):
day_nanos = delta_to_nanoseconds(timedelta(1))
if self.closed == 'right':
bin_edges = bin_edges + day_nanos - 1
# intraday values on last day
if bin_edges[-2] > ax_values.max():
bin_edges = bin_edges[:-1]
binner = binner[:-1]
return binner, bin_edges
def _get_time_delta_bins(self, ax):
if not isinstance(ax, TimedeltaIndex):
raise TypeError('axis must be a TimedeltaIndex, but got '
'an instance of %r' % type(ax).__name__)
if not len(ax):
binner = labels = TimedeltaIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
start = ax[0]
end = ax[-1]
labels = binner = TimedeltaIndex(start=start,
end=end,
freq=self.freq,
name=ax.name)
end_stamps = labels + 1
bins = ax.searchsorted(end_stamps, side='left')
# Addresses GH #10530
if self.base > 0:
labels += type(self.freq)(self.base)
return binner, bins, labels
def _get_time_period_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if not len(ax):
binner = labels = PeriodIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
labels = binner = PeriodIndex(start=ax[0],
end=ax[-1],
freq=self.freq,
name=ax.name)
end_stamps = (labels + 1).asfreq(self.freq, 's').to_timestamp()
if ax.tzinfo:
end_stamps = end_stamps.tz_localize(ax.tzinfo)
bins = ax.searchsorted(end_stamps, side='left')
return binner, bins, labels
def _get_period_bins(self, ax):
if not isinstance(ax, PeriodIndex):
raise TypeError('axis must be a PeriodIndex, but got '
'an instance of %r' % type(ax).__name__)
memb = ax.asfreq(self.freq, how=self.convention)
# NaT handling as in pandas._lib.lib.generate_bins_dt64()
nat_count = 0
if memb.hasnans:
nat_count = np.sum(memb._isnan)
memb = memb[~memb._isnan]
# if index contains no valid (non-NaT) values, return empty index
if not len(memb):
binner = labels = PeriodIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
start = ax.min().asfreq(self.freq, how=self.convention)
end = ax.max().asfreq(self.freq, how='end')
labels = binner = PeriodIndex(start=start, end=end,
freq=self.freq, name=ax.name)
i8 = memb.asi8
freq_mult = self.freq.n
# when upsampling to subperiods, we need to generate enough bins
expected_bins_count = len(binner) * freq_mult
i8_extend = expected_bins_count - (i8[-1] - i8[0])
rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult)
rng += freq_mult
bins = memb.searchsorted(rng, side='left')
if nat_count > 0:
# NaT handling as in pandas._lib.lib.generate_bins_dt64()
# shift bins by the number of NaT
bins += nat_count
bins = np.insert(bins, 0, nat_count)
binner = binner.insert(0, NaT)
labels = labels.insert(0, NaT)
return binner, bins, labels
def _take_new_index(obj, indexer, new_index, axis=0):
from pandas.core.api import Series, DataFrame
if isinstance(obj, Series):
new_values = algos.take_1d(obj.values, indexer)
return Series(new_values, index=new_index, name=obj.name)
elif isinstance(obj, DataFrame):
if axis == 1:
raise NotImplementedError("axis 1 is not supported")
return DataFrame(obj._data.reindex_indexer(
new_axis=new_index, indexer=indexer, axis=1))
else:
raise ValueError("'obj' should be either a Series or a DataFrame")
def _get_range_edges(first, last, offset, closed='left', base=0):
if isinstance(offset, compat.string_types):
offset = to_offset(offset)
if isinstance(offset, Tick):
is_day = isinstance(offset, Day)
day_nanos = delta_to_nanoseconds(timedelta(1))
# #1165
if (is_day and day_nanos % offset.nanos == 0) or not is_day:
return _adjust_dates_anchored(first, last, offset,
closed=closed, base=base)
if not isinstance(offset, Tick): # and first.time() != last.time():
# hack!
first = first.normalize()
last = last.normalize()
if closed == 'left':
first = Timestamp(offset.rollback(first))
else:
first = Timestamp(first - offset)
last = Timestamp(last + offset)
return first, last
def _adjust_dates_anchored(first, last, offset, closed='right', base=0):
# First and last offsets should be calculated from the start day to fix an
    # error caused by resampling across multiple days when a one day period is
# not a multiple of the frequency.
#
# See https://github.com/pandas-dev/pandas/issues/8683
# 14682 - Since we need to drop the TZ information to perform
# the adjustment in the presence of a DST change,
# save TZ Info and the DST state of the first and last parameters
# so that we can accurately rebuild them at the end.
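    # Illustrative (hypothetical) numbers: with a '7min' offset, base=0 and
    # `first` at 01:00:00, anchoring to the start of the day gives
    # foffset = 60min % 7min = 4min, so the first edge becomes 00:56:00
    # rather than being anchored at 01:00:00 itself.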
first_tzinfo = first.tzinfo
last_tzinfo = last.tzinfo
first_dst = bool(first.dst())
last_dst = bool(last.dst())
first = first.tz_localize(None)
last = last.tz_localize(None)
start_day_nanos = first.normalize().value
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - start_day_nanos) % offset.nanos
if closed == 'right':
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
return (Timestamp(fresult).tz_localize(first_tzinfo, ambiguous=first_dst),
Timestamp(lresult).tz_localize(last_tzinfo, ambiguous=last_dst))
def asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None):
"""
Utility frequency conversion method for Series/DataFrame
"""
if isinstance(obj.index, PeriodIndex):
if method is not None:
raise NotImplementedError("'method' argument is not supported")
if how is None:
how = 'E'
new_obj = obj.copy()
new_obj.index = obj.index.asfreq(freq, how=how)
elif len(obj.index) == 0:
new_obj = obj.copy()
new_obj.index = obj.index._shallow_copy(freq=to_offset(freq))
else:
dti = date_range(obj.index[0], obj.index[-1], freq=freq)
dti.name = obj.index.name
new_obj = obj.reindex(dti, method=method, fill_value=fill_value)
if normalize:
new_obj.index = new_obj.index.normalize()
return new_obj
| bsd-3-clause | -5,954,581,995,773,467,000 | 31.630094 | 79 | 0.550562 | false |
grze/parentheses | clc/eucadmin/eucadmin/describeinstances.py | 1 | 2047 | # Software License Agreement (BSD License)
#
# Copyright (c) 2013 Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from boto.roboto.param import Param
import eucadmin
class DescribeInstances(eucadmin.EucadminRequest):
ServiceClass = eucadmin.EucAdmin
ServicePath = '/services/Eucalyptus'
Args = [Param(name='InstanceId', long_name='InstanceId', ptype='array',
optional=True)]
def __init__(self, **args):
eucadmin.EucadminRequest.__init__(self, **args)
self.list_markers = ['reservationSet', 'instancesSet', 'tagSet']
self.item_markers = ['item']
self.get_connection().APIVersion = '2012-07-20' # cheap hack
def main(self, **args):
return self.send(**args)
| gpl-3.0 | -8,749,896,958,695,378,000 | 40.77551 | 77 | 0.737665 | false |
ryansb/disq | tests/test_job_commands.py | 1 | 2851 | # Copyright 2015 Ryan Brown <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
import time
import disq
def test_round_trip(dq):
qname = 'rttq'
assert dq.getjob('empty', timeout_ms=1) is None
id = dq.addjob(qname, 'foobar')
assert id
job = dq.getjob(qname, timeout_ms=1)
assert len(job) == 3
assert job[0] == qname
assert job[1] == id
assert job[2] == b'foobar'
id = dq.addjob(qname, 'foobar')
jobs = dq.getjobs(qname, timeout_ms=1)
job = jobs[0]
assert job[0] == qname
assert job[1] == id
assert job[2] == b'foobar'
def test_del_job(dq):
qname = 'delq'
assert dq.getjob(qname, timeout_ms=1) is None
id = dq.addjob(qname, 'foobar')
assert dq.qlen(qname) == 1
assert dq.deljob(id) == 1
assert dq.qlen(qname) == 0
def test_expiring_job(dq):
qname = 'expq'
assert dq.getjob(qname, timeout_ms=1) is None
dq.addjob(qname, 'foobar', ttl_secs=1)
assert dq.qlen(qname) == 1
time.sleep(1.5)
assert dq.qlen(qname) == 0
def test_delay_job(dq):
qname = 'delayq'
assert dq.getjob(qname, timeout_ms=1) is None
dq.addjob(qname, 'foobar', delay_secs=1)
assert dq.qlen(qname) == 0
time.sleep(0.5)
assert dq.qlen(qname) == 0
time.sleep(1)
assert dq.qlen(qname) == 1
def test_async_job(dq):
qname = 'delayq'
assert dq.getjob(qname, timeout_ms=1) is None
dq.addjob(qname, 'foobar', async=True)
assert dq.getjob(qname)
def test_unreplicated_job(dq, dq2):
qname = 'unreplq'
assert dq.getjob(qname, timeout_ms=1) is None
assert dq2.getjob(qname, timeout_ms=1) is None
id = dq.addjob(qname, 'foobar', replicate=1)
print(id,)
assert dq2.getjob(qname, timeout_ms=1) is None
assert dq.getjob(qname, timeout_ms=1)
def test_overcrowded_job(dq, dq2):
qname = 'crowdedq'
assert dq.getjob(qname, timeout_ms=1) is None
for i in range(11):
dq.addjob(qname, 'foobar {0}'.format(i), maxlen=10)
with pytest.raises(disq.ResponseError):
dq.addjob(qname, 'foobar', maxlen=10)
def test_json_job():
qname = 'jsonq'
job = {'hello': 'world'}
q = disq.Disque()
q.set_response_callback('GETJOB', disq.parsers.read_json_job)
q.addjob(qname, json.dumps(job))
j = q.getjob(qname)
assert j[2] == job
| apache-2.0 | 4,155,654,131,376,370,000 | 26.152381 | 74 | 0.649947 | false |
cmars/pystdf | pystdf/TestSummarizer.py | 1 | 7154 | #
# PySTDF - The Pythonic STDF Parser
# Copyright (C) 2006 Casey Marshall
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from pystdf.Pipeline import EventSource
from pystdf.V4 import ptr, mpr, ftr, tsr
def filterNull(value):
if value == 4294967295:
return None
return value
class TestSummarizer(EventSource):
FLAG_SYNTH = 0x80
FLAG_OVERALL = 0x01
PTR_TEST_TXT = 0x00
MPR_TEST_TXT = 0x01
FTR_TEST_TXT = 0x02
TSR_TEST_NAM = 0x03
TSR_SEQ_NAME = 0x04
TSR_TEST_LBL = 0x05
def __init__(self):
EventSource.__init__(self, ['testSummaryReady'])
def testSummaryReady(self, dataSource): pass
def getOverallTsrs(self):
return self.overallTsrs.values()
def getSiteTsrs(self):
return self.summaryTsrs.values()
def getSiteSynthTsrs(self):
for siteTest, execCnt in self.testExecs.iteritems():
site, test = siteTest
tsrRow = [0, site, ' ', test,
execCnt[0],
self.testFails.get(siteTest, [0])[0],
self.testInvalid.get(siteTest, [0])[0],
None, None, None]
yield tsrRow
def before_begin(self, dataSource):
self.testExecs = dict()
self.testFails = dict()
self.testInvalid = dict()
self.summaryTsrs = dict()
self.overallTsrs = dict()
# Map of all test numbers to test names
self.testAliasMap = dict()
self.unitsMap = dict()
self.limitsMap = dict()
# Functional summary information
self.cyclCntMap = dict()
self.relVadrMap = dict()
self.failPinMap = dict()
def before_complete(self, dataSource):
testKeys = set(self.testFails.keys())
summaryTsrKeys = set(self.summaryTsrs.keys())
        # Determine which summary TSR records need to be synthesized
        # from the individual per-test records.
self.synthSummaryTsrKeys = testKeys - summaryTsrKeys
# Determine which overall bin records need to be synthed
# for siteTest, row in self.summaryTsrs.iteritems():
# if not self.overallTsrs.has_key(siteTest[1]):
# overallCount = self.synthOverallTsrs.setdefault(siteTest[1], [0])
# overallCount[0] += row[tsr.FAIL_CNT]
# for siteTest, partCount in self.testFails.iteritems():
# if not self.overallTsrs.has_key(siteTest[1]):
# overallCount = self.synthOverallTsrs.setdefault(siteTest[1], [0])
# overallCount[0] += partCount[0]
self.testSummaryReady(dataSource)
def before_send(self, dataSource, data):
table, row = data
if table.name == ptr.name:
self.onPtr(row)
elif table.name == mpr.name:
self.onMpr(row)
elif table.name == ftr.name:
self.onFtr(row)
elif table.name == tsr.name:
self.onTsr(row)
def onPtr(self, row):
execCount = self.testExecs.setdefault(
(row[ptr.SITE_NUM], row[ptr.TEST_NUM]), [0])
execCount[0] += 1
if row[ptr.TEST_FLG] & 0x80 > 0:
failCount = self.testFails.setdefault(
(row[ptr.SITE_NUM], row[ptr.TEST_NUM]), [0])
failCount[0] += 1
if row[ptr.TEST_FLG] & 0x41 > 0:
invalidCount = self.testInvalid.setdefault(
(row[ptr.SITE_NUM], row[ptr.TEST_NUM]), [0])
invalidCount[0] += 1
aliases = self.testAliasMap.setdefault(row[ptr.TEST_NUM], set())
aliases.add((row[ptr.TEST_TXT], self.PTR_TEST_TXT))
if ptr.UNITS < len(row) and row[ptr.UNITS]:
units = self.unitsMap.setdefault(row[ptr.TEST_NUM], [None])
units[0] = row[ptr.UNITS]
if row[ptr.OPT_FLAG] is not None and row[ptr.OPT_FLAG] & 0x40 == 0:
loLimit = row[ptr.LO_LIMIT]
else:
loLimit = None
if row[ptr.OPT_FLAG] is not None and row[ptr.OPT_FLAG] & 0x80 == 0:
hiLimit = row[ptr.HI_LIMIT]
else:
hiLimit = None
if loLimit is not None or hiLimit is not None:
limits = self.limitsMap.setdefault(row[ptr.TEST_NUM], set())
limits.add((loLimit, hiLimit))
def onMpr(self, row):
if row[mpr.TEST_FLG] & 0x80 > 0:
failCount = self.testFails.setdefault(
(row[mpr.SITE_NUM], row[mpr.TEST_NUM]), [0])
failCount[0] += 1
        if row[mpr.TEST_FLG] & 0x41 > 0:
            invalidCount = self.testInvalid.setdefault(
                (row[mpr.SITE_NUM], row[mpr.TEST_NUM]), [0])
invalidCount[0] += 1
aliases = self.testAliasMap.setdefault(row[mpr.TEST_NUM], set())
aliases.add((row[mpr.TEST_TXT], self.MPR_TEST_TXT))
if mpr.UNITS < len(row) and row[mpr.UNITS]:
units = self.unitsMap.setdefault(row[mpr.TEST_NUM], [None])
units[0] = row[mpr.UNITS]
if row[mpr.OPT_FLAG] is not None and row[mpr.OPT_FLAG] & 0x40 == 0:
loLimit = row[mpr.LO_LIMIT]
else:
loLimit = None
if row[mpr.OPT_FLAG] is not None and row[mpr.OPT_FLAG] & 0x80 == 0:
hiLimit = row[mpr.HI_LIMIT]
else:
hiLimit = None
if loLimit is not None or hiLimit is not None:
limits = self.limitsMap.setdefault(row[mpr.TEST_NUM], set())
limits.add((loLimit, hiLimit))
def onFtr(self, row):
if row[ftr.TEST_FLG] & 0x80 > 0:
countList = self.testFails.setdefault(
(row[ftr.SITE_NUM], row[ftr.TEST_NUM]), [0])
countList[0] += 1
if row[ftr.OPT_FLAG] is not None:
if row[ftr.OPT_FLAG] & 0x01 > 0:
countList = self.cyclCntMap.setdefault((row[ftr.TEST_NUM], row[ftr.CYCL_CNT]), [0])
countList[0] += 1
if row[ftr.OPT_FLAG] & 0x02 > 0:
countList = self.relVadrMap.setdefault((row[ftr.TEST_NUM], row[ftr.REL_VADR]), [0])
countList[0] += 1
if ftr.RTN_STAT < len(row) and ftr.RTN_INDX < len(row) \
and row[ftr.RTN_STAT] and row[ftr.RTN_INDX]:
for i, rtnStat in enumerate(row[ftr.RTN_STAT]):
if rtnStat > 4 and i < len(row[ftr.RTN_INDX]): # A failing return state...
pmrIndx = row[ftr.RTN_INDX][i]
countList = self.failPinMap.setdefault((row[ftr.TEST_NUM], pmrIndx), [0])
countList[0] += 1
aliases = self.testAliasMap.setdefault(row[ftr.TEST_NUM], set())
aliases.add((row[ftr.TEST_TXT], self.FTR_TEST_TXT))
def onTsr(self, row):
if row[tsr.HEAD_NUM] == 255:
self.overallTsrs[row[tsr.TEST_NUM]] = [
filterNull(value) for value in row]
else:
self.summaryTsrs[(row[tsr.SITE_NUM],row[tsr.TEST_NUM])] = [
filterNull(value) for value in row]
aliases = self.testAliasMap.setdefault(row[tsr.TEST_NUM], set())
aliases.add((row[tsr.TEST_NAM], self.TSR_TEST_NAM))
aliases.add((row[tsr.SEQ_NAME], self.TSR_SEQ_NAME))
aliases.add((row[tsr.TEST_LBL], self.TSR_TEST_LBL))
| gpl-2.0 | 1,444,826,504,898,870 | 35.131313 | 91 | 0.638803 | false |
brkwon/arista_eapi | modules/arista_eapi_urllib2.py | 1 | 3060 | # EAPI call program using urllib2 and json
import urllib2
#import random
import json
# eAPI JSON template
enableCmd = {"input" : "test", "cmd" : "enable"}
jsonCmd = {"params" : {"format" : "json", "version" : 1, "cmds" : "command"}, "jsonrpc" : "2.0", "method" : "runCmds", "id" : 0}
# Create json based on enable_password and eAPI command
def jsonCreate(eapi_command, enable_password):
if enable_password == None:
jsonCmd["params"]["cmds"] = [eapi_command]
return jsonCmd
else:
enableCmd["input"] = enable_password
jsonCmd["params"]["cmds"] = [enableCmd] + eapi_command
return jsonCmd
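# For reference, jsonCreate("show vlan", None) returns (before json.dumps):
#   {"jsonrpc": "2.0", "method": "runCmds", "id": 0,
#    "params": {"format": "json", "version": 1, "cmds": ["show vlan"]}}
# Note that the module-level jsonCmd template dict is mutated in place.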
# HTTP REST request function for eAPI call
def switchReq(switch, username, password, jsonCmds):
credential = switch
urlString = "http://" +credential+ "/command-api"
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, urlString, username, password)
    # create an authentication handler and opener, then install the opener
auth = urllib2.HTTPBasicAuthHandler(password_manager)
opener = urllib2.build_opener(auth)
urllib2.install_opener(opener)
# create request call
request = urllib2.Request(urlString, jsonCmds)
request.add_header("Content-type", "application/json")
    # call the switch via urllib2, read the response, then close it
f = urllib2.urlopen(request)
response = f.read()
f.close()
return response
# Return the list of VLAN ids configured on a switch
def show_vlan(switch, username, password):
# Create JSON eapi-command
json_data = jsonCreate("show vlan", None)
jsonCmds = json.dumps(json_data)
# Send JSON command to the switch
response = switchReq(switch, username, password, jsonCmds)
# Strip VLAN ids for return
json_string = json.loads(response)
result = [str(item) for item in (json_string['result'][0]['vlans'].keys())]
return result
# Check if supplied VLAN is in switch or not
def check_vlan(switch, username, password, vlans):
# Create JSON eapi-command
json_data = jsonCreate("show vlan", None)
jsonCmds = json.dumps(json_data)
# Send JSON command to the switch
response = switchReq(switch, username, password, jsonCmds)
# Strip VLAN ids for checkup
json_string = json.loads(response)
result = [str(item) for item in (json_string['result'][0]['vlans'].keys())]
    return str(vlans) in result
# Add VLAN to a switch
def add_vlan(switch, username, password, enable_password, vlans):
eapi_command = ["configure", "vlan " +vlans]
json_data = jsonCreate(eapi_command, enable_password)
jsonCmds = json.dumps(json_data)
response = switchReq(switch, username, password, jsonCmds)
# Delete VLAN from a switch
def del_vlan(switch, username, password, enable_password, vlans):
eapi_command = ["configure", "no vlan " +vlans]
json_data = jsonCreate(eapi_command, enable_password)
jsonCmds = json.dumps(json_data)
response = switchReq(switch, username, password, jsonCmds)
| lgpl-3.0 | -2,830,876,098,484,342,300 | 32.26087 | 128 | 0.675817 | false |
chdecultot/frappe | frappe/model/base_document.py | 1 | 25095 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six import iteritems, string_types
import datetime
import frappe, sys
from frappe import _
from frappe.utils import (cint, flt, now, cstr, strip_html, getdate, get_datetime, to_timedelta,
sanitize_html, sanitize_email, cast_fieldtype)
from frappe.model import default_fields
from frappe.model.naming import set_new_name
from frappe.model.utils.link_count import notify_link_count
from frappe.modules import load_doctype_module
from frappe.model import display_fieldtypes
from frappe.model.db_schema import type_map, varchar_len
from frappe.utils.password import get_decrypted_password, set_encrypted_password
_classes = {}
def get_controller(doctype):
"""Returns the **class** object of the given DocType.
For `custom` type, returns `frappe.model.document.Document`.
:param doctype: DocType name as string."""
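	# Hypothetical example: get_controller("ToDo") loads the doctype's module via
	# load_doctype_module() and returns its ToDo class; a DocType flagged as
	# custom falls back to the generic Document class. Results are cached in the
	# module-level _classes dict.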
from frappe.model.document import Document
global _classes
if not doctype in _classes:
module_name, custom = frappe.db.get_value("DocType", doctype, ["module", "custom"]) \
or ["Core", False]
if custom:
_class = Document
else:
module = load_doctype_module(doctype, module_name)
classname = doctype.replace(" ", "").replace("-", "")
if hasattr(module, classname):
_class = getattr(module, classname)
if issubclass(_class, BaseDocument):
_class = getattr(module, classname)
else:
raise ImportError(doctype)
else:
raise ImportError(doctype)
_classes[doctype] = _class
return _classes[doctype]
class BaseDocument(object):
ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns")
def __init__(self, d):
self.update(d)
self.dont_update_if_missing = []
if hasattr(self, "__setup__"):
self.__setup__()
@property
def meta(self):
if not hasattr(self, "_meta"):
self._meta = frappe.get_meta(self.doctype)
return self._meta
def update(self, d):
if "doctype" in d:
self.set("doctype", d.get("doctype"))
# first set default field values of base document
for key in default_fields:
if key in d:
self.set(key, d.get(key))
for key, value in iteritems(d):
self.set(key, value)
return self
def update_if_missing(self, d):
if isinstance(d, BaseDocument):
d = d.get_valid_dict()
if "doctype" in d:
self.set("doctype", d.get("doctype"))
for key, value in iteritems(d):
# dont_update_if_missing is a list of fieldnames, for which, you don't want to set default value
if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing):
self.set(key, value)
def get_db_value(self, key):
return frappe.db.get_value(self.doctype, self.name, key)
def get(self, key=None, filters=None, limit=None, default=None):
if key:
if isinstance(key, dict):
return _filter(self.get_all_children(), key, limit=limit)
if filters:
if isinstance(filters, dict):
value = _filter(self.__dict__.get(key, []), filters, limit=limit)
else:
default = filters
filters = None
value = self.__dict__.get(key, default)
else:
value = self.__dict__.get(key, default)
if value is None and key not in self.ignore_in_getter \
and key in (d.fieldname for d in self.meta.get_table_fields()):
self.set(key, [])
value = self.__dict__.get(key)
return value
else:
return self.__dict__
def getone(self, key, filters=None):
return self.get(key, filters=filters, limit=1)[0]
def set(self, key, value, as_value=False):
if isinstance(value, list) and not as_value:
self.__dict__[key] = []
self.extend(key, value)
else:
self.__dict__[key] = value
def delete_key(self, key):
if key in self.__dict__:
del self.__dict__[key]
def append(self, key, value=None):
if value==None:
value={}
if isinstance(value, (dict, BaseDocument)):
if not self.__dict__.get(key):
self.__dict__[key] = []
value = self._init_child(value, key)
self.__dict__[key].append(value)
# reference parent document
value.parent_doc = self
return value
else:
raise ValueError(
"Document attached to child table must be a dict or BaseDocument, not " + str(type(value))[1:-1]
)
def extend(self, key, value):
if isinstance(value, list):
for v in value:
self.append(key, v)
else:
raise ValueError
def remove(self, doc):
self.get(doc.parentfield).remove(doc)
def _init_child(self, value, key):
if not self.doctype:
return value
if not isinstance(value, BaseDocument):
if "doctype" not in value:
value["doctype"] = self.get_table_field_doctype(key)
if not value["doctype"]:
raise AttributeError(key)
value = get_controller(value["doctype"])(value)
value.init_valid_columns()
value.parent = self.name
value.parenttype = self.doctype
value.parentfield = key
if value.docstatus is None:
value.docstatus = 0
if not getattr(value, "idx", None):
value.idx = len(self.get(key) or []) + 1
if not getattr(value, "name", None):
value.__dict__['__islocal'] = 1
return value
def get_valid_dict(self, sanitize=True, convert_dates_to_str=False):
d = frappe._dict()
for fieldname in self.meta.get_valid_columns():
d[fieldname] = self.get(fieldname)
# if no need for sanitization and value is None, continue
if not sanitize and d[fieldname] is None:
continue
df = self.meta.get_field(fieldname)
if df:
if df.fieldtype=="Check":
if d[fieldname]==None:
d[fieldname] = 0
elif (not isinstance(d[fieldname], int) or d[fieldname] > 1):
d[fieldname] = 1 if cint(d[fieldname]) else 0
elif df.fieldtype=="Int" and not isinstance(d[fieldname], int):
d[fieldname] = cint(d[fieldname])
elif df.fieldtype in ("Currency", "Float", "Percent") and not isinstance(d[fieldname], float):
d[fieldname] = flt(d[fieldname])
elif df.fieldtype in ("Datetime", "Date") and d[fieldname]=="":
d[fieldname] = None
elif df.get("unique") and cstr(d[fieldname]).strip()=="":
# unique empty field should be set to None
d[fieldname] = None
if isinstance(d[fieldname], list) and df.fieldtype != 'Table':
frappe.throw(_('Value for {0} cannot be a list').format(_(df.label)))
if convert_dates_to_str and isinstance(d[fieldname], (datetime.datetime, datetime.time, datetime.timedelta)):
d[fieldname] = str(d[fieldname])
return d
def init_valid_columns(self):
for key in default_fields:
if key not in self.__dict__:
self.__dict__[key] = None
if key in ("idx", "docstatus") and self.__dict__[key] is None:
self.__dict__[key] = 0
for key in self.get_valid_columns():
if key not in self.__dict__:
self.__dict__[key] = None
def get_valid_columns(self):
if self.doctype not in frappe.local.valid_columns:
if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"):
from frappe.model.meta import get_table_columns
valid = get_table_columns(self.doctype)
else:
valid = self.meta.get_valid_columns()
frappe.local.valid_columns[self.doctype] = valid
return frappe.local.valid_columns[self.doctype]
def is_new(self):
return self.get("__islocal")
def as_dict(self, no_nulls=False, no_default_fields=False, convert_dates_to_str=False):
doc = self.get_valid_dict(convert_dates_to_str=convert_dates_to_str)
doc["doctype"] = self.doctype
for df in self.meta.get_table_fields():
children = self.get(df.fieldname) or []
doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children]
if no_nulls:
for k in list(doc):
if doc[k] is None:
del doc[k]
if no_default_fields:
for k in list(doc):
if k in default_fields:
del doc[k]
for key in ("_user_tags", "__islocal", "__onload", "_liked_by", "__run_link_triggers"):
if self.get(key):
doc[key] = self.get(key)
return doc
def as_json(self):
return frappe.as_json(self.as_dict())
def get_table_field_doctype(self, fieldname):
return self.meta.get_field(fieldname).options
def get_parentfield_of_doctype(self, doctype):
fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype]
return fieldname[0] if fieldname else None
def db_insert(self):
"""INSERT the document (with valid columns) in the database."""
if not self.name:
# name will be set by document class in most cases
set_new_name(self)
if not self.creation:
self.creation = self.modified = now()
			self.created_by = self.modified_by = frappe.session.user
d = self.get_valid_dict(convert_dates_to_str=True)
columns = list(d)
try:
frappe.db.sql("""insert into `tab{doctype}`
({columns}) values ({values})""".format(
doctype = self.doctype,
columns = ", ".join(["`"+c+"`" for c in columns]),
values = ", ".join(["%s"] * len(columns))
), list(d.values()))
except Exception as e:
if e.args[0]==1062:
if "PRIMARY" in cstr(e.args[1]):
if self.meta.autoname=="hash":
# hash collision? try again
self.name = None
self.db_insert()
return
frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name))
raise frappe.DuplicateEntryError(self.doctype, self.name, e)
elif "Duplicate" in cstr(e.args[1]):
# unique constraint
self.show_unique_validation_message(e)
else:
raise
else:
raise
self.set("__islocal", False)
def db_update(self):
if self.get("__islocal") or not self.name:
self.db_insert()
return
d = self.get_valid_dict(convert_dates_to_str=True)
# don't update name, as case might've been changed
name = d['name']
del d['name']
columns = list(d)
try:
frappe.db.sql("""update `tab{doctype}`
set {values} where name=%s""".format(
doctype = self.doctype,
values = ", ".join(["`"+c+"`=%s" for c in columns])
), list(d.values()) + [name])
except Exception as e:
if e.args[0]==1062 and "Duplicate" in cstr(e.args[1]):
self.show_unique_validation_message(e)
else:
raise
def show_unique_validation_message(self, e):
type, value, traceback = sys.exc_info()
fieldname, label = str(e).split("'")[-2], None
# unique_first_fieldname_second_fieldname is the constraint name
# created using frappe.db.add_unique
if "unique_" in fieldname:
fieldname = fieldname.split("_", 1)[1]
df = self.meta.get_field(fieldname)
if df:
label = df.label
frappe.msgprint(_("{0} must be unique".format(label or fieldname)))
# this is used to preserve traceback
raise frappe.UniqueValidationError(self.doctype, self.name, e)
def update_modified(self):
'''Update modified timestamp'''
self.set("modified", now())
frappe.db.set_value(self.doctype, self.name, 'modified', self.modified, update_modified=False)
def _fix_numeric_types(self):
for df in self.meta.get("fields"):
if df.fieldtype == "Check":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif self.get(df.fieldname) is not None:
if df.fieldtype == "Int":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif df.fieldtype in ("Float", "Currency", "Percent"):
self.set(df.fieldname, flt(self.get(df.fieldname)))
if self.docstatus is not None:
self.docstatus = cint(self.docstatus)
def _get_missing_mandatory_fields(self):
"""Get mandatory fields that do not have any values"""
def get_msg(df):
if df.fieldtype == "Table":
return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label))
elif self.parentfield:
return "{}: {} {} #{}: {}: {}".format(_("Error"), frappe.bold(_(self.doctype)),
_("Row"), self.idx, _("Value missing for"), _(df.label))
else:
return _("Error: Value missing for {0}: {1}").format(_(df.parent), _(df.label))
missing = []
for df in self.meta.get("fields", {"reqd": ('=', 1)}):
if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip():
missing.append((df.fieldname, get_msg(df)))
# check for missing parent and parenttype
if self.meta.istable:
for fieldname in ("parent", "parenttype"):
if not self.get(fieldname):
missing.append((fieldname, get_msg(frappe._dict(label=fieldname))))
return missing
def get_invalid_links(self, is_submittable=False):
'''Returns list of invalid links and also updates fetch values if not set'''
def get_msg(df, docname):
if self.parentfield:
return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname)
else:
return "{}: {}".format(_(df.label), docname)
invalid_links = []
cancelled_links = []
for df in (self.meta.get_link_fields()
+ self.meta.get("fields", {"fieldtype": ('=', "Dynamic Link")})):
docname = self.get(df.fieldname)
if docname:
if df.fieldtype=="Link":
doctype = df.options
if not doctype:
frappe.throw(_("Options not set for link field {0}").format(df.fieldname))
else:
doctype = self.get(df.options)
if not doctype:
frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options)))
# MySQL is case insensitive. Preserve case of the original docname in the Link Field.
				# get a map of values to fetch along with this link query
# that are mapped as link_fieldname.source_fieldname in Options of
# Readonly or Data or Text type fields
fields_to_fetch = [
_df for _df in self.meta.get_fields_to_fetch(df.fieldname)
if not self.get(_df.fieldname)
]
if not fields_to_fetch:
# cache a single value type
values = frappe._dict(name=frappe.db.get_value(doctype, docname,
'name', cache=True))
else:
values_to_fetch = ['name'] + [_df.fetch_from.split('.')[-1]
for _df in fields_to_fetch]
# don't cache if fetching other values too
values = frappe.db.get_value(doctype, docname,
values_to_fetch, as_dict=True)
if frappe.get_meta(doctype).issingle:
values.name = doctype
if values:
setattr(self, df.fieldname, values.name)
for _df in fields_to_fetch:
setattr(self, _df.fieldname, values[_df.fetch_from.split('.')[-1]])
notify_link_count(doctype, docname)
if not values.name:
invalid_links.append((df.fieldname, docname, get_msg(df, docname)))
elif (df.fieldname != "amended_from"
and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable
and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2):
cancelled_links.append((df.fieldname, docname, get_msg(df, docname)))
return invalid_links, cancelled_links
def _validate_selects(self):
if frappe.flags.in_import:
return
for df in self.meta.get_select_fields():
if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options):
continue
options = (df.options or "").split("\n")
# if only empty options
			if not any(options):
continue
# strip and set
self.set(df.fieldname, cstr(self.get(df.fieldname)).strip())
value = self.get(df.fieldname)
if value not in options and not (frappe.flags.in_test and value.startswith("_T-")):
# show an elaborate message
prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else ""
label = _(self.meta.get_label(df.fieldname))
comma_options = '", "'.join(_(each) for each in options)
frappe.throw(_('{0} {1} cannot be "{2}". It should be one of "{3}"').format(prefix, label,
value, comma_options))
def _validate_constants(self):
if frappe.flags.in_import or self.is_new() or self.flags.ignore_validate_constants:
return
constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": ('=',1)})]
if constants:
values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True)
for fieldname in constants:
df = self.meta.get_field(fieldname)
				# Convert the stored value to a string only when the fieldtype is Date or Datetime
if df.fieldtype == 'Date' or df.fieldtype == 'Datetime':
value = str(values.get(fieldname))
else:
value = values.get(fieldname)
if self.get(fieldname) != value:
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)),
frappe.CannotChangeConstantError)
def _validate_length(self):
if frappe.flags.in_install:
return
if self.meta.issingle:
# single doctype value type is mediumtext
return
for fieldname, value in iteritems(self.get_valid_dict()):
df = self.meta.get_field(fieldname)
if df and df.fieldtype in type_map and type_map[df.fieldtype][0]=="varchar":
max_length = cint(df.get("length")) or cint(varchar_len)
if len(cstr(value)) > max_length:
if self.parentfield and self.idx:
reference = _("{0}, Row {1}").format(_(self.doctype), self.idx)
else:
reference = "{0} {1}".format(_(self.doctype), self.name)
frappe.throw(_("{0}: '{1}' ({3}) will get truncated, as max characters allowed is {2}")\
.format(reference, _(df.label), max_length, value), frappe.CharacterLengthExceededError, title=_('Value too big'))
def _validate_update_after_submit(self):
# get the full doc with children
db_values = frappe.get_doc(self.doctype, self.name).as_dict()
for key in self.as_dict():
df = self.meta.get_field(key)
db_value = db_values.get(key)
if df and not df.allow_on_submit and (self.get(key) or db_value):
if df.fieldtype=="Table":
# just check if the table size has changed
# individual fields will be checked in the loop for children
self_value = len(self.get(key))
db_value = len(db_value)
else:
self_value = self.get_value(key)
if self_value != db_value:
frappe.throw(_("Not allowed to change {0} after submission").format(df.label),
frappe.UpdateAfterSubmitError)
def _sanitize_content(self):
"""Sanitize HTML and Email in field values. Used to prevent XSS.
- Ignore if 'Ignore XSS Filter' is checked or fieldtype is 'Code'
"""
if frappe.flags.in_install:
return
for fieldname, value in self.get_valid_dict().items():
if not value or not isinstance(value, string_types):
continue
value = frappe.as_unicode(value)
if (u"<" not in value and u">" not in value):
# doesn't look like html so no need
continue
elif "<!-- markdown -->" in value and not ("<script" in value or "javascript:" in value):
# should be handled separately via the markdown converter function
continue
df = self.meta.get_field(fieldname)
sanitized_value = value
if df and df.get("fieldtype") in ("Data", "Code", "Small Text") and df.get("options")=="Email":
sanitized_value = sanitize_email(value)
elif df and (df.get("ignore_xss_filter")
or (df.get("fieldtype")=="Code" and df.get("options")!="Email")
or df.get("fieldtype") in ("Attach", "Attach Image")
# cancelled and submit but not update after submit should be ignored
or self.docstatus==2
or (self.docstatus==1 and not df.get("allow_on_submit"))):
continue
else:
sanitized_value = sanitize_html(value, linkify=df.fieldtype=='Text Editor')
self.set(fieldname, sanitized_value)
def _save_passwords(self):
'''Save password field values in __Auth table'''
if self.flags.ignore_save_passwords is True:
return
for df in self.meta.get('fields', {'fieldtype': ('=', 'Password')}):
if self.flags.ignore_save_passwords and df.fieldname in self.flags.ignore_save_passwords: continue
new_password = self.get(df.fieldname)
if new_password and not self.is_dummy_password(new_password):
# is not a dummy password like '*****'
set_encrypted_password(self.doctype, self.name, new_password, df.fieldname)
# set dummy password like '*****'
self.set(df.fieldname, '*'*len(new_password))
def get_password(self, fieldname='password', raise_exception=True):
if self.get(fieldname) and not self.is_dummy_password(self.get(fieldname)):
return self.get(fieldname)
return get_decrypted_password(self.doctype, self.name, fieldname, raise_exception=raise_exception)
def is_dummy_password(self, pwd):
return ''.join(set(pwd))=='*'
def precision(self, fieldname, parentfield=None):
"""Returns float precision for a particular field (or get global default).
:param fieldname: Fieldname for which precision is required.
:param parentfield: If fieldname is in child table."""
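		# e.g. self.precision("rate") (hypothetical fieldname) returns the float
		# precision for that Currency/Float/Percent field, falling back to the
		# global default when the DocField does not define one.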
from frappe.model.meta import get_field_precision
if parentfield and not isinstance(parentfield, string_types):
parentfield = parentfield.parentfield
cache_key = parentfield or "main"
if not hasattr(self, "_precision"):
self._precision = frappe._dict()
if cache_key not in self._precision:
self._precision[cache_key] = frappe._dict()
if fieldname not in self._precision[cache_key]:
self._precision[cache_key][fieldname] = None
doctype = self.meta.get_field(parentfield).options if parentfield else self.doctype
df = frappe.get_meta(doctype).get_field(fieldname)
if df.fieldtype in ("Currency", "Float", "Percent"):
self._precision[cache_key][fieldname] = get_field_precision(df, self)
return self._precision[cache_key][fieldname]
def get_formatted(self, fieldname, doc=None, currency=None, absolute_value=False, translated=False):
from frappe.utils.formatters import format_value
df = self.meta.get_field(fieldname)
if not df and fieldname in default_fields:
from frappe.model.meta import get_default_df
df = get_default_df(fieldname)
val = self.get(fieldname)
if translated:
val = _(val)
if absolute_value and isinstance(val, (int, float)):
val = abs(self.get(fieldname))
if not doc:
doc = getattr(self, "parent_doc", None) or self
return format_value(val, df=df, doc=doc, currency=currency)
def is_print_hide(self, fieldname, df=None, for_print=True):
"""Returns true if fieldname is to be hidden for print.
Print Hide can be set via the Print Format Builder or in the controller as a list
of hidden fields. Example
class MyDoc(Document):
def __setup__(self):
self.print_hide = ["field1", "field2"]
:param fieldname: Fieldname to be checked if hidden.
"""
meta_df = self.meta.get_field(fieldname)
if meta_df and meta_df.get("__print_hide"):
return True
print_hide = 0
if self.get(fieldname)==0 and not self.meta.istable:
print_hide = ( df and df.print_hide_if_no_value ) or ( meta_df and meta_df.print_hide_if_no_value )
if not print_hide:
if df and df.print_hide is not None:
print_hide = df.print_hide
elif meta_df:
print_hide = meta_df.print_hide
return print_hide
def in_format_data(self, fieldname):
"""Returns True if shown via Print Format::`format_data` property.
Called from within standard print format."""
doc = getattr(self, "parent_doc", self)
if hasattr(doc, "format_data_map"):
return fieldname in doc.format_data_map
else:
return True
def reset_values_if_no_permlevel_access(self, has_access_to, high_permlevel_fields):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
to_reset = []
for df in high_permlevel_fields:
if df.permlevel not in has_access_to and df.fieldtype not in display_fieldtypes:
to_reset.append(df)
if to_reset:
if self.is_new():
# if new, set default value
ref_doc = frappe.new_doc(self.doctype)
else:
# get values from old doc
if self.parent:
self.parent_doc.get_latest()
ref_doc = [d for d in self.parent_doc.get(self.parentfield) if d.name == self.name][0]
else:
ref_doc = self.get_latest()
for df in to_reset:
self.set(df.fieldname, ref_doc.get(df.fieldname))
def get_value(self, fieldname):
df = self.meta.get_field(fieldname)
val = self.get(fieldname)
return self.cast(val, df)
def cast(self, value, df):
return cast_fieldtype(df.fieldtype, value)
def _extract_images_from_text_editor(self):
from frappe.utils.file_manager import extract_images_from_doc
if self.doctype != "DocType":
for df in self.meta.get("fields", {"fieldtype": ('=', "Text Editor")}):
extract_images_from_doc(self, df.fieldname)
def _filter(data, filters, limit=None):
"""pass filters as:
{"key": "val", "key": ["!=", "val"],
"key": ["in", "val"], "key": ["not in", "val"], "key": "^val",
"key" : True (exists), "key": False (does not exist) }"""
out, _filters = [], {}
if not data:
return out
# setup filters as tuples
if filters:
for f in filters:
fval = filters[f]
if not isinstance(fval, (tuple, list)):
if fval is True:
fval = ("not None", fval)
elif fval is False:
fval = ("None", fval)
elif isinstance(fval, string_types) and fval.startswith("^"):
fval = ("^", fval[1:])
else:
fval = ("=", fval)
_filters[f] = fval
for d in data:
add = True
for f, fval in iteritems(_filters):
if not frappe.compare(getattr(d, f, None), fval[0], fval[1]):
add = False
break
if add:
out.append(d)
if limit and (len(out)-1)==limit:
break
return out
| mit | -9,026,169,210,520,244,000 | 29.905172 | 120 | 0.66308 | false |
Sult/evetool | evetool/settings.py | 1 | 4146 | """
Django settings for evetool project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u^ez9)ak2z9*x(ujdaoxtmfysb@hb4!li3-x8d4&@&la4jd2_q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'evetool.urls'
WSGI_APPLICATION = 'evetool.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "evetool",
'USER': "sult",
'PASSWORD': "admin",
'HOST': "localhost",
'PORT': "",
},
'eveassets': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "evetool_eveassets",
'USER': "eve",
'PASSWORD': "admin",
'HOST': "localhost",
'PORT': "",
},
'metrics': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "evetool_metrics",
'USER': "eve",
'PASSWORD': "admin",
'HOST': "localhost",
'PORT': "",
},
}
DATABASE_ROUTERS = ['evetool.router.Router']
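# Note: evetool.router.Router (not shown here) presumably directs queries for
# the eveassets and metrics apps to their dedicated databases defined above.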
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_collected')
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
MEDIA_URL = '/media/'
# STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
# )
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#installed packages
'django_extensions',
#my packages
'metrics',
'eveassets',
'users',
'apis',
'tasks',
'characters',
)
LOGIN_URL = "/"
#### Base values for all sorts of things
IMAGE_SIZES = (
("Tiny", 32),
("Small", 64),
("Medium", 128),
("Large", 256),
("Huge", 512),
("Special", 200),
)
#amount of allowed api requests to the evesite per second
EVE_API_REQUESTS = 10
| mit | 8,719,298,617,498,412,000 | 23.975904 | 73 | 0.646406 | false |
SEL-Columbia/commcare-hq | corehq/apps/users/bulkupload.py | 1 | 19528 | from StringIO import StringIO
import logging
from couchdbkit.exceptions import (
BulkSaveError,
MultipleResultsFound,
ResourceNotFound,
)
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from corehq.apps.groups.models import Group
from corehq.apps.users.forms import CommCareAccountForm
from corehq.apps.users.util import normalize_username, raw_username
from corehq.apps.users.models import CommCareUser, CouchUser
from corehq.apps.domain.models import Domain
from couchexport.writers import Excel2007ExportWriter
from dimagi.utils.excel import flatten_json, json_to_headers, \
alphanumeric_sort_key
from corehq.apps.commtrack.util import get_supply_point, submit_mapping_case_block
from corehq.apps.commtrack.models import CommTrackUser, SupplyPointCase
from soil import DownloadBase
class UserUploadError(Exception):
pass
required_headers = set(['username'])
allowed_headers = set(['password', 'phone-number', 'email', 'user_id', 'name', 'group', 'data', 'language']) | required_headers
def check_headers(user_specs):
headers = set(user_specs.fieldnames)
illegal_headers = headers - allowed_headers
missing_headers = required_headers - headers
messages = []
for header_set, label in (missing_headers, 'required'), (illegal_headers, 'illegal'):
if header_set:
messages.append(_('The following are {label} column headers: {headers}.').format(
label=label, headers=', '.join(header_set)))
if messages:
raise UserUploadError('\n'.join(messages))
class GroupMemoizer(object):
"""
If you use this to get a group, do not set group.name directly;
use group_memoizer.rename_group(group, name) instead.
"""
def __init__(self, domain):
self.groups_by_name = {}
self.groups_by_id = {}
self.groups = set()
self.domain = domain
def load_all(self):
for group in Group.by_domain(self.domain):
self.add_group(group)
def add_group(self, new_group):
# todo
# this has the possibility of missing two rows one with id one with name
# that actually refer to the same group
# and overwriting one with the other
assert new_group.name
if new_group.get_id:
self.groups_by_id[new_group.get_id] = new_group
self.groups_by_name[new_group.name] = new_group
self.groups.add(new_group)
def by_name(self, group_name):
if not self.groups_by_name.has_key(group_name):
group = Group.by_name(self.domain, group_name)
if not group:
self.groups_by_name[group_name] = None
return None
self.add_group(group)
return self.groups_by_name[group_name]
def get(self, group_id):
if not self.groups_by_id.has_key(group_id):
group = Group.get(group_id)
if group.domain != self.domain:
raise ResourceNotFound()
self.add_group(group)
return self.groups_by_id[group_id]
def create(self, domain, name):
group = Group(domain=domain, name=name)
self.add_group(group)
return group
def rename_group(self, group, name):
# This isn't always true, you can rename A => B and then B => C,
# and what was A will now be called B when you try to change
# what was B to be called C. That's fine, but you don't want to
# delete someone else's entry
if self.groups_by_name.get(group.name) is group:
del self.groups_by_name[group.name]
group.name = name
self.add_group(group)
def save_all(self):
Group.bulk_save(self.groups)
def _fmt_phone(phone_number):
if phone_number and not isinstance(phone_number, basestring):
phone_number = str(int(phone_number))
return phone_number.lstrip("+")
class LocationCache(object):
def __init__(self):
self.cache = {}
def get(self, site_code, domain):
if not site_code:
return None
if site_code in self.cache:
return self.cache[site_code]
else:
supply_point = get_supply_point(
domain,
site_code
)['case']
self.cache[site_code] = supply_point
return supply_point
class UserLocMapping(object):
def __init__(self, username, domain, location_cache):
self.username = username
self.domain = domain
self.to_add = set()
self.to_remove = set()
self.location_cache = location_cache
def get_supply_point_from_location(self, sms_code):
return self.location_cache.get(sms_code, self.domain)
def save(self):
"""
Calculate which locations need added or removed, then submit
one caseblock to handle this
"""
user = CommTrackUser.get_by_username(self.username)
if not user:
raise UserUploadError(_('no username with {} found!'.format(self.username)))
# have to rewrap since we need to force it to a commtrack user
user = CommTrackUser.wrap(user.to_json())
current_locations = user.locations
current_location_codes = [loc.site_code for loc in current_locations]
commit_list = {}
messages = []
def _add_loc(loc, clear=False):
sp = self.get_supply_point_from_location(loc)
if sp is None:
messages.append(_("No supply point found for location '{}'. "
"Make sure the location type is not set to administrative only "
"and that the location has a valid sms code."
).format(loc or ''))
else:
commit_list.update(user.supply_point_index_mapping(sp, clear))
for loc in self.to_add:
if loc not in current_location_codes:
_add_loc(loc)
for loc in self.to_remove:
if loc in current_location_codes:
_add_loc(loc, clear=True)
if commit_list:
submit_mapping_case_block(user, commit_list)
return messages
def create_or_update_locations(domain, location_specs, log):
location_cache = LocationCache()
users = {}
for row in location_specs:
username = row.get('username')
try:
username = normalize_username(username, domain)
except ValidationError:
log['errors'].append(_("Username must be a valid email address: %s") % username)
else:
location_code = unicode(row.get('location-sms-code'))
if username in users:
user_mapping = users[username]
else:
user_mapping = UserLocMapping(username, domain, location_cache)
users[username] = user_mapping
if row.get('remove') == 'y':
user_mapping.to_remove.add(location_code)
else:
user_mapping.to_add.add(location_code)
for username, mapping in users.iteritems():
try:
messages = mapping.save()
log['errors'].extend(messages)
except UserUploadError as e:
log['errors'].append(_('Unable to update locations for {user} because {message}'.format(
user=username, message=e
)))
def create_or_update_groups(domain, group_specs, log):
group_memoizer = GroupMemoizer(domain)
group_memoizer.load_all()
group_names = set()
for row in group_specs:
group_id = row.get('id')
group_name = row.get('name')
case_sharing = row.get('case-sharing')
reporting = row.get('reporting')
data = row.get('data')
# check that group_names are unique
if group_name in group_names:
log['errors'].append('Your spreadsheet has multiple groups called "%s" and only the first was processed' % group_name)
continue
else:
group_names.add(group_name)
# check that there's a group_id or a group_name
if not group_id and not group_name:
log['errors'].append('Your spreadsheet has a group with no name or id and it has been ignored')
continue
try:
if group_id:
group = group_memoizer.get(group_id)
else:
group = group_memoizer.by_name(group_name)
if not group:
group = group_memoizer.create(domain=domain, name=group_name)
except ResourceNotFound:
log["errors"].append('There are no groups on CommCare HQ with id "%s"' % group_id)
except MultipleResultsFound:
log["errors"].append("There are multiple groups on CommCare HQ named: %s" % group_name)
else:
if group_name:
group_memoizer.rename_group(group, group_name)
group.case_sharing = case_sharing
group.reporting = reporting
group.metadata = data
return group_memoizer
def create_or_update_users_and_groups(domain, user_specs, group_specs, location_specs, task=None):
ret = {"errors": [], "rows": []}
total = len(user_specs) + len(group_specs) + len(location_specs)
def _set_progress(progress):
if task is not None:
DownloadBase.set_progress(task, progress, total)
group_memoizer = create_or_update_groups(domain, group_specs, log=ret)
current = len(group_specs)
usernames = set()
user_ids = set()
allowed_groups = set(group_memoizer.groups)
allowed_group_names = [group.name for group in allowed_groups]
try:
for row in user_specs:
_set_progress(current)
current += 1
data, email, group_names, language, name, password, phone_number, user_id, username = (
row.get(k) for k in sorted(allowed_headers)
)
if password:
password = unicode(password)
group_names = group_names or []
try:
username = normalize_username(str(username), domain)
except TypeError:
username = None
except ValidationError:
ret['rows'].append({
'username': username,
'row': row,
'flag': _('username cannot contain spaces or symbols'),
})
continue
status_row = {
'username': raw_username(username) if username else None,
'row': row,
}
if username in usernames or user_id in user_ids:
status_row['flag'] = 'repeat'
elif not username and not user_id:
status_row['flag'] = 'missing-data'
else:
try:
if username:
usernames.add(username)
if user_id:
user_ids.add(user_id)
if user_id:
user = CommCareUser.get_by_user_id(user_id, domain)
else:
user = CommCareUser.get_by_username(username)
def is_password(password):
if not password:
return False
for c in password:
if c != "*":
return True
return False
if user:
if user.domain != domain:
raise UserUploadError(_(
'User with username %(username)r is '
'somehow in domain %(domain)r'
) % {'username': user.username, 'domain': user.domain})
if username and user.username != username:
user.change_username(username)
if is_password(password):
user.set_password(password)
status_row['flag'] = 'updated'
else:
if len(raw_username(username)) > CommCareAccountForm.max_len_username:
ret['rows'].append({
'username': username,
'row': row,
'flag': _("username cannot contain greater than %d characters" %
CommCareAccountForm.max_len_username)
})
continue
if not is_password(password):
raise UserUploadError(_("Cannot create a new user with a blank password"))
user = CommCareUser.create(domain, username, password, uuid=user_id or '', commit=False)
status_row['flag'] = 'created'
if phone_number:
user.add_phone_number(_fmt_phone(phone_number), default=True)
if name:
user.set_full_name(name)
if data:
user.user_data.update(data)
if language:
user.language = language
if email:
user.email = email
user.save()
if is_password(password):
# Without this line, digest auth doesn't work.
# With this line, digest auth works.
# Other than that, I'm not sure what's going on
user.get_django_user().check_password(password)
for group_id in Group.by_user(user, wrap=False):
group = group_memoizer.get(group_id)
if group.name not in group_names:
group.remove_user(user, save=False)
for group_name in group_names:
if group_name not in allowed_group_names:
raise UserUploadError(_(
"Can't add to group '%s' "
"(try adding it to your spreadsheet)"
) % group_name)
group_memoizer.by_name(group_name).add_user(user, save=False)
except (UserUploadError, CouchUser.Inconsistent) as e:
status_row['flag'] = unicode(e)
ret["rows"].append(status_row)
finally:
try:
group_memoizer.save_all()
except BulkSaveError as e:
_error_message = (
"Oops! We were not able to save some of your group changes. "
"Please make sure no one else is editing your groups "
"and try again."
)
logging.exception((
'BulkSaveError saving groups. '
'User saw error message "%s". Errors: %s'
) % (_error_message, e.errors))
ret['errors'].append(_error_message)
create_or_update_locations(domain, location_specs, log=ret)
_set_progress(total)
return ret
class GroupNameError(Exception):
def __init__(self, blank_groups):
self.blank_groups = blank_groups
@property
def message(self):
return "The following group ids have a blank name: %s." % (
', '.join([group.get_id for group in self.blank_groups])
)
def get_location_rows(domain):
users = CommTrackUser.by_domain(domain)
mappings = []
for user in users:
locations = user.locations
for location in locations:
mappings.append([
user.raw_username,
location.site_code,
location.name
])
return mappings
def dump_users_and_groups(response, domain):
file = StringIO()
writer = Excel2007ExportWriter()
users = CommCareUser.by_domain(domain)
user_data_keys = set()
user_groups_length = 0
user_dicts = []
group_data_keys = set()
group_dicts = []
group_memoizer = GroupMemoizer(domain=domain)
# load groups manually instead of calling group_memoizer.load_all()
# so that we can detect blank groups
blank_groups = set()
for group in Group.by_domain(domain):
if group.name:
group_memoizer.add_group(group)
else:
blank_groups.add(group)
if blank_groups:
raise GroupNameError(blank_groups=blank_groups)
for user in users:
data = user.user_data
group_names = sorted(map(
lambda id: group_memoizer.get(id).name,
Group.by_user(user, wrap=False)
), key=alphanumeric_sort_key)
# exclude password and user_id
user_dicts.append({
'data': data,
'group': group_names,
'name': user.full_name,
# dummy display string for passwords
'password': "********",
'phone-number': user.phone_number,
'email': user.email,
'username': user.raw_username,
'language': user.language,
'user_id': user._id,
})
user_data_keys.update(user.user_data.keys() if user.user_data else {})
user_groups_length = max(user_groups_length, len(group_names))
sorted_groups = sorted(group_memoizer.groups, key=lambda group: alphanumeric_sort_key(group.name))
for group in sorted_groups:
group_dicts.append({
'id': group.get_id,
'name': group.name,
'case-sharing': group.case_sharing,
'reporting': group.reporting,
'data': group.metadata,
})
group_data_keys.update(group.metadata.keys() if group.metadata else {})
# include obscured password column for adding new users
user_headers = ['username', 'password', 'name', 'phone-number', 'email', 'language', 'user_id']
user_headers.extend(json_to_headers(
{'data': dict([(key, None) for key in user_data_keys])}
))
user_headers.extend(json_to_headers(
{'group': range(1, user_groups_length + 1)}
))
group_headers = ['id', 'name', 'case-sharing?', 'reporting?']
group_headers.extend(json_to_headers(
{'data': dict([(key, None) for key in group_data_keys])}
))
headers = [
('users', [user_headers]),
('groups', [group_headers]),
]
commtrack_enabled = Domain.get_by_name(domain).commtrack_enabled
if commtrack_enabled:
headers.append(
('locations', [['username', 'location-sms-code', 'location name (optional)']])
)
writer.open(
header_table=headers,
file=file,
)
def get_user_rows():
for user_dict in user_dicts:
row = dict(flatten_json(user_dict))
yield [row.get(header) or '' for header in user_headers]
def get_group_rows():
for group_dict in group_dicts:
row = dict(flatten_json(group_dict))
yield [row.get(header) or '' for header in group_headers]
rows = [
('users', get_user_rows()),
('groups', get_group_rows()),
]
if commtrack_enabled:
rows.append(
('locations', get_location_rows(domain))
)
writer.write(rows)
writer.close()
response.write(file.getvalue())
| bsd-3-clause | 4,689,480,229,207,205,000 | 35.500935 | 130 | 0.552284 | false |
Jarn/jarn.viewdoc | jarn/viewdoc/testing.py | 1 | 1874 | import sys
import os
import unittest
import tempfile
import shutil
import functools
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
from os.path import realpath, isdir
class ChdirStack(object):
"""Stack of current working directories."""
def __init__(self):
self.stack = []
def __len__(self):
return len(self.stack)
def push(self, dir):
"""Push cwd on stack and change to 'dir'.
"""
self.stack.append(os.getcwd())
os.chdir(dir or os.getcwd())
def pop(self):
"""Pop dir off stack and change to it.
"""
if len(self.stack):
os.chdir(self.stack.pop())
class JailSetup(unittest.TestCase):
"""Manage a temporary working directory."""
dirstack = None
tempdir = None
def setUp(self):
self.dirstack = ChdirStack()
try:
self.tempdir = realpath(self.mkdtemp())
self.dirstack.push(self.tempdir)
except:
self.cleanUp()
raise
def tearDown(self):
self.cleanUp()
def cleanUp(self):
if self.dirstack is not None:
while self.dirstack:
self.dirstack.pop()
if self.tempdir is not None:
if isdir(self.tempdir):
shutil.rmtree(self.tempdir)
def mkdtemp(self):
return tempfile.mkdtemp()
def mkfile(self, name, body=''):
with open(name, 'wt') as file:
file.write(body)
def quiet(func):
"""Decorator swallowing stdout and stderr output.
"""
def wrapper(*args, **kw):
saved = sys.stdout, sys.stderr
sys.stdout = sys.stderr = StringIO()
try:
return func(*args, **kw)
finally:
sys.stdout, sys.stderr = saved
return functools.wraps(func)(wrapper)
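# Typical (hypothetical) use: decorate a noisy helper in a test, e.g.
#   @quiet
#   def run_tool(): ...   # stdout/stderr produced inside are swallowed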
| bsd-2-clause | -3,124,150,782,722,566,000 | 21.309524 | 53 | 0.570971 | false |
halfakop/Teacup_Firmware | configtool/gui.py | 1 | 19693 |
import sys
import time
try:
import wx
except:
print("ImportError: No module named wx\n\n"
"wxPython is not installed. This program requires wxPython to run.\n"
"See your package manager and/or http://wxpython.org/download.php.")
time.sleep(10)
sys.exit(-1)
import os.path
from configtool.data import reHelpText
from configtool.decoration import Decoration
from configtool.settings import Settings, SettingsDlg
from configtool.printerpanel import PrinterPanel
from configtool.boardpanel import BoardPanel
from configtool.build import Build, Upload
from configtool.data import reInclude
ID_LOAD_PRINTER = 1000
ID_SAVE_PRINTER = 1001
ID_SAVE_PRINTER_AS = 1002
ID_LOAD_BOARD = 1010
ID_SAVE_BOARD = 1011
ID_SAVE_BOARD_AS = 1012
ID_LOAD_CONFIG = 1020
ID_SAVE_CONFIG = 1021
ID_BUILD = 1030
ID_UPLOAD = 1031
ID_SETTINGS = 1040
ID_HELP = 1050
ID_REPORT = 1051
ID_ABOUT = 1052
class ConfigFrame(wx.Frame):
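  # Main application window: hosts a two-page notebook (printer and board
  # configuration panels) plus the File/Edit/Build/Help menus built in
  # makeMenu() below.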
def __init__(self, settings):
wx.Frame.__init__(self, None, -1, "Teacup Configtool", size = (880, 550))
self.Bind(wx.EVT_CLOSE, self.onClose)
self.Bind(wx.EVT_SIZE, self.onResize)
self.deco = Decoration()
panel = wx.Panel(self, -1)
panel.SetBackgroundColour(self.deco.getBackgroundColour())
panel.Bind(wx.EVT_PAINT, self.deco.onPaintBackground)
self.settings = settings
self.settings.app = self
self.settings.font = wx.Font(8, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD)
self.heaters = []
self.savePrtEna = False
self.saveBrdEna = False
self.protPrtFile = False
self.protBrdFile = False
sz = wx.BoxSizer(wx.HORIZONTAL)
self.nb = wx.Notebook(panel, wx.ID_ANY, size = (880, 550),
style = wx.BK_DEFAULT)
self.nb.SetBackgroundColour(self.deco.getBackgroundColour())
self.nb.SetFont(self.settings.font)
self.printerFileName = None
self.printerTabDecor = ""
self.printerBaseText = "Printer"
self.pgPrinter = PrinterPanel(self, self.nb, self.settings)
self.nb.AddPage(self.pgPrinter, self.printerBaseText)
self.boardFileName = None
self.boardTabDecor = ""
self.boardBaseText = "Board"
self.pgBoard = BoardPanel(self, self.nb, self.settings)
self.nb.AddPage(self.pgBoard, self.boardBaseText)
panel.Fit()
self.panel = panel
sz.Add(self.nb, 1, wx.EXPAND + wx.ALL, 5)
self.SetSizer(sz)
self.makeMenu()
def onClose(self, evt):
if not self.pgPrinter.confirmLoseChanges("exit"):
return
if not self.pgBoard.confirmLoseChanges("exit"):
return
self.Destroy()
def onResize(self, evt):
self.panel.SetSize(self.GetClientSize())
self.Refresh()
    evt.Skip()
def setPrinterTabFile(self, fn):
self.printerFileName = fn
self.updatePrinterTab()
def setPrinterTabDecor(self, prefix):
self.printerTabDecor = prefix
self.updatePrinterTab()
def updatePrinterTab(self):
txt = self.printerTabDecor + self.printerBaseText
if self.printerFileName:
txt += " <%s>" % self.printerFileName
self.nb.SetPageText(0, txt)
def setBoardTabFile(self, fn):
self.boardFileName = fn
self.updateBoardTab()
def setBoardTabDecor(self, prefix):
self.boardTabDecor = prefix
self.updateBoardTab()
def updateBoardTab(self):
txt = self.boardTabDecor + self.boardBaseText
if self.boardFileName:
txt += " <%s>" % self.boardFileName
self.nb.SetPageText(1, txt)
def setHeaters(self, ht):
self.heaters = ht
self.pgPrinter.setHeaters(ht)
def makeMenu(self):
file_menu = wx.Menu()
file_menu.Append(ID_LOAD_CONFIG, "Load config.h",
"Load config.h and its named printer and board files.")
self.Bind(wx.EVT_MENU, self.onLoadConfig, id = ID_LOAD_CONFIG)
file_menu.Enable(ID_LOAD_CONFIG, False)
file_menu.Append(ID_SAVE_CONFIG, "Save config.h", "Save config.h file.")
self.Bind(wx.EVT_MENU, self.onSaveConfig, id = ID_SAVE_CONFIG)
file_menu.Enable(ID_SAVE_CONFIG, False)
file_menu.AppendSeparator()
file_menu.Append(ID_LOAD_PRINTER, "Load printer",
"Load a printer configuration file.")
self.Bind(wx.EVT_MENU, self.pgPrinter.onLoadConfig, id = ID_LOAD_PRINTER)
file_menu.Append(ID_SAVE_PRINTER, "Save printer",
"Save printer configuration.")
self.Bind(wx.EVT_MENU, self.onSavePrinterConfig, id = ID_SAVE_PRINTER)
file_menu.Enable(ID_SAVE_PRINTER, False)
file_menu.Append(ID_SAVE_PRINTER_AS, "Save printer as...",
"Save printer configuration to a new file.")
self.Bind(wx.EVT_MENU, self.onSavePrinterConfigAs, id = ID_SAVE_PRINTER_AS)
file_menu.Enable(ID_SAVE_PRINTER_AS, False)
file_menu.AppendSeparator()
file_menu.Append(ID_LOAD_BOARD, "Load board",
"Load a board configuration file.")
self.Bind(wx.EVT_MENU, self.pgBoard.onLoadConfig, id = ID_LOAD_BOARD)
file_menu.Append(ID_SAVE_BOARD, "Save board", "Save board configuration.")
self.Bind(wx.EVT_MENU, self.onSaveBoardConfig, id = ID_SAVE_BOARD)
file_menu.Enable(ID_SAVE_BOARD, False)
file_menu.Append(ID_SAVE_BOARD_AS, "Save board as...",
"Save board configuration to a new file.")
self.Bind(wx.EVT_MENU, self.onSaveBoardConfigAs, id = ID_SAVE_BOARD_AS)
file_menu.Enable(ID_SAVE_BOARD_AS, False)
file_menu.AppendSeparator()
file_menu.Append(wx.ID_EXIT, "E&xit", "Exit the application.")
self.Bind(wx.EVT_MENU, self.onClose, id = wx.ID_EXIT)
self.fileMenu = file_menu
menu_bar = wx.MenuBar()
menu_bar.Append(file_menu, "&File")
edit_menu = wx.Menu()
edit_menu.Append(ID_SETTINGS, "Settings", "Change settings.")
self.Bind(wx.EVT_MENU, self.onEditSettings, id = ID_SETTINGS)
self.editMenu = edit_menu
menu_bar.Append(edit_menu, "&Edit")
build_menu = wx.Menu()
build_menu.Append(ID_BUILD, "Build", "Build the executable.")
self.Bind(wx.EVT_MENU, self.onBuild, id = ID_BUILD)
build_menu.Append(ID_UPLOAD, "Upload", "Upload the executable.")
self.Bind(wx.EVT_MENU, self.onUpload, id = ID_UPLOAD)
self.buildMenu = build_menu
menu_bar.Append(build_menu, "&Build")
help_menu = wx.Menu()
help_menu.Append(ID_HELP, "Help", "Find help.")
self.Bind(wx.EVT_MENU, self.onHelp, id = ID_HELP)
help_menu.Append(ID_REPORT, "Report problem",
"Report a problem to Teacup maintainers.")
self.Bind(wx.EVT_MENU, self.onReportProblem, id = ID_REPORT)
help_menu.AppendSeparator()
help_menu.Append(ID_ABOUT, "About Teacup")
self.Bind(wx.EVT_MENU, self.onAbout, id = ID_ABOUT)
self.helpMenu = help_menu
menu_bar.Append(help_menu, "&Help")
self.SetMenuBar(menu_bar)
loadFlag = self.checkEnableLoadConfig()
self.checkEnableUpload()
if loadFlag:
self.loadConfigFile("config.h")
def onSaveBoardConfig(self, evt):
rc = self.pgBoard.onSaveConfig(evt)
if rc:
self.checkEnableLoadConfig()
return rc
def onSaveBoardConfigAs(self, evt):
rc = self.pgBoard.onSaveConfigAs(evt)
if rc:
self.checkEnableLoadConfig()
return rc
def onSavePrinterConfig(self, evt):
rc = self.pgPrinter.onSaveConfig(evt)
if rc:
self.checkEnableLoadConfig()
return rc
def onSavePrinterConfigAs(self, evt):
rc = self.pgPrinter.onSaveConfigAs(evt)
if rc:
self.checkEnableLoadConfig()
return rc
def checkEnableLoadConfig(self):
fn = os.path.join(self.settings.folder, "config.h")
if os.path.isfile(fn):
self.fileMenu.Enable(ID_LOAD_CONFIG, True)
self.buildMenu.Enable(ID_BUILD, True)
return True
else:
self.fileMenu.Enable(ID_LOAD_CONFIG, False)
self.buildMenu.Enable(ID_BUILD, False)
return False
def checkEnableUpload(self):
fn = os.path.join(self.settings.folder, "teacup.hex")
if os.path.isfile(fn):
self.buildMenu.Enable(ID_UPLOAD, True)
else:
self.buildMenu.Enable(ID_UPLOAD, False)
def enableSavePrinter(self, saveFlag, saveAsFlag):
self.fileMenu.Enable(ID_SAVE_PRINTER, saveFlag)
self.fileMenu.Enable(ID_SAVE_PRINTER_AS, saveAsFlag)
self.savePrtEna = saveAsFlag
self.protPrtFile = not saveFlag
if self.savePrtEna and self.saveBrdEna:
self.enableSaveConfig(True)
else:
self.enableSaveConfig(False)
def enableSaveBoard(self, saveFlag, saveAsFlag):
self.fileMenu.Enable(ID_SAVE_BOARD, saveFlag)
self.fileMenu.Enable(ID_SAVE_BOARD_AS, saveAsFlag)
self.saveBrdEna = saveAsFlag
self.protBrdFile = not saveFlag
if self.savePrtEna and self.saveBrdEna:
self.enableSaveConfig(True)
else:
self.enableSaveConfig(False)
def enableSaveConfig(self, flag):
self.fileMenu.Enable(ID_SAVE_CONFIG, flag)
def onLoadConfig(self, evt):
self.loadConfigFile("config.h")
def loadConfigFile(self, fn):
if not self.pgPrinter.confirmLoseChanges("load config"):
return False
if not self.pgBoard.confirmLoseChanges("load config"):
return False
pfile, bfile = self.getConfigFileNames(fn)
if not pfile:
self.message("Config file did not contain a printer file "
"include statement.", "Config error")
return False
else:
if not self.pgPrinter.loadConfigFile(pfile):
self.message("There was a problem loading the printer config file:\n%s"
% pfile, "Config error")
return False
if not bfile:
self.message("Config file did not contain a board file "
"include statement.", "Config error")
return False
else:
if not self.pgBoard.loadConfigFile(bfile):
self.message("There was a problem loading the board config file:\n%s"
% bfile, "Config error")
return False
return True
def getConfigFileNames(self, fn):
pfile = None
bfile = None
path = os.path.join(self.settings.folder, fn)
try:
cfgBuffer = list(open(path))
except:
self.message("Unable to process config file %s." % fn, "File error")
return None, None
for ln in cfgBuffer:
if not ln.lstrip().startswith("#include"):
continue
m = reInclude.search(ln)
if m:
t = m.groups()
if len(t) == 1:
if "printer." in t[0]:
if pfile:
self.message("Multiple printer file include statements.\n"
"Ignoring %s." % ln, "Config error",
wx.OK + wx.ICON_WARNING)
else:
pfile = os.path.join(self.settings.folder, t[0])
elif "board." in t[0]:
if bfile:
self.message("Multiple board file include statements.\n"
"Ignoring %s." % ln, "Config error",
wx.OK + wx.ICON_WARNING)
else:
bfile = os.path.join(self.settings.folder, t[0])
else:
self.message("Unable to parse include statement:\n%s" % ln,
"Config error")
return pfile, bfile
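  # A hedged illustration of the two include lines getConfigFileNames() scans
  # for in config.h; the concrete file names below are made up, only the
  # "#include" form and the "printer." / "board." substrings matter here:
  #
  #   #include "config/printer.example.h"
  #   #include "config/board.example.h"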
def onSaveConfig(self, evt):
fn = os.path.join(self.settings.folder, "config.h")
try:
fp = open(fn, 'w')
except:
self.message("Unable to open config.h for output.", "File error")
return False
bfn = self.pgBoard.getFileName()
if self.pgBoard.isModified() and self.pgBoard.isValid():
if not self.pgBoard.saveConfigFile(bfn):
return False
else:
self.pgBoard.generateTempTables()
pfn = self.pgPrinter.getFileName()
if self.pgPrinter.isModified() and self.pgPrinter.isValid():
if not self.pgPrinter.saveConfigFile(pfn):
return False
prefix = self.settings.folder + os.path.sep
lpfx = len(prefix)
if bfn.startswith(prefix):
rbfn = bfn[lpfx:]
else:
rbfn = bfn
if pfn.startswith(prefix):
rpfn = pfn[lpfx:]
else:
rpfn = pfn
fp.write("\n")
fp.write("// Configuration for controller board.\n")
fp.write("#include \"%s\"\n" % rbfn)
fp.write("\n")
fp.write("// Configuration for printer board.\n")
fp.write("#include \"%s\"\n" % rpfn)
fp.close()
self.checkEnableLoadConfig()
return True
def onBuild(self, evt):
self.onBuildorUpload(True)
def onUpload(self, evt):
self.onBuildorUpload(False)
def onBuildorUpload(self, buildFlag):
if not (self.pgPrinter.hasData() or self.pgBoard.hasData()):
dlg = wx.MessageDialog(self, "Data needs to be loaded. "
"Click Yes to load config.h.",
"Data missing",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc != wx.ID_YES:
return
self.loadConfigFile("config.h")
else:
if self.pgPrinter.isModified():
dlg = wx.MessageDialog(self, "Printer data needs to be saved. Click "
"Yes to save printer configuration.",
"Changes pending",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc != wx.ID_YES:
return
if self.protPrtFile:
rc = self.onSavePrinterConfigAs(None)
else:
rc = self.onSavePrinterConfig(None)
if not rc:
return
if self.pgBoard.isModified():
dlg = wx.MessageDialog(self, "Board data needs to be saved. Click "
"Yes to save board configuration.",
"Changes pending",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc != wx.ID_YES:
return
if self.protBrdFile:
rc = self.onSaveBoardConfigAs(None)
else:
rc = self.onSaveBoardConfig(None)
if not rc:
return
if not self.verifyConfigLoaded():
dlg = wx.MessageDialog(self, "Loaded configuration does not match the "
"config.h file. Click Yes to save config.h.",
"Configuration changed",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc != wx.ID_YES:
return
if not self.onSaveConfig(None):
return
f_cpu, cpu = self.pgBoard.getCPUInfo()
if not cpu:
dlg = wx.MessageDialog(self, "Unable to determine CPU type.",
"CPU type error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if not f_cpu:
dlg = wx.MessageDialog(self, "Unable to determine CPU clock rate.",
"CPU clock rate error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if buildFlag:
dlg = Build(self, self.settings, f_cpu, cpu)
dlg.ShowModal()
dlg.Destroy()
self.checkEnableUpload()
else:
dlg = Upload(self, self.settings, f_cpu, cpu)
dlg.ShowModal()
dlg.Destroy()
def verifyConfigLoaded(self):
pfile, bfile = self.getConfigFileNames("config.h")
lpfile = self.pgPrinter.getFileName()
lbfile = self.pgBoard.getFileName()
return ((pfile == lpfile) and (bfile == lbfile))
def onEditSettings(self, evt):
dlg = SettingsDlg(self, self.settings)
rc = dlg.ShowModal()
dlg.Destroy()
def onHelp(self, evt):
self.message("Find help by hovering slowly over the buttons and text "
"fields. Tooltip should appear, explaining things.",
"Find help", style = wx.OK)
def onReportProblem(self, evt):
import urllib
import webbrowser
import subprocess
from sys import platform
# Testing allowed URLs up to 32 kB in size. Longer URLs are simply chopped.
mailRecipients ="reply+0004dc756da9f0641af0a3834c580ad5be469f4f6b" \
"5d4cfc92cf00000001118c958a92a169ce051faa8c@" \
"reply.github.com,[email protected]"
mailSubject = "Teacup problem report"
mailBody = "Please answer these questions before hitting \"send\":\n\n" \
"What did you try to do?\n\n\n" \
"What did you expect to happen?\n\n\n" \
"What happened instead?\n\n\n\n" \
"To allow developers to help, configuration files are " \
"attached, with help comments stripped:\n"
for f in self.pgBoard.getFileName(), self.pgPrinter.getFileName():
if not f:
mailBody += "\n(no file loaded)\n"
continue
mailBody += "\n" + os.path.basename(f) + ":\n"
mailBody += "----------------------------------------------\n"
try:
fc = open(f).read()
fc = reHelpText.sub("", fc)
mailBody += fc
except:
mailBody += "(could not read this file)\n"
mailBody += "----------------------------------------------\n"
url = "mailto:" + urllib.quote(mailRecipients) + \
"?subject=" + urllib.quote(mailSubject) + \
"&body=" + urllib.quote(mailBody)
    # This is a workaround for a bug in gvfs-open coming with (at least) Ubuntu
# 15.04. gvfs-open would open mailto:///[email protected] instead of
# the requested mailto:[email protected].
if platform.startswith("linux"):
try:
subprocess.check_output(["gvfs-open", "--help"])
# Broken gvfs-open exists, so it might be used.
# Try to open the URL directly.
for urlOpener in "thunderbird", "evolution", "firefox", "mozilla", \
"epiphany", "konqueror", "chromium-browser", \
"google-chrome":
try:
subprocess.check_output([urlOpener, url], stderr=subprocess.STDOUT)
return
except:
pass
except:
pass
webbrowser.open_new(url)
def onAbout(self, evt):
# Get the contributors' top 10 with something like this:
# export B=experimental
# git log $B | grep "Author:" | sort | uniq | while \
# read A; do N=$(git log $B | grep "$A" | wc -l); echo "$N $A"; done | \
# sort -rn
self.message("Teacup Firmware is a 3D Printer and CNC machine controlling "
"firmware with emphasis on performance, efficiency and "
"outstanding quality. What Teacup does, shall it do very well."
"\n\n\n"
"Lots of people hard at work! Top 10 contributors:\n\n"
" Markus Hitter (542 commits)\n"
" Michael Moon (322 commits)\n"
" Phil Hord (55 commits)\n"
" Jeff Bernardis (51 commits)\n"
" Markus Amsler (47 commits)\n"
" David Forrest (27 commits)\n"
" Jim McGee (15 commits)\n"
" Ben Jackson (12 commits)\n"
" Bas Laarhoven (10 commits)\n"
" Stephan Walter (9 commits)\n"
" Roland Brochard (3 commits)\n"
" Jens Ch. Restemeier (3 commits)\n",
"About Teacup", style = wx.OK)
def message(self, text, title, style = wx.OK + wx.ICON_ERROR):
dlg = wx.MessageDialog(self, text, title, style)
dlg.ShowModal()
dlg.Destroy()
def StartGui(settings):
app = wx.App(False)
frame = ConfigFrame(settings)
frame.Show(True)
app.MainLoop()
| gpl-2.0 | 6,312,573,059,711,797,000 | 30.917342 | 80 | 0.607779 | false |
ahwkuepper/stdme | app/constants.py | 1 | 8339 | import numpy as np
#labels for summary plots
d_label = np.array(["You", "Your gender", "Your age group", "Your race / ethnicity", "Your location"])
#US statistics
gender_number = {}
gender_number["Male"] = 155651602
gender_number["Female"] = 160477237
race_number = {}
race_number["Native"] = 1942876.0
race_number["Asian"] = 12721721.0
race_number["Black"] = 29489649.0
race_number["Hispanic"] = 46407173.0
race_number["Multiple"] = 5145135.0
race_number["Pacific"] = 473703.0
race_number["White"] = 161443167.0
age_number = {}
age_number["0-14"] = 61089123.0
age_number["15-19"] = 21158964.0
age_number["20-24"] = 22795438.0
age_number["25-29"] = 21580198.0
age_number["30-34"] = 21264389.0
age_number["35-39"] = 19603770.0
age_number["40-44"] = 20848920.0
age_number["45-54"] = 43767532.0
age_number["55-64"] = 39316431.0
age_number["65+"] = 44704074.0
#Chlamydia statistics
gender_rate = {}
gender_factor = {}
gender_rate["Male"] = 278.4e-5
gender_rate["Female"] = 627.2e-5
rate_average = ((gender_rate["Male"]*gender_number["Male"]
+gender_rate["Female"]*gender_number["Male"])
/(gender_number["Male"]+gender_number["Female"]))
gender_factor["Male"] = gender_rate["Male"]/rate_average
gender_factor["Female"] = gender_rate["Female"]/rate_average
gender_factor["Female"], gender_factor["Male"]
race_rate = {}
race_factor = {}
race_rate["Native"] = 689.1e-5
race_rate["Asian"] = 115.8e-5
race_rate["Black"] = 1152.6e-5
race_rate["Hispanic"] = 376.2e-5
race_rate["Multiple"] = 116.1e-5
race_rate["Pacific"] = 641.5e-5
race_rate["White"] = 187.0e-5
race_factor["Native"] = race_rate["Native"]/rate_average
race_factor["Asian"] = race_rate["Asian"]/rate_average
race_factor["Black"] = race_rate["Black"]/rate_average
race_factor["Hispanic"] = race_rate["Hispanic"]/rate_average
race_factor["Multiple"] = race_rate["Multiple"]/rate_average
race_factor["Pacific"] = race_rate["Pacific"]/rate_average
race_factor["White"] = race_rate["White"]/rate_average
age_rate = {}
age_factor = {}
age_rate["0-14"] = 20.0e-5
age_rate["15-19"] = 1804.0e-5
age_rate["20-24"] = 2484.6e-5
age_rate["25-29"] = 1176.2e-5
age_rate["30-34"] = 532.4e-5
age_rate["35-39"] = 268.0e-5
age_rate["40-44"] = 131.5e-5
age_rate["45-54"] = 56.6e-5
age_rate["55-64"] = 16.6e-5
age_rate["65+"] = 3.2e-5
age_factor["0-14"] = age_rate["0-14"]/rate_average
age_factor["15-19"] = age_rate["15-19"]/rate_average
age_factor["20-24"] = age_rate["20-24"]/rate_average
age_factor["25-29"] = age_rate["25-29"]/rate_average
age_factor["30-34"] = age_rate["30-34"]/rate_average
age_factor["35-39"] = age_rate["35-39"]/rate_average
age_factor["40-44"] = age_rate["40-44"]/rate_average
age_factor["45-54"] = age_rate["45-54"]/rate_average
age_factor["55-64"] = age_rate["55-64"]/rate_average
age_factor["65+"] = age_rate["65+"]/rate_average
#Gonorrhea statistics
gender_rate_gonorrhea = {}
gender_factor_gonorrhea = {}
gender_rate_gonorrhea["Male"] = 120.1e-5
gender_rate_gonorrhea["Female"] = 101.3e-5
rate_average_gonorrhea = ((gender_rate_gonorrhea["Male"]*gender_number["Male"]
+gender_rate_gonorrhea["Female"]*gender_number["Male"])
/(gender_number["Male"]+gender_number["Female"]))
gender_factor_gonorrhea["Male"] = gender_rate_gonorrhea["Male"]/rate_average
gender_factor_gonorrhea["Female"] = gender_rate_gonorrhea["Female"]/rate_average
gender_factor_gonorrhea["Female"], gender_factor["Male"]
race_rate_gonorrhea = {}
race_factor_gonorrhea = {}
race_rate_gonorrhea["Native"] = 103.2e-5
race_rate_gonorrhea["Asian"] = 19.9e-5
race_rate_gonorrhea["Black"] = 422.9e-5
race_rate_gonorrhea["Hispanic"] = 72.7e-5
race_rate_gonorrhea["Multiple"] = 39.1e-5
race_rate_gonorrhea["Pacific"] = 103.2e-5
race_rate_gonorrhea["White"] = 39.8e-5
race_factor_gonorrhea["Native"] = race_rate_gonorrhea["Native"]/rate_average_gonorrhea
race_factor_gonorrhea["Asian"] = race_rate_gonorrhea["Asian"]/rate_average_gonorrhea
race_factor_gonorrhea["Black"] = race_rate_gonorrhea["Black"]/rate_average_gonorrhea
race_factor_gonorrhea["Hispanic"] = race_rate_gonorrhea["Hispanic"]/rate_average_gonorrhea
race_factor_gonorrhea["Multiple"] = race_rate_gonorrhea["Multiple"]/rate_average_gonorrhea
race_factor_gonorrhea["Pacific"] = race_rate_gonorrhea["Pacific"]/rate_average_gonorrhea
race_factor_gonorrhea["White"] = race_rate_gonorrhea["White"]/rate_average_gonorrhea
age_rate_gonorrhea = {}
age_factor_gonorrhea = {}
age_rate_gonorrhea["0-14"] = 4.3e-5
age_rate_gonorrhea["15-19"] = 323.6e-5
age_rate_gonorrhea["20-24"] = 509.8e-5
age_rate_gonorrhea["25-29"] = 322.5e-5
age_rate_gonorrhea["30-34"] = 180.6e-5
age_rate_gonorrhea["35-39"] = 106.1e-5
age_rate_gonorrhea["40-44"] = 60.9e-5
age_rate_gonorrhea["45-54"] = 35.0e-5
age_rate_gonorrhea["55-64"] = 11.6e-5
age_rate_gonorrhea["65+"] = 2.0e-5
age_factor_gonorrhea["0-14"] = age_rate_gonorrhea["0-14"]/rate_average_gonorrhea
age_factor_gonorrhea["15-19"] = age_rate_gonorrhea["15-19"]/rate_average_gonorrhea
age_factor_gonorrhea["20-24"] = age_rate_gonorrhea["20-24"]/rate_average_gonorrhea
age_factor_gonorrhea["25-29"] = age_rate_gonorrhea["25-29"]/rate_average_gonorrhea
age_factor_gonorrhea["30-34"] = age_rate_gonorrhea["30-34"]/rate_average_gonorrhea
age_factor_gonorrhea["35-39"] = age_rate_gonorrhea["35-39"]/rate_average_gonorrhea
age_factor_gonorrhea["40-44"] = age_rate_gonorrhea["40-44"]/rate_average_gonorrhea
age_factor_gonorrhea["45-54"] = age_rate_gonorrhea["45-54"]/rate_average_gonorrhea
age_factor_gonorrhea["55-64"] = age_rate_gonorrhea["55-64"]/rate_average_gonorrhea
age_factor_gonorrhea["65+"] = age_rate_gonorrhea["65+"]/rate_average_gonorrhea
#Syphilis statistics
gender_rate_syphilis = {}
gender_factor_syphilis = {}
gender_rate_syphilis["Male"] = 11.7e-5
gender_rate_syphilis["Female"] = 1.1e-5
rate_average_syphilis = ((gender_rate_syphilis["Male"]*gender_number["Male"]
+gender_rate_syphilis["Female"]*gender_number["Male"])
/(gender_number["Male"]+gender_number["Female"]))
gender_factor_syphilis["Male"] = gender_rate_syphilis["Male"]/rate_average
gender_factor_syphilis["Female"] = gender_rate_syphilis["Female"]/rate_average
gender_factor_syphilis["Female"], gender_factor["Male"]
race_rate_syphilis = {}
race_factor_syphilis = {}
race_rate_syphilis["Native"] = 7.9e-5
race_rate_syphilis["Asian"] = 2.8e-5
race_rate_syphilis["Black"] = 18.9e-5
race_rate_syphilis["Hispanic"] = 7.4e-5
race_rate_syphilis["Multiple"] = 2.3e-5
race_rate_syphilis["Pacific"] = 6.7e-5
race_rate_syphilis["White"] = 3.4e-5
race_factor_syphilis["Native"] = race_rate_syphilis["Native"]/rate_average_syphilis
race_factor_syphilis["Asian"] = race_rate_syphilis["Asian"]/rate_average_syphilis
race_factor_syphilis["Black"] = race_rate_syphilis["Black"]/rate_average_syphilis
race_factor_syphilis["Hispanic"] = race_rate_syphilis["Hispanic"]/rate_average_syphilis
race_factor_syphilis["Multiple"] = race_rate_syphilis["Multiple"]/rate_average_syphilis
race_factor_syphilis["Pacific"] = race_rate_syphilis["Pacific"]/rate_average_syphilis
race_factor_syphilis["White"] = race_rate_syphilis["White"]/rate_average_syphilis
age_rate_syphilis = {}
age_factor_syphilis = {}
age_rate_syphilis["0-14"] = 0.0e-5
age_rate_syphilis["15-19"] = 4.8e-5
age_rate_syphilis["20-24"] = 18.1e-5
age_rate_syphilis["25-29"] = 19.0e-5
age_rate_syphilis["30-34"] = 13.6e-5
age_rate_syphilis["35-39"] = 10.4e-5
age_rate_syphilis["40-44"] = 8.4e-5
age_rate_syphilis["45-54"] = 6.8e-5
age_rate_syphilis["55-64"] = 2.3e-5
age_rate_syphilis["65+"] = 0.4e-5
age_factor_syphilis["0-14"] = age_rate_syphilis["0-14"]/rate_average_syphilis
age_factor_syphilis["15-19"] = age_rate_syphilis["15-19"]/rate_average_syphilis
age_factor_syphilis["20-24"] = age_rate_syphilis["20-24"]/rate_average_syphilis
age_factor_syphilis["25-29"] = age_rate_syphilis["25-29"]/rate_average_syphilis
age_factor_syphilis["30-34"] = age_rate_syphilis["30-34"]/rate_average_syphilis
age_factor_syphilis["35-39"] = age_rate_syphilis["35-39"]/rate_average_syphilis
age_factor_syphilis["40-44"] = age_rate_syphilis["40-44"]/rate_average_syphilis
age_factor_syphilis["45-54"] = age_rate_syphilis["45-54"]/rate_average_syphilis
age_factor_syphilis["55-64"] = age_rate_syphilis["55-64"]/rate_average_syphilis
age_factor_syphilis["65+"] = age_rate_syphilis["65+"]/rate_average_syphilis
| mit | 658,293,424,676,495,500 | 37.428571 | 102 | 0.699844 | false |
riolet/rioauth | provider/pages/register.py | 1 | 1949 | import web
import common
import base
class Register(base.Page):
def __init__(self):
base.Page.__init__(self, "Register")
def GET(self):
# show login page
return common.render.register()
@staticmethod
def send_conf_email(user_id, name, email):
duration = 1800 # 30 minutes
key = common.email_loopback.add(user_id, '/login', duration=duration)
subject = "Riolet Registration"
link = "{uri_prefix}/confirmemail?key={key}".format(
uri_prefix=web.ctx.home,
key=key)
body = """
Hello, {name}
Thank you for registering with Riolet. To complete your registration, please follow the link below:
{link}
This link will be valid for the next {duration} minutes. If it expires, you will need to register again.
Thanks,
Riolet
""".format(name=name, link=link, duration=duration/60)
common.sendmail(email, subject, body)
def POST(self):
email = self.data.get('email')
name = self.data.get('name')
password = self.data.get('password')
confirmpassword = self.data.get('confirmpassword')
if not email or not name or not password or not confirmpassword:
self.errors.append('Error processing form.')
return common.render.register(self.errors)
if password != confirmpassword:
self.errors.append("Passwords don't match")
return common.render.register(self.errors)
try:
self.user_id = common.users.add(email, password, name)
except (common.ValidationError, KeyError) as e:
self.errors.append('Error: {0}'.format(e.message))
return common.render.register(self.errors)
# send the user an email to have the use confirm their email address
self.send_conf_email(self.user_id, name, email)
# send them back to the login page
self.redirect('/login?register=success')
| gpl-3.0 | 6,252,393,982,004,596,000 | 30.95082 | 104 | 0.636224 | false |
geosoco/twitter_capture_client | multiprocess_client_test.py | 1 | 1546 | #!/usr/bin/env python
"""Base classes for clients."""
import argparse
import logging
import logging.config
from multiprocessing import freeze_support
from clients.multiprocess import MultiprocessClientBase
from configfile import ConfigFile
import exceptions
#
# global
#
log = None
#
# configure basic logger
#
def configure_logging(config=None):
if config is not None:
logging.config.dictConfig(config)
else:
# set up some default logging options
logging.basicConfig(
format=("%(asctime)-15s %(processName) %(levelname)s|"
"%(name)s| %(message)s"),
level=logging.DEBUG
)
if __name__ == "__main__":
freeze_support()
# handle arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--configfile",
default="config.json",
help="config file to use"
)
args = parser.parse_args()
# config file
config = ConfigFile(args.configfile)
# configure the logging
logging_config = config.getValue(
"client.logging",
default=None,
alternate_paths=["logging"])
configure_logging(logging_config)
# create our log
log = logging.getLogger("main")
# create and run client
try:
client = MultiprocessClientBase(config)
client.run()
except exceptions.KeyboardInterrupt, e:
log.info("keyboard interrupt")
client.stop_process()
log.info("stopping")
log.debug("exiting")
else:
configure_logging()
| bsd-3-clause | -522,471,715,683,376,060 | 18.820513 | 66 | 0.631307 | false |
dileep-kishore/microbial-ai | tests/conftest.py | 1 | 2031 | # @Author: dileep
# @Last Modified by: dileep
import json
import os
import random
import pytest
from microbial_ai.io import ModelTable
from microbial_ai.metabolism import Microbe
CURRPATH = os.path.dirname(__file__)
@pytest.fixture(scope="module")
def model_table():
return ModelTable(db_uri=f"sqlite:///{CURRPATH}/data/models.db",
model_path=f"{CURRPATH}/data/metabolic_models")
@pytest.fixture
def core_model(model_table):
[model] = model_table.fetch_byid([17]) # 838 is actual database
return model
@pytest.fixture
def random_model(model_table):
mid = random.randint(1, 21) # 857 in actual database
[model] = model_table.fetch_byid([mid]) # 791 for ecoli
return model
@pytest.fixture
def known_microbes(model_table):
[core, ecoli] = model_table.fetch_byid([17, 19]) # 838, 791 is actual database
return core, ecoli
@pytest.fixture
def random_microbes(model_table):
mids = random.sample(range(1, 21), k=3) # 857 in actual database
[model1, model2, model3] = model_table.fetch_byid(mids)
return model1, model2, model3
@pytest.mark.filterwarnings('ignore')
@pytest.fixture
def known_microbiome(known_microbes):
return [Microbe(m) for m in known_microbes]
@pytest.mark.filterwarnings('ignore')
@pytest.fixture
def random_microbiome(random_microbes):
return [Microbe(m) for m in random_microbes]
@pytest.fixture
def microbe_dict():
config_path = os.path.join(CURRPATH, 'data/config/config.json')
with open(config_path, 'r') as fid:
json_data = json.load(fid)
return json_data['microbes-test']
@pytest.fixture
def core_media():
config_path = os.path.join(CURRPATH, 'data/config/config.json')
with open(config_path, 'r') as fid:
json_data = json.load(fid)
return json_data['media']
@pytest.fixture
def core_exrxns():
config_path = os.path.join(CURRPATH, 'data/config/config.json')
with open(config_path, 'r') as fid:
json_data = json.load(fid)
return json_data['ex_rxns']
| mit | -7,260,259,383,662,208,000 | 24.708861 | 83 | 0.688331 | false |
O-T-L/PyOTL | Tests/nsga_ii.py | 1 | 5611 | """
Copyright (C) 2014, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import unittest
import numpy
import scipy.stats
import pyotl.utility
import pyotl.problem.real
import pyotl.initial.real
import pyotl.crossover.real
import pyotl.mutation.real
import pyotl.optimizer.real
import pyotl.indicator.real
class TestCase(unittest.TestCase):
def setUp(self):
self.pathData = os.path.join(os.path.dirname(__file__), 'Data')
self.repeat = 30
def tearDown(self):
pass
def testNSGA_II_DTLZ2(self):
random = pyotl.utility.Random(1)
problemGen = lambda: pyotl.problem.real.DTLZ2(3)
problem = problemGen()
pathProblem = os.path.join(self.pathData, type(problem).__name__, str(problem.GetNumberOfObjectives()))
_crossover = pyotl.crossover.real.SimulatedBinaryCrossover(random, 1, problem.GetBoundary(), 20)
crossover = pyotl.crossover.real.CoupleCoupleCrossoverAdapter(_crossover, random)
mutation = pyotl.mutation.real.PolynomialMutation(random, 1 / float(len(problem.GetBoundary())), problem.GetBoundary(), 20)
pfList = []
for _ in range(self.repeat):
problem = problemGen()
initial = pyotl.initial.real.BatchUniform(random, problem.GetBoundary(), 100)
optimizer = pyotl.optimizer.real.NSGA_II(random, problem, initial, crossover, mutation)
while optimizer.GetProblem().GetNumberOfEvaluations() < 30000:
optimizer()
pf = pyotl.utility.PyListList2VectorVector_Real(
[list(solution.objective_) for solution in optimizer.GetSolutionSet()])
pfList.append(pf)
pathCrossover = os.path.join(pathProblem, type(crossover.GetCrossover()).__name__)
pathOptimizer = os.path.join(pathCrossover, type(optimizer).__name__)
pfTrue = pyotl.utility.PyListList2VectorVector_Real(numpy.loadtxt(os.path.join(pathProblem, 'PF.csv')).tolist())
# GD
indicator = pyotl.indicator.real.DTLZ2GD()
metricList = [indicator(pf) for pf in pfList]
rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'GD.csv')).tolist()
self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
# IGD
indicator = pyotl.indicator.real.InvertedGenerationalDistance(pfTrue)
metricList = [indicator(pf) for pf in pfList]
rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'IGD.csv')).tolist()
self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
def testNSGA_II_NegativeDTLZ2(self):
random = pyotl.utility.Random(1)
problemGen = lambda: pyotl.problem.real.NegativeDTLZ2(3)
problem = problemGen()
pathProblem = os.path.join(self.pathData, type(problem).__name__.replace('Negative', ''), str(problem.GetNumberOfObjectives()))
_crossover = pyotl.crossover.real.SimulatedBinaryCrossover(random, 1, problem.GetBoundary(), 20)
crossover = pyotl.crossover.real.CoupleCoupleCrossoverAdapter(_crossover, random)
mutation = pyotl.mutation.real.PolynomialMutation(random, 1 / float(len(problem.GetBoundary())), problem.GetBoundary(), 20)
pfList = []
for _ in range(self.repeat):
problem = problemGen()
initial = pyotl.initial.real.BatchUniform(random, problem.GetBoundary(), 100)
optimizer = pyotl.optimizer.real.NSGA_II(random, problem, initial, crossover, mutation)
while optimizer.GetProblem().GetNumberOfEvaluations() < 30000:
optimizer()
pf = pyotl.utility.PyListList2VectorVector_Real(
[list(solution.objective_) for solution in optimizer.GetSolutionSet()])
for objective in pf:
problem.Fix(objective)
pfList.append(pf)
pathCrossover = os.path.join(pathProblem, type(crossover.GetCrossover()).__name__)
pathOptimizer = os.path.join(pathCrossover, type(optimizer).__name__)
pfTrue = pyotl.utility.PyListList2VectorVector_Real(numpy.loadtxt(os.path.join(pathProblem, 'PF.csv')).tolist())
# GD
indicator = pyotl.indicator.real.DTLZ2GD()
metricList = [indicator(pf) for pf in pfList]
rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'GD.csv')).tolist()
self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
# IGD
indicator = pyotl.indicator.real.InvertedGenerationalDistance(pfTrue)
metricList = [indicator(pf) for pf in pfList]
rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'IGD.csv')).tolist()
self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | 5,589,080,152,595,766,000 | 51.383178 | 142 | 0.688671 | false |
siacs/HttpUploadComponent | httpupload/server.py | 2 | 13933 | #!/usr/bin/env python3
import argparse
import base64
import errno
import hashlib
import logging
import mimetypes
import os
import random
import shutil
import ssl
import string
import sys
import time
import urllib.parse
import yaml
from sleekxmpp.componentxmpp import ComponentXMPP
from threading import Event
from threading import Lock
from threading import Thread
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
LOGLEVEL=logging.DEBUG
global files
global files_lock
global config
global quotas
def normalize_path(path, sub_url_length):
"""
Normalizes the URL to prevent users from grabbing arbitrary files via `../'
and the like.
"""
return os.path.normcase(os.path.normpath(path))[sub_url_length:]
def expire(quotaonly=False, kill_event=None):
"""
Expire all files over 'user_quota_soft' and older than 'expire_maxage'
    - quotaonly - If true, don't delete anything, just calculate the
      used space per user and return. Otherwise make an expiry run
      every config['expire_interval'] seconds.
- kill_event - threading.Event to listen to. When set, quit to
prevent hanging on KeyboardInterrupt. Only applicable when
quotaonly = False
"""
global config
global quotas
while True:
if not quotaonly:
# Wait expire_interval secs or return on kill_event
if kill_event.wait(config['expire_interval']):
return
now = time.time()
        # Scan each sender's upload directories separately
for sender in os.listdir(config['storage_path']):
senderdir = os.path.join(config['storage_path'], sender)
quota = 0
filelist = []
            # Traverse the sender directory, delete anything older than expire_maxage and collect file stats.
for dirname, dirs, files in os.walk(senderdir, topdown=False):
removed = []
for name in files:
fullname = os.path.join(dirname, name)
stats = os.stat(fullname)
if not quotaonly:
if now - stats.st_mtime > config['expire_maxage']:
logging.debug('Expiring %s. Age: %s', fullname, now - stats.st_mtime)
try:
os.unlink(fullname)
removed += [name]
except OSError as e:
logging.warning("Exception '%s' deleting file '%s'.", e, fullname)
quota += stats.st_size
filelist += [(stats.st_mtime, fullname, stats.st_size)]
else:
quota += stats.st_size
filelist += [(stats.st_mtime, fullname, stats.st_size)]
if dirs == [] and removed == files: # Directory is empty, so we can remove it
logging.debug('Removing directory %s.', dirname)
try:
os.rmdir(dirname)
except OSError as e:
logging.warning("Exception '%s' deleting directory '%s'.", e, dirname)
if not quotaonly and config['user_quota_soft']:
# Delete oldest files of sender until occupied space is <= user_quota_soft
filelist.sort()
while quota > config['user_quota_soft']:
entry = filelist[0]
try:
logging.debug('user_quota_soft exceeded. Removing %s. Age: %s', entry[1], now - entry[0])
os.unlink(entry[1])
quota -= entry[2]
except OSError as e:
logging.warning("Exception '%s' deleting file '%s'.", e, entry[1])
filelist.pop(0)
quotas[sender] = quota
logging.debug('Expire run finished in %fs', time.time() - now)
if quotaonly:
return
class MissingComponent(ComponentXMPP):
def __init__(self, jid, secret, port):
ComponentXMPP.__init__(self, jid, secret, "localhost", port)
self.register_plugin('xep_0030')
self.register_plugin('upload',module='plugins.upload')
self.add_event_handler('request_upload_slot',self.request_upload_slot)
def request_upload_slot(self, iq):
global config
global files
global files_lock
request = iq['request']
maxfilesize = int(config['max_file_size'])
if not request['filename'] or not request['size']:
self._sendError(iq,'modify','bad-request','please specify filename and size')
elif maxfilesize < int(request['size']):
self._sendError(iq,'modify','not-acceptable','file too large. max file size is '+str(maxfilesize))
elif 'whitelist' not in config or iq['from'].domain in config['whitelist'] or iq['from'].bare in config['whitelist']:
sender = iq['from'].bare
sender_hash = base64.urlsafe_b64encode(hashlib.sha1(sender.encode()).digest()).decode('ascii').rstrip('=')
if config['user_quota_hard'] and quotas.setdefault(sender_hash, 0) + int(request['size']) > config['user_quota_hard']:
msg = 'quota would be exceeded. max file size is %d' % (config['user_quota_hard'] - quotas[sender_hash])
logging.debug(msg)
self._sendError(iq, 'modify', 'not-acceptable', msg)
return
filename = request['filename']
folder = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(int(len(sender_hash) / 2)))
sane_filename = "".join([c for c in filename if (c == '_' or c == '.' or ord(c) >= 48 and ord(c) <= 122)]).rstrip()
path = os.path.join(sender_hash, folder)
if sane_filename:
path = os.path.join(path, sane_filename)
with files_lock:
files.add(path)
print(path)
reply = iq.reply()
reply['slot']['get'] = urllib.parse.urljoin(config['get_url'], path)
reply['slot']['put'] = urllib.parse.urljoin(config['put_url'], path)
reply.send()
else:
self._sendError(iq,'cancel','not-allowed','not allowed to request upload slots')
def _sendError(self, iq, error_type, condition, text):
reply = iq.reply()
iq.error()
iq['error']['type'] = error_type
iq['error']['condition'] = condition
iq['error']['text'] = text
iq.send()
class HttpHandler(BaseHTTPRequestHandler):
def do_PUT(self):
print('do put')
global files
global files_lock
global config
path = normalize_path(self.path, config['put_sub_url_len'])
length = int(self.headers['Content-Length'])
maxfilesize = int(config['max_file_size'])
if config['user_quota_hard']:
sender_hash = path.split('/')[0]
maxfilesize = min(maxfilesize, config['user_quota_hard'] - quotas.setdefault(sender_hash, 0))
if maxfilesize < length:
self.send_response(400,'file too large')
self.end_headers()
else:
print('path: '+path)
files_lock.acquire()
if path in files:
files.remove(path)
files_lock.release()
filename = os.path.join(config['storage_path'], path)
os.makedirs(os.path.dirname(filename))
remaining = length
with open(filename,'wb') as f:
data = self.rfile.read(min(4096,remaining))
while data and remaining >= 0:
databytes = len(data)
remaining -= databytes
if config['user_quota_hard']:
quotas[sender_hash] += databytes
f.write(data)
data = self.rfile.read(min(4096,remaining))
self.send_response(200,'ok')
self.end_headers()
else:
files_lock.release()
self.send_response(403,'invalid slot')
self.end_headers()
def do_GET(self, body=True):
global config
path = normalize_path(self.path, config['get_sub_url_len'])
slashcount = path.count('/')
if path[0] in ('/', '\\') or slashcount < 1 or slashcount > 2:
self.send_response(404,'file not found')
self.end_headers()
else:
filename = os.path.join(config['storage_path'], path)
print('requesting file: '+filename)
try:
with open(filename,'rb') as f:
self.send_response(200)
mime, _ = mimetypes.guess_type(filename)
if mime is None:
mime = 'application/octet-stream'
self.send_header("Content-Type", mime)
if mime[:6] != 'image/':
self.send_header("Content-Disposition", 'attachment; filename="{}"'.format(os.path.basename(filename)))
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs.st_size))
self.end_headers()
if body:
shutil.copyfileobj(f, self.wfile)
except FileNotFoundError:
self.send_response(404,'file not found')
self.end_headers()
def do_HEAD(self):
self.do_GET(body=False)
def do_OPTIONS(self):
if 'allow_web_clients' in config and config['allow_web_clients']:
self.send_response(200, 'OK')
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Access-Control-Allow-Methods", "GET,PUT")
self.end_headers()
else:
self.send_response(501, 'NO OPTIONS')
self.end_headers()
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default='config.yml', help='Specify alternate config file.')
parser.add_argument("-l", "--logfile", default=None, help='File where the server log will be stored. If not specified log to stdout.')
args = parser.parse_args()
with open(args.config,'r') as ymlfile:
config = yaml.load(ymlfile)
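    # Hedged sketch of the config.yml keys consumed below; every key name is
    # taken from the lookups in this file, the values are placeholders:
    #   component_jid, component_secret, component_port, storage_path,
    #   get_url, put_url, http_address, http_port, max_file_size, whitelist,
    #   user_quota_soft, user_quota_hard, expire_interval, expire_maxage,
    #   allow_web_clients, http_keyfile, http_certfile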
files = set()
files_lock = Lock()
kill_event = Event()
logging.basicConfig(level=LOGLEVEL,
format='%(asctime)-24s %(levelname)-8s %(message)s',
filename=args.logfile)
if not config['get_url'].endswith('/'):
config['get_url'] = config['get_url'] + '/'
if not config['put_url'].endswith('/'):
config['put_url'] = config['put_url'] + '/'
try:
config['get_sub_url_len'] = len(urllib.parse.urlparse(config['get_url']).path)
config['put_sub_url_len'] = len(urllib.parse.urlparse(config['put_url']).path)
except ValueError:
logging.warning("Invalid get_sub_url ('%s') or put_sub_url ('%s'). sub_url's disabled.", config['get_sub_url'], config['put_sub_url'])
config['get_sub_url_int'] = 1
config['put_sub_url_int'] = 1
# Sanitize config['user_quota_*'] and calculate initial quotas
quotas = {}
try:
config['user_quota_hard'] = int(config.get('user_quota_hard', 0))
config['user_quota_soft'] = int(config.get('user_quota_soft', 0))
if config['user_quota_soft'] or config['user_quota_hard']:
expire(quotaonly=True)
except ValueError:
logging.warning("Invalid user_quota_hard ('%s') or user_quota_soft ('%s'). Quotas disabled.", config['user_quota_soft'], config['user_quota_soft'])
config['user_quota_soft'] = 0
config['user_quota_hard'] = 0
# Sanitize config['expire_*'] and start expiry thread
try:
config['expire_interval'] = float(config.get('expire_interval', 0))
config['expire_maxage'] = float(config.get('expire_maxage', 0))
if config['expire_interval'] > 0 and (config['user_quota_soft'] or config['expire_maxage']):
t = Thread(target=expire, kwargs={'kill_event': kill_event})
t.start()
else:
logging.info('Expiring disabled.')
except ValueError:
logging.warning("Invalid expire_interval ('%s') or expire_maxage ('%s') set in config file. Expiring disabled.",
config['expire_interval'], config['expire_maxage'])
try:
server = ThreadedHTTPServer((config['http_address'], config['http_port']), HttpHandler)
except Exception as e:
import traceback
logging.debug(traceback.format_exc())
kill_event.set()
sys.exit(1)
if 'http_keyfile' in config and 'http_certfile' in config:
server.socket = ssl.wrap_socket(server.socket, keyfile=config['http_keyfile'], certfile=config['http_certfile'])
jid = config['component_jid']
secret = config['component_secret']
port = int(config.get('component_port',5347))
xmpp = MissingComponent(jid,secret,port)
if xmpp.connect():
xmpp.process()
print("connected")
try:
server.serve_forever()
except (KeyboardInterrupt, Exception) as e:
        if isinstance(e, KeyboardInterrupt):
logging.debug('Ctrl+C pressed')
else:
import traceback
logging.debug(traceback.format_exc())
kill_event.set()
else:
print("unable to connect")
kill_event.set()
| agpl-3.0 | 1,377,332,783,470,547,000 | 41.478659 | 165 | 0.55975 | false |
joaofanti/TrabRedesIIFinal | Modelos/Mapa/MapFactory.py | 1 | 2000 | import json
from Map import Map
from Door import Door
from Room import Room
from Item import Item
"""
Defines a map generator.
"""
class MapFactory:
"""
	Creates a new map generator instance.
"""
def __init__(self):
		self.RoomsList = [] # List of generated rooms.
		self.DoorsList = [] # List of generated doors.
pass
"""
	Generates a map from a text file containing the map definition in JSON.
"""
def GenerateMap(self, mapJson, mapDesignText):
		# For each room in the JSON file, create a new Room object and then store it in the rooms list.
for roomJson in mapJson:
newRoom = self.GenerateRoom(roomJson)
self.RoomsList.append(newRoom)
return Map(self.RoomsList, mapDesignText)
"""
	Generates a room from a room JSON object.
"""
def GenerateRoom(self, roomJson):
currentRoomID = roomJson["ID"]
doors = []
for connectedRoom in roomJson["ConnectedRoomsID"]:
door = self.GenerateDoor(currentRoomID, connectedRoom)
doors.append(door)
objects = []
for objectJson in roomJson["Objects"]:
# Se existe "State" nas configuracoes do objeto, adiciona! Se nao, usa None
if ("State" in objectJson):
newObject = Item(objectJson["Name"], objectJson["Description"], objectJson["State"])
else:
newObject = Item(objectJson["Name"], objectJson["Description"])
objects.append(newObject)
newRoom = Room(currentRoomID, roomJson["StartRoom"], doors, objects)
return newRoom
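	# Hedged sketch of the room JSON shape GenerateRoom() expects; the field
	# names come from the lookups above, the concrete values are made up:
	#   {"ID": 1, "StartRoom": "True",
	#    "ConnectedRoomsID": [{"Room": 2, "Direction": "N", "Opened": "True"}],
	#    "Objects": [{"Name": "key", "Description": "...", "State": "new"}]}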
"""
	Generates a door from a door JSON object or, if it already exists, reuses the existing one.
"""
def GenerateDoor(self, room1ID, room2JSON):
room2ID = room2JSON["Room"]
room2Direction = room2JSON["Direction"]
room1Direction = "N"
if (room2Direction == "N"):
room1Direction = "S"
elif (room2Direction == "L"):
room1Direction = "E"
elif (room2Direction == "E"):
room1Direction = "L"
door = Door(room1ID, room1Direction, room2ID, room2Direction, room2JSON["Opened"] == "True")
self.DoorsList.append(door)
return door | mit | -4,662,907,654,454,719,000 | 25.328947 | 95 | 0.697 | false |
gdanezis/claimchain-core | claimchain/utils/wrappers.py | 1 | 6989 | import hippiehug
from hippiehug.Utils import binary_hash
from .encodings import ensure_binary
# TODO: Move to hippiehug 1.0
# TODO: Move the tests out of doctests
class Blob(bytes):
@property
def hid(self):
return binary_hash(self)
def _check_hash(key, value):
if value.hid != key:
raise ValueError('Hash of the value is not the lookup key')
class ObjectStore(object):
"""
>>> store = ObjectStore()
>>> blob = Blob(b'test')
>>> store.add(blob)
>>> store[blob.hid]
b'test'
"""
def __init__(self, backend=None):
self._backend = backend
if backend is None:
self._backend = {}
# Check hashes if it is a plain dictionary
if not isinstance(self._backend, ObjectStore):
for lookup_key, value in self._backend.items():
_check_hash(lookup_key, value)
else:
# If input is another ObjectStore, unwrap the
# underlying dictionary
self._backend = self._backend._backend
def __getitem__(self, lookup_key):
value = self._backend[lookup_key]
_check_hash(lookup_key, value)
return value
def get(self, lookup_key):
try:
return self._backend[lookup_key]
except KeyError:
return None
def __setitem__(self, lookup_key, value):
_check_hash(lookup_key, value)
self._backend[lookup_key] = value
def keys(self):
return self._backend.keys()
def values(self):
return self._backend.values()
def items(self):
return self._backend.items()
def add(self, value):
lookup_key = value.hid
self._backend[lookup_key] = value
# TODO: (Higher priority) move this to hippiehug classes themselves
def serialize_object(obj):
"""
Serialize blobs, hippiehug tree nodes, and hippiehug chain blocks
>>> block = hippiehug.Block([])
>>> len(serialize_object(block))
4
>>> node = hippiehug.Nodes.Branch(b'pivot', b'left', b'right')
>>> len(serialize_object(node))
3
>>> leaf = hippiehug.Nodes.Leaf(b'item', b'key')
>>> len(serialize_object(leaf))
2
>>> blob = Blob(b'content')
>>> serialize_object(blob) == blob
True
.. warning::
There is no guarantee this is in sync with hippiehug, i.e., this
is the serialization hippiehug employs internally. This will eventually
move inside the hippiehug library.
"""
if isinstance(obj, hippiehug.Nodes.Leaf):
return (obj.key, obj.item)
elif isinstance(obj, hippiehug.Nodes.Branch):
return (obj.pivot, obj.left_branch, obj.right_branch)
elif isinstance(obj, hippiehug.Block):
return (obj.index, obj.fingers, obj.items, obj.aux)
elif isinstance(obj, Blob):
return obj
class Chain(object):
def __init__(self, object_store=None):
self.object_store = object_store or ObjectStore()
class Tree(object):
"""
Wrapper to enable map interface on top of Hippiehug trees
>>> tree = Tree()
>>> b'label' in tree
False
>>> tree[b'label']
Traceback (most recent call last):
KeyError: b'label'
>>> tree[b'label'] = Blob(b'test')
>>> tree[b'label']
b'test'
>>> b'label' in tree
True
Creating a tree from existing storage:
>>> tree1 = Tree(tree.object_store, root_hash=tree.root_hash)
>>> b'label' in tree1
True
>>> tree.evidence(b'label') is not None
True
Adding multiple items at once:
>>> b'label1' in tree
False
>>> b'label2' in tree
False
>>> tree.update({'label1': Blob(b'test1'), 'label2': Blob(b'test2')})
>>> b'label1' in tree
True
>>> b'label2' in tree
True
>>> tree[b'label1']
b'test1'
>>> tree[b'label2']
b'test2'
"""
def __init__(self, object_store=None, root_hash=None):
self.object_store = object_store
if object_store is None:
self.object_store = ObjectStore()
self.tree = hippiehug.Tree(self.object_store, root_hash=root_hash)
@property
def root_hash(self):
return self.tree.root()
def __getitem__(self, lookup_key):
lookup_key = ensure_binary(lookup_key)
_, evidence = self.evidence(lookup_key)
if not evidence or evidence[-1].key != lookup_key:
raise KeyError(lookup_key)
value_hash = evidence[-1].item
return self.tree.store[value_hash]
def __setitem__(self, lookup_key, value):
"""
Add value with given lookup key.
TODO: Add transactions. If this fails or stops at some point,
storage will be left in a screwed up state.
:param value: An object with ``hid`` property (e.g. ``Blob`` object)
"""
lookup_key = ensure_binary(lookup_key)
if not hasattr(value, 'hid'):
raise TypeError('Value is not a valid object.')
self.tree.add(key=lookup_key, item=value)
_, evidence = self.evidence(lookup_key)
assert self.tree.is_in(value, key=lookup_key)
value_hash = evidence[-1].item
# This should only happen if hashing here and in hippiehug
# become inconsistent because of internal changes in hippiehug
# TODO: Remove when hashing when changes here are moved to
# hippiehug
assert value_hash == value.hid
self.tree.store[value_hash] = value
def update(self, items):
"""
Add multiple values.
TODO: Add transactions. If this fails or stops at some point,
storage will be left in a screwed up state.
:param items: dictionary, where the values are objects with
``hid`` property (e.g. ``Blob`` objects)
"""
items = {ensure_binary(key): value for key, value in items.items()}
for value in items.values():
if not hasattr(value, 'hid'):
raise TypeError('Value is not a valid object.')
self.tree.store[value.hid] = value
if len(items) > 0:
self.tree.multi_add(list(items.values()), list(items.keys()))
def __contains__(self, lookup_key):
lookup_key = ensure_binary(lookup_key)
_, evidence = self.evidence(lookup_key)
return evidence != [] and evidence[-1].key == lookup_key
def evidence(self, lookup_key):
result = self.tree.evidence(key=lookup_key)
if not result:
result = None, []
return result
def check_evidence(root_hash, evidence, lookup_key):
"""
>>> tree = Tree()
>>> tree[b'label'] = Blob(b'test')
>>> root_hash, evidence = tree.evidence(b'label')
>>> check_evidence(root_hash, evidence, b'label')
True
>>> check_evidence(root_hash, evidence, b'label1')
False
"""
lookup_key = ensure_binary(lookup_key)
store = {node.identity(): node for node in evidence}
tree = Tree(store, root_hash=root_hash)
return lookup_key in tree
| mit | 3,593,495,631,763,739,600 | 27.880165 | 76 | 0.597653 | false |
onyg/aserializer | aserializer/django/fields.py | 1 | 2229 | # -*- coding: utf-8 -*-
from collections import Iterable
try:
from django.db.models.query import QuerySet
from django.db.models import Manager
except ImportError:
QuerySet = None
Manager = None
from aserializer.fields import ListSerializerField
from aserializer.django.utils import get_local_fields, get_related_fields
class RelatedManagerListSerializerField(ListSerializerField):
def __init__(self, serializer, sort_by=None, use_prefetch=False, *args, **kwargs):
super(RelatedManagerListSerializerField, self).__init__(serializer=serializer, sort_by=sort_by, *args, **kwargs)
self.use_prefetch = use_prefetch
def pre_value(self, fields=None, exclude=None, **extras):
super(RelatedManagerListSerializerField, self).pre_value(fields=fields, exclude=exclude, **extras)
self.use_prefetch = extras.get('use_prefetch', self.use_prefetch)
def set_value(self, value):
if value is None:
return
elif isinstance(value, Iterable):
values = value
elif isinstance(value, (QuerySet, Manager)):
# if using prefetch_related, we can't use only as it will re-fetch the data
if not self.use_prefetch and (self.only_fields or self.exclude):
local_fields = get_local_fields(value.model)
related_fields = get_related_fields(value.model)
only_fields = [f.name for f in local_fields]
if self.only_fields:
only_fields = [f for f in only_fields if f in self.only_fields]
exclude_fields = [f.name for f in local_fields if f.name in self.exclude]
if exclude_fields:
only_fields = [f for f in only_fields if f not in exclude_fields]
only_fields += [f.name for f in related_fields]
# .only() returns a QuerySet of RelatedDjangoModel_Deferred objects?
values = value.only(*only_fields)
else:
values = value.all()
else:
return
self.items[:] = []
self._native_items[:] = []
self._python_items[:] = []
for item in values:
self.add_item(source=item)
| mit | -4,767,140,626,325,645,000 | 42.705882 | 120 | 0.61956 | false |
pedrobaeza/l10n-spain | l10n_es_partner/models/l10n_es_partner.py | 1 | 7222 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2008 Spanish Localization Team
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com)
# Jordi Esteve <[email protected]>
# Copyright (c) 2012-2014 Acysos S.L. (http://acysos.com)
# Ignacio Ibeas <[email protected]>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class ResPartnerBank(orm.Model):
_inherit = 'res.partner.bank'
_columns = {
'acc_country_id': fields.many2one(
"res.country", 'Bank country',
help="If the country of the bank is Spain, it validates the bank "
"code or IBAN, formatting it accordingly."),
}
def _crc(self, cTexto):
"""Calculo el CRC de un número de 10 dígitos
ajustados con ceros por la izquierda"""
factor = (1, 2, 4, 8, 5, 10, 9, 7, 3, 6)
        # CRC calculation
nCRC = 0
for n in range(10):
nCRC += int(cTexto[n]) * factor[n]
        # Reduce the CRC to a single digit
nValor = 11 - nCRC % 11
if nValor == 10:
nValor = 1
elif nValor == 11:
nValor = 0
return nValor
def calcCC(self, cBanco, cSucursal, cCuenta):
"""Cálculo del código de control bancario"""
cTexto = "00%04d%04d" % (int(cBanco), int(cSucursal))
dc1 = self._crc(cTexto)
cTexto = "%010d" % long(cCuenta)
dc2 = self._crc(cTexto)
return "%1d%1d" % (dc1, dc2)
def checkBankAccount(self, account):
number = ""
for i in account:
if i.isdigit():
number += i
if len(number) != 20:
return 'invalid-size'
bank = number[0:4]
office = number[4:8]
dc = number[8:10]
account = number[10:20]
if dc != self.calcCC(bank, office, account):
return 'invalid-dc'
return '%s %s %s %s' % (bank, office, dc, account)
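    # A hedged usage sketch (the digits below are made up and not claimed to
    # form a valid account): checkBankAccount() strips non-digits, recomputes
    # the two control digits with calcCC() and either returns the 20 digits
    # regrouped as 'bank office dc account' or the strings 'invalid-size' /
    # 'invalid-dc' on failure:
    #
    #   self.checkBankAccount('1234 5678 06 1234567890')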
def _pretty_iban(self, iban_str):
"""return iban_str in groups of four characters separated
by a single space"""
res = []
while iban_str:
res.append(iban_str[:4])
iban_str = iban_str[4:]
return ' '.join(res)
def onchange_banco(self, cr, uid, ids, account, country_id,
state, context=None):
if account and country_id:
country = self.pool.get('res.country').browse(cr, uid, country_id,
context=context)
if country.code.upper() == 'ES':
bank_obj = self.pool.get('res.bank')
if state == 'bank':
account = account.replace(' ', '')
number = self.checkBankAccount(account)
if number == 'invalid-size':
return {
'warning': {
'title': _('Warning'),
'message': _('Bank account should have 20 '
'digits.')
}
}
if number == 'invalid-dc':
return {
'warning': {
'title': _('Warning'),
'message': _('Invalid bank account.')
}
}
bank_ids = bank_obj.search(cr, uid,
[('code', '=', number[:4])],
context=context)
if bank_ids:
return {'value': {'acc_number': number,
'bank': bank_ids[0]}}
else:
return {'value': {'acc_number': number}}
elif state == 'iban':
partner_bank_obj = self.pool['res.partner.bank']
if partner_bank_obj.is_iban_valid(cr, uid, account, context):
number = self._pretty_iban(account.replace(" ", ""))
bank_ids = bank_obj.search(cr, uid,
[('code', '=', number[5:9])],
context=context)
if bank_ids:
return {'value': {'acc_number': number,
'bank': bank_ids[0]}}
else:
return {'value': {'acc_number': number}}
else:
return {'warning': {'title': _('Warning'),
'message': _('IBAN account is not valid')}}
return {'value': {}}
class ResBank(orm.Model):
_inherit = 'res.bank'
_columns = {
'code': fields.char('Code', size=64),
'lname': fields.char('Long name', size=128),
'vat': fields.char('VAT code', size=32, help="Value Added Tax number"),
'website': fields.char('Website', size=64),
}
class ResPartner(orm.Model):
_inherit = 'res.partner'
_columns = {
'comercial': fields.char('Trade name', size=128, select=True),
}
def name_search(self, cr, uid, name, args=None, operator='ilike',
context=None, limit=100):
if not args:
args = []
partners = super(ResPartner, self).name_search(cr, uid, name, args,
operator, context,
limit)
ids = [x[0] for x in partners]
if name and len(ids) == 0:
ids = self.search(cr, uid, [('comercial', operator, name)] + args,
limit=limit, context=context)
return self.name_get(cr, uid, ids, context=context)
def vat_change(self, cr, uid, ids, value, context=None):
result = super(ResPartner, self).vat_change(cr, uid, ids, value,
context=context)
if value:
result['value']['vat'] = value.upper()
return result
| agpl-3.0 | 6,887,857,847,909,223,000 | 40.705202 | 81 | 0.461123 | false |
151706061/SimpleITK | Examples/Segmentation/ConnectedThresholdImageFilter.py | 1 | 2202 | '''=========================================================================
'
' Copyright Insight Software Consortium
'
' Licensed under the Apache License, Version 2.0 (the "License");
' you may not use this file except in compliance with the License.
' You may obtain a copy of the License at
'
' http://www.apache.org/licenses/LICENSE-2.0.txt
'
' Unless required by applicable law or agreed to in writing, software
' distributed under the License is distributed on an "AS IS" BASIS,
' WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
' See the License for the specific language governing permissions and
' limitations under the License.
'
'========================================================================='''
from __future__ import print_function
import SimpleITK
import sys
if __name__ == '__main__':
#
# Check Command Line
#
if len( sys.argv ) < 7:
print("Usage: ConnectedThresholdImageFilter inputImage outputImage lowerThreshold upperThreshold seedX seedY [seed2X seed2Y ... ]");
sys.exit( 1 )
#
# Read the image
#
reader = SimpleITK.ImageFileReader()
reader.SetFileName( sys.argv[1] )
image = reader.Execute();
#
# Set up the writer
#
writer = SimpleITK.ImageFileWriter()
writer.SetFileName( sys.argv[2] )
#
# Blur using CurvatureFlowImageFilter
#
blurFilter = SimpleITK.CurvatureFlowImageFilter()
blurFilter.SetNumberOfIterations( 5 )
blurFilter.SetTimeStep( 0.125 )
image = blurFilter.Execute( image )
#
# Set up ConnectedThresholdImageFilter for segmentation
#
segmentationFilter = SimpleITK.ConnectedThresholdImageFilter()
segmentationFilter.SetLower( float(sys.argv[3]) )
segmentationFilter.SetUpper( float(sys.argv[4]) )
segmentationFilter.SetReplaceValue( 255 )
for i in range( 5, len(sys.argv)-1, 2 ):
seed = SimpleITK.Index( int(sys.argv[i]), int(sys.argv[i+1]) )
segmentationFilter.AddSeed( seed )
print( "Adding seed at ", seed.ToString() )
# Run the segmentation filter
image = segmentationFilter.Execute( image )
#
# Write out the result
#
writer.Execute( image )
sys.exit(0)
| apache-2.0 | 7,621,793,930,765,058,000 | 26.525 | 136 | 0.645322 | false |
dbrattli/RxPY | rx/linq/observable/sequenceequal.py | 1 | 3360 | import collections
from rx import AnonymousObservable, Observable
from rx.disposables import CompositeDisposable
from rx.internal import default_comparer
from rx.internal import extensionmethod
@extensionmethod(Observable)
def sequence_equal(self, second, comparer=None):
"""Determines whether two sequences are equal by comparing the
elements pairwise using a specified equality comparer.
1 - res = source.sequence_equal([1,2,3])
2 - res = source.sequence_equal([{ "value": 42 }], lambda x, y: x.value == y.value)
3 - res = source.sequence_equal(Observable.return_value(42))
4 - res = source.sequence_equal(Observable.return_value({ "value": 42 }), lambda x, y: x.value == y.value)
second -- Second observable sequence or array to compare.
comparer -- [Optional] Comparer used to compare elements of both sequences.
Returns an observable sequence that contains a single element which
indicates whether both sequences are of equal length and their
corresponding elements are equal according to the specified equality
comparer.
"""
first = self
comparer = comparer or default_comparer
if isinstance(second, collections.Iterable):
second = Observable.from_iterable(second)
def subscribe(observer):
donel = [False]
doner = [False]
ql = []
qr = []
def on_next1(x):
if len(qr) > 0:
v = qr.pop(0)
try:
equal = comparer(v, x)
except Exception as e:
observer.on_error(e)
return
if not equal:
observer.on_next(False)
observer.on_completed()
elif doner[0]:
observer.on_next(False)
observer.on_completed()
else:
ql.append(x)
def on_completed1():
donel[0] = True
if not len(ql):
if len(qr) > 0:
observer.on_next(False)
observer.on_completed()
elif doner[0]:
observer.on_next(True)
observer.on_completed()
def on_next2(x):
if len(ql) > 0:
v = ql.pop(0)
try:
equal = comparer(v, x)
except Exception as exception:
observer.on_error(exception)
return
if not equal:
observer.on_next(False)
observer.on_completed()
elif donel[0]:
observer.on_next(False)
observer.on_completed()
else:
qr.append(x)
def on_completed2():
doner[0] = True
if not len(qr):
if len(ql) > 0:
observer.on_next(False)
observer.on_completed()
elif donel[0]:
observer.on_next(True)
observer.on_completed()
subscription1 = first.subscribe(on_next1, observer.on_error, on_completed1)
subscription2 = second.subscribe(on_next2, observer.on_error, on_completed2)
return CompositeDisposable(subscription1, subscription2)
return AnonymousObservable(subscribe)
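# Illustrative usage sketch (not part of the original RxPY source): runs only
# when this module is executed directly and relies on Observable.from_iterable
# and subscribe from the RxPY 1.x API already imported above.
if __name__ == '__main__':
    def show(equal):
        print('sequences equal: %s' % equal)
    Observable.from_iterable([1, 2, 3]) \
        .sequence_equal([1, 2, 3]) \
        .subscribe(show)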
| apache-2.0 | 2,095,176,045,865,010,000 | 32.267327 | 110 | 0.545238 | false |
laborautonomo/django-newsletter | newsletter/migrations/0005_auto__del_emailtemplate__del_unique_emailtemplate_title_action__del_fi.py | 1 | 11955 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from ..utils import get_user_model
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'EmailTemplate', fields ['title', 'action']
db.delete_unique('newsletter_emailtemplate', ['title', 'action'])
# Deleting model 'EmailTemplate'
db.delete_table('newsletter_emailtemplate')
# Deleting field 'Newsletter.update_template'
db.delete_column('newsletter_newsletter', 'update_template_id')
# Deleting field 'Newsletter.unsubscribe_template'
db.delete_column('newsletter_newsletter', 'unsubscribe_template_id')
# Deleting field 'Newsletter.message_template'
db.delete_column('newsletter_newsletter', 'message_template_id')
# Deleting field 'Newsletter.subscribe_template'
db.delete_column('newsletter_newsletter', 'subscribe_template_id')
def backwards(self, orm):
# Adding model 'EmailTemplate'
db.create_table('newsletter_emailtemplate', (
('title', self.gf('django.db.models.fields.CharField')(default=u'Default', max_length=200)),
('text', self.gf('django.db.models.fields.TextField')()),
('html', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('action', self.gf('django.db.models.fields.CharField')(max_length=16, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('subject', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('newsletter', ['EmailTemplate'])
# Adding unique constraint on 'EmailTemplate', fields ['title', 'action']
db.create_unique('newsletter_emailtemplate', ['title', 'action'])
# User chose to not deal with backwards NULL issues for 'Newsletter.update_template'
raise RuntimeError("Cannot reverse this migration. 'Newsletter.update_template' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Newsletter.unsubscribe_template'
raise RuntimeError("Cannot reverse this migration. 'Newsletter.unsubscribe_template' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Newsletter.message_template'
raise RuntimeError("Cannot reverse this migration. 'Newsletter.message_template' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Newsletter.subscribe_template'
raise RuntimeError("Cannot reverse this migration. 'Newsletter.subscribe_template' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'newsletter.article': {
'Meta': {'ordering': "('sortorder',)", 'object_name': 'Article'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'articles'", 'to': "orm['newsletter.Message']"}),
'sortorder': ('django.db.models.fields.PositiveIntegerField', [], {'default': '20', 'db_index': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'newsletter.message': {
'Meta': {'unique_together': "(('slug', 'newsletter'),)", 'object_name': 'Message'},
'date_create': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modify': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'newsletter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['newsletter.Newsletter']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'newsletter.newsletter': {
'Meta': {'object_name': 'Newsletter'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'})
},
'newsletter.submission': {
'Meta': {'object_name': 'Submission'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['newsletter.Message']"}),
'newsletter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['newsletter.Newsletter']"}),
'prepared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'publish': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 16, 0, 0)', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'sending': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'subscriptions': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'to': "orm['newsletter.Subscription']", 'symmetrical': 'False', 'blank': 'True'})
},
'newsletter.subscription': {
'Meta': {'unique_together': "(('user', 'email_field', 'newsletter'),)", 'object_name': 'Subscription'},
'activation_code': ('django.db.models.fields.CharField', [], {'default': "'cfac7ee20279d5842214a4e8371475175ed8f00b'", 'max_length': '40'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email_field': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'name_field': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'db_column': "'name'", 'blank': 'True'}),
'newsletter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['newsletter.Newsletter']"}),
'subscribe_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscribed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'unsubscribe_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'unsubscribed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['newsletter']
| agpl-3.0 | -1,749,140,199,191,356,700 | 71.018072 | 187 | 0.582016 | false |
rakhimov/cppdep | setup.py | 1 | 1609 | #!/usr/bin/env python
"""The setup script to generate dist files for PyPi.
To upload the release to PyPi:
$ ./setup.py sdist bdist_wheel --universal
$ twine upload dist/*
"""
from setuptools import setup
from cppdep import cppdep
setup(
name="cppdep",
version=cppdep.VERSION,
maintainer="Olzhas Rakhimov",
maintainer_email="[email protected]",
description="Dependency analyzer for C/C++ projects",
download_url="https://github.com/rakhimov/cppdep",
license="GPLv3+",
install_requires=[
"networkx",
"pydot",
"pydotplus",
"PyYAML",
"PyKwalify>=1.6.0"
],
keywords=["c++", "c", "static analysis", "dependency analysis"],
url="http://github.com/rakhimov/cppdep",
packages=["cppdep"],
package_data={"cppdep": ["config_schema.yml"]},
entry_points={"console_scripts": ["cppdep = cppdep.__main__:main"]},
long_description=open("README.rst").read(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Quality Assurance",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Programming Language :: C",
"Programming Language :: C++",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Environment :: Console",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5"
],
)
| gpl-3.0 | 8,897,259,033,538,249,000 | 31.836735 | 85 | 0.607209 | false |
ScientificDataFormat/SDF-Python | sdf/hdf5.py | 1 | 5874 | # Copyright (c) 2019 Dassault Systemes. All rights reserved.
import h5py
import sdf
import numpy as np
import os
import sys
def _to_python_str(s):
""" Convert to Python string """
if isinstance(s, bytes):
return s.decode('utf-8')
else:
return s
def load(filename, objectname):
with h5py.File(filename, 'r') as f:
datasets = {}
dsobj = f[objectname]
class_name = dsobj.__class__.__name__
if class_name == 'Group':
group = _create_group(dsobj, datasets)
_restore_scales(datasets)
return group
elif class_name == 'Dataset':
dataset = _create_dataset(dsobj, datasets)
for ri in range(dsobj.ndim):
if dsobj.dims[ri]:
sobj = dsobj.dims[ri][0]
s = _create_dataset(sobj, dict())
s.is_scale = True
dataset.scales[ri] = s
return dataset
else:
raise Exception('Unexpected object')
def save(filename, group):
with h5py.File(filename, 'w') as f:
datasets = dict()
_write_group(f, group, '/', datasets)
# attach the scales
for ds, h5ds in datasets.items():
for i, s in enumerate(ds.scales):
if s is None:
continue
elif s in datasets:
h5s = datasets[s]
dimname = s._display_name
if dimname is None:
dimname = ''
h5s.make_scale(_str(dimname))
h5ds.dims[i].attach_scale(h5s)
else:
print("Cannot attach scale for '" + h5ds.name +
"' because the referenced scale for dimension " + str(i) + " is not part of the file")
def _create_group(gobj, datasets):
""" Create an sdf.Group from an h5py group """
ds_obj_list = []
g_obj_list = []
group_attrs = {key: gobj.attrs[key] for key in gobj.attrs.keys() if key != 'COMMENT'}
comment = gobj.attrs.get('COMMENT')
for ds_name in gobj.keys():
# TODO: fix this?
if isinstance(gobj[ds_name], h5py._hl.dataset.Dataset):
ds_obj_list.append(gobj[ds_name])
elif isinstance(gobj[ds_name], h5py._hl.group.Group):
g_obj_list.append(gobj[ds_name])
child_groups = []
for cgobj in g_obj_list:
child_groups.append(_create_group(cgobj, datasets))
ds_list = [_create_dataset(dsobj, datasets) for dsobj in ds_obj_list]
name = gobj.name.split('/')[-1]
return sdf.Group(name=name, comment=comment, attributes=group_attrs, groups=child_groups, datasets=ds_list)
def _create_dataset(dsobj, datasets):
""" Create a dataset from an h5py dataset """
_, name = os.path.split(dsobj.name)
ds = sdf.Dataset(name, data=dsobj[()])
for attr in dsobj.attrs:
if attr == 'COMMENT':
ds.comment = _to_python_str(dsobj.attrs[attr])
elif attr == 'NAME':
ds.display_name = _to_python_str(dsobj.attrs[attr])
elif attr == 'RELATIVE_QUANTITY' and _to_python_str(dsobj.attrs[attr]) == 'TRUE':
ds.relative_quantity = True
elif attr == 'UNIT':
ds.unit = _to_python_str(dsobj.attrs[attr])
elif attr == 'DISPLAY_UNIT':
ds.display_unit = _to_python_str(dsobj.attrs[attr])
elif attr == 'CLASS' and _to_python_str(dsobj.attrs[attr]) == 'DIMENSION_SCALE':
ds.is_scale = True
elif attr == 'REFERENCE_LIST':
ds.is_scale = True
elif attr in ['REFERENCE_LIST', 'DIMENSION_LIST']:
pass
else:
ds.attributes[attr] = _to_python_str(dsobj.attrs[attr])
ds.scales = [None] * ds.data.ndim
datasets[dsobj] = ds
return ds
def _restore_scales(datasets):
for dsobj, ds in datasets.items():
for i in range(ds.data.ndim):
if dsobj.dims[i]:
sobj = dsobj.dims[i][0]
scale = datasets[sobj]
scale.is_scale = True
ds.scales[i] = scale
pass
def _str(s):
""" Convert to byte string """
if sys.version_info.major >= 3 and isinstance(s, bytes):
return s
else:
# convert the string to an fixed-length utf-8 byte string
return np.string_(s.encode('utf-8'))
def _write_group(f, g, path, datasets):
if path == '/':
gobj = f
else:
gobj = f.create_group(path)
# iterate over the child groups
for subgroup in g.groups:
_write_group(f, subgroup, path + subgroup.name + '/', datasets)
if g.comment is not None:
gobj.attrs['COMMENT'] = _str(g.comment)
for key, value in g.attributes.items():
gobj.attrs[key] = _str(value)
# write the datasets
for ds in g.datasets:
_write_dataset(f, ds, path, datasets)
def _write_dataset(f, ds, path, datasets):
f[path + ds.name] = ds.data
dsobj = f[path + ds.name]
datasets[ds] = dsobj
if ds.comment:
dsobj.attrs['COMMENT'] = _str(ds.comment)
if ds._display_name:
dsobj.attrs['NAME'] = _str(ds.display_name)
if ds.relative_quantity:
dsobj.attrs['RELATIVE_QUANTITY'] = _str('TRUE')
if ds.unit:
dsobj.attrs['UNIT'] = _str(ds.unit)
if ds.display_unit != ds.unit:
dsobj.attrs['DISPLAY_UNIT'] = _str(ds.display_unit)
if ds.is_scale:
dimname = ds.display_name
if dimname is None:
dimname = ''
h5py.h5ds.set_scale(dsobj.id, _str(dimname))
return dsobj
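# Illustrative round-trip sketch (not part of the original module): builds a
# tiny group with one dataset, using only the constructor forms already
# referenced above, and pushes it through save()/load(). The file name is an
# assumption, and the sdf Dataset/Group constructors are assumed to initialize
# their optional fields (unit, scales, attributes) to sensible defaults.
if __name__ == '__main__':
    _ds = sdf.Dataset('x', data=np.arange(10))
    _g = sdf.Group(name='/', comment='example', datasets=[_ds])
    save('example.h5', _g)
    print(load('example.h5', '/x').data)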
| bsd-3-clause | 7,269,598,341,535,490,000 | 26.240385 | 112 | 0.52843 | false |
XNBlank/sudoBot | modules/profile.py | 1 | 8013 | import discord
from discord.ext import commands
from PIL import Image
from PIL import ImageOps
from PIL import ImageDraw
from PIL import ImageFont
import asyncio, aiohttp, io, time, imghdr, os, json, textwrap, re
class Profile:
"""
Profile card setup and display
"""
def __init__(self, client, config):
self.client = client
self.config = config
@commands.group(pass_context=True)
async def profile(self, ctx):
"""Profile command ( see 'sudo help profile' for more info )"""
if ctx.invoked_subcommand is None:
await ctx.send('Invalid profile command passed... Send `sudo help profile` for assistance.')
@profile.command(pass_context=True)
async def get(self, ctx, user : discord.Member = None):
"""Get a user profile card. Passing no arguments returns requesters card"""
if user == None:
user = ctx.message.author
name = user.name
userid = user.id
avatar = user.avatar_url
def_avatar = user.default_avatar_url
created = user.created_at
nick = user.display_name
discr = user.discriminator
roles = user.roles
roleImages = {}
for x, role in enumerate(roles):
try:
roleImages[role.name] = Image.open('data/images/roles/small/{}.png'.format(role.name.lower()))
except Exception as e:
next
if avatar == '':
async with aiohttp.ClientSession() as session:
async with session.get(def_avatar) as r:
image = await r.content.read()
else:
async with aiohttp.ClientSession() as session:
async with session.get(avatar) as r:
image = await r.content.read()
with open('data/users/avatars/{}.png'.format(user.id), 'wb') as f:
f.write(image)
        # retry a few times in case the avatar file has not finished writing
        checked = False
        checks = 0
        while checked == False:
            isImage = imghdr.what('data/users/avatars/{}.png'.format(user.id))
            if checks > 4:
                checked = True
            if isImage is not None:
                checked = True
            else:
                checks += 1
av = Image.open('data/users/avatars/{}.png'.format(user.id))
userAvatar = av.resize((128,128), resample=Image.BILINEAR).convert('RGBA')
maxsize = ( 800, 500 )
try:
bg = Image.open('data/users/backgrounds/{0}.png'.format(user.id))
bg_width, bg_height = bg.size
bg = ImageOps.fit(bg,maxsize)
except:
bg = Image.open('data/images/background_default.png')
fontFace = 'data/fonts/{}'.format(self.config['fonts']['normal'])
fontFace_bold = 'data/fonts/{}'.format(self.config['fonts']['bold'])
fontSize = 18
descSize = 12
headerSize = 32
header_font = ImageFont.truetype(fontFace_bold, headerSize)
font = ImageFont.truetype(fontFace, fontSize)
desc_font = ImageFont.truetype(fontFace, descSize)
font_bold = font = ImageFont.truetype(fontFace_bold, fontSize)
cardbg = Image.new('RGBA', (800, 500), (255,255,255,255))
d = ImageDraw.Draw(cardbg)
d.rectangle([(0,0), 800,500], fill=(255,255,255,255))
cardbg.paste(bg, (0,0))
cardfg = Image.new('RGBA', (800, 500), (255,255,255,0))
dd = ImageDraw.Draw(cardfg)
# Info Box Top
dd.rectangle([(200,60),(740,191)], fill=(255,255,255,224));
dd.rectangle([(200,60),(740,134)], fill=(255,255,255,255));
# Avatar box
dd.rectangle([(60,60),(191,191)], fill=(80,80,80, 255));
cardfg.paste(userAvatar, (62,62));
# Profile Information
dd.text((210, 64), nick, fill=(74, 144, 226, 255), font=header_font);
dd.text((210, 106), '@' + name + '#' + discr, fill=(74, 144, 226, 255), font=font);
# Roles
for idy, ii in enumerate(roleImages):
startx = int((270 - (30 * len(roleImages))) / 2);
cardfg.paste(roleImages[ii], (337 + startx + (30 * idy),152), roleImages[ii]);
#Info Box Bottom
dd.rectangle([(60,200),(740,450)], fill=(255,255,255,224));
answers = None;
questions = self.config["profile_questions"];
try:
with open('data/users/profiles/{}.dat'.format(userid)) as f:
answers = json.load(f);
except Exception as e:
pass
if (answers != None):
for key, quest in zip(sorted(answers),questions):
if(int(key) < 5):
dd.text((80, 260 + ((int(key)-1) * 48)), textwrap.fill(quest,50) + "\n" + textwrap.fill(answers[key],50), fill=(74, 144, 226, 255), font=desc_font);
else:
dd.text((410, 260 + ((int(key)-6) * 48)), textwrap.fill(quest,50) + "\n" + textwrap.fill(answers[key],50), fill=(74, 144, 226, 255), font=desc_font);
card = Image.new('RGBA', (800, 500), (255,255,255,255));
card = Image.alpha_composite(card,cardbg);
card = Image.alpha_composite(card,cardfg);
s = 'data/users/cards/{0}.png'.format(user.id);
card.save(s, 'png');
with open('data/users/cards/{0}.png'.format(user.id), 'rb') as g:
return await ctx.message.channel.send(file=discord.File(g));
@profile.command(pass_context=True)
async def setup(self, ctx):
"""Set your user profile card."""
await ctx.send('Sending a PM to setup profile card.')
questions = self.config["profile_questions"]
answers = {}
recipient = ctx.message.author
await recipient.send("Hello! I'm here to help you set up your profile. I will ask you a few questions that you may answer. If you don't want to answer the question, you may reply with 'skip' to move on.")
for x, question in enumerate(questions):
await recipient.send(question)
def check(m):
try:
return m.channel.recipient.id == recipient.id and m.author.id == recipient.id
except:
return False
answer = await self.client.wait_for('message', check=check)
if answer.content.lower() == 'skip':
answers[x] = 'N/A'
else:
answers[x] = answer.content
with open('data/users/profiles/{}.dat'.format(recipient.id), 'w', encoding='utf8') as f:
json.dump(answers, f)
return await recipient.send('You have completed your profile setup')
@profile.command(pass_context=True)
async def wallpaper(self, ctx, *, url=""):
"""Set a profile card background image.
Can either be a link to an image or an attachment."""
try:
background = ctx.message.attachments[0].url
except:
background = url
if (background == ""):
return await ctx.send('```Image or URL not found.```')
user = ctx.message.author
async with aiohttp.ClientSession() as session:
async with session.get(background) as r:
image = await r.content.read()
with open('data/users/backgrounds/{0}.png'.format(user.id),'wb') as f:
f.write(image)
isImage = imghdr.what('data/users/backgrounds/{0}.png'.format(user.id))
if(isImage == 'png' or isImage == 'jpeg' or isImage == 'jpg' or isImage == 'gif'):
f.close()
return await ctx.send('```Successfully set profile wallpaper```')
else:
f.close()
os.remove('data/users/backgrounds/{0}.png'.format(user.id))
return await ctx.send('```Something went wrong when setting your wallpaper. Perhaps the file you sent wasn\'t an image?```')
def setup(client):
client.add_cog(Profile(client, client.config))
| gpl-3.0 | 1,141,081,767,628,333,000 | 35.094595 | 212 | 0.560215 | false |
chirpradio/chirpradio-machine | chirp/stream/statistics.py | 1 | 6045 | """
Compute statistics about a consumed sequence of messages.
"""
from chirp.common import http_console_server
from chirp.common import timestamp
from chirp.stream import message
class _ConnectionInfo(object):
MAX_NUM_ERRORS = 25
connection_id = None
start_timestamp_ms = None
last_timestamp_ms = None
num_frames = 0
size_frames = 0
duration_frames_ms = 0
freq_frame_kbps = None # Initialized as {} in constructor
first_frame_timestamp_ms = None
last_frame_timestamp_ms = None
num_blocks = 0
size_blocks = 0
last_block_timestamp_ms = None
errors = None # Initialized as [] in constructor
def __init__(self):
self.freq_frame_kbps = {}
self.errors = []
def process(self, msg):
self.last_timestamp_ms = msg.end_timestamp_ms
if msg.message_type == message.CONNECTED:
self.connection_id = msg.connection_id
self.start_timestamp_ms = msg.start_timestamp_ms
elif msg.message_type == message.FRAME:
self.num_frames += 1
self.size_frames += len(msg.payload)
self.duration_frames_ms += msg.mp3_header.duration_ms
key = msg.mp3_header.bit_rate_kbps
self.freq_frame_kbps[key] = self.freq_frame_kbps.get(key, 0) + 1
if self.first_frame_timestamp_ms is None:
self.first_frame_timestamp_ms = msg.start_timestamp_ms
self.last_frame_timestamp_ms = msg.end_timestamp_ms
elif msg.message_type == message.BLOCK:
self.num_blocks += 1
self.size_blocks += len(msg.payload)
self.last_block_timestamp_ms = msg.end_timestamp_ms
elif msg.message_type == message.ERROR:
self.errors.append(msg)
self.last_error_timestamp_ms = msg.start_timestamp_ms
if len(self.errors) > self.MAX_NUM_ERRORS:
self.errors.pop(0)
def html(self):
now_ms = timestamp.now_ms()
# Note my use of nested tables. I suck at HTML.
contents = ["<table border=1 cellpadding=4><tr><td><table>"]
def add(key, val):
contents.append(
"<tr><td><i>%s</i></td><td>%s</td></tr>" % (key, val))
def add_since_ms(key, ts_ms):
add(key, "%s (%s ago)" % (
timestamp.get_pretty_ms(ts_ms),
timestamp.get_human_readable_duration_ms(now_ms-ts_ms)))
add("start time", timestamp.get_pretty_ms(self.start_timestamp_ms))
duration_ms = self.last_timestamp_ms - self.start_timestamp_ms
add("duration", timestamp.get_human_readable_duration_ms(duration_ms))
if self.num_frames:
add("frames", "%d / %.2fM" % (self.num_frames,
float(self.size_frames) / (1 << 20)))
subtable = ["<table cellpadding=2>"]
vbr = 0
for key, num in sorted(self.freq_frame_kbps.items()):
perc = 100.0 * num / self.num_frames
vbr += float(key * num) / self.num_frames
subtable.append(
"<tr><td>%d kbps</td><td>%.1f%%</td><td>%d</td></tr>" %
(key, perc, num))
subtable.append("</table>")
add("frame distribution", "".join(subtable))
add("average bit rate", "%.2f kbps" % vbr)
since_last_ms = now_ms - self.last_frame_timestamp_ms
add_since_ms("last frame", self.last_frame_timestamp_ms)
frame_span_ms = (self.last_frame_timestamp_ms -
self.first_frame_timestamp_ms)
add("frame deficit",
"%.1fms" % (frame_span_ms - self.duration_frames_ms))
if self.num_blocks:
add("junk blocks", "%d / %db" % (
self.num_blocks, self.size_blocks))
add_since_ms("last junk", self.last_block_timestamp_ms)
if self.errors:
error_list = [
"%s - %s / %s / %s" % (
timestamp.get_pretty_ms(err.start_timestamp_ms),
err.error_type, err.error_code, err.error_text)
for err in reversed(self.errors)]
add("errors", "<br>".join(error_list))
contents.append("</table></td></tr></table>")
return "\n".join(contents)
class Statistics(message.MessageConsumer):
MAX_NUM_RECENT_CONNECTIONS = 20
def __init__(self, src):
message.MessageConsumer.__init__(self, src)
self._current_connection_info = None
self._recent_connections = []
def _process_message(self, msg):
if msg.message_type == message.CONNECTED:
if self._current_connection_info:
self._recent_connections.append(self._current_connection_info)
if (len(self._recent_connections)
> self.MAX_NUM_RECENT_CONNECTIONS):
self._recent_connections.pop(0)
self._current_connection_info = _ConnectionInfo()
if self._current_connection_info is not None:
self._current_connection_info.process(msg)
def _connections_html(self, unused_request):
contents = ["<html><head><title>Connection Log</title></head><body>"]
contents.append("<h1>Connection Log</h1>")
contents.append("The current time is %s" % timestamp.get_pretty())
contents.append("<h2>Current</h2>")
if self._current_connection_info:
contents.append(self._current_connection_info.html())
else:
contents.append("<i>No connections yet</i>")
if self._recent_connections:
contents.append("<h2>Recent</h2>")
contents.extend(con.html()
for con in reversed(self._recent_connections))
contents.append("</body></html>")
return "\n".join(contents)
def export(self, path=None):
http_console_server.register("/connections", self._connections_html)
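# Illustrative wiring sketch (not part of the original module): a Statistics
# instance is constructed over an upstream message source and then exported so
# the "/connections" page becomes available on the HTTP console. The source
# name below is an assumption; driving the consumer loop is handled by the
# MessageConsumer base class and is not shown here.
#
#   stats = Statistics(src=message_source)
#   stats.export()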
| apache-2.0 | -8,816,550,096,536,408,000 | 37.503185 | 79 | 0.561952 | false |
unreal666/outwiker | src/outwiker/core/htmltemplate.py | 2 | 2477 | # -*- coding: utf-8 -*-
import re
from string import Template
from outwiker.gui.guiconfig import HtmlRenderConfig
from outwiker.core.application import Application
class MyTemplate (Template):
"""
    Template helper class. The only reason this class exists is
    to avoid the replacement of $$ with $.
    The regular expression implementation is partially taken
    from http://stackoverflow.com/a/12769116
"""
pattern = r"""
%(delim)s(?:
        (?P<escaped>^$) | # Disable the $$ substitution
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>^$) # never matches (the regex is not multilined)
)
""" % dict(delim=re.escape(Template.delimiter), id=Template.idpattern)
class HtmlTemplate(object):
"""
    Class for generating an HTML page from a template
"""
def __init__(self, template):
"""
        template - the template text
        The template content must be formatted in the style described at
        http://docs.python.org/library/string.html#template-strings
        except that $$ in the template is not replaced with $
"""
self.config = HtmlRenderConfig(Application.config)
self.fontsize = self.config.fontSize.value
self.fontfamily = self.config.fontName.value
self.userStyle = self.config.userStyle.value
self.template = MyTemplate(template)
def substitute(self, content, **kwargs):
'''
In outwiker.core 1.5 'userhead' parameter will be replaced to **kwargs
'''
if 'userhead' not in kwargs:
kwargs['userhead'] = u''
if 'title' not in kwargs:
kwargs['title'] = u''
if 'userhtmlattrs' not in kwargs:
kwargs['userhtmlattrs'] = u''
return self.template.safe_substitute(content = content,
fontsize = self.fontsize,
fontfamily = self.fontfamily,
userstyle = self.userStyle,
**kwargs)
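# Illustrative usage sketch (not part of the original source): HtmlTemplate
# reads font settings from the running OutWiker Application config, so this
# only works inside OutWiker; the template string here is an assumption.
#
#   tpl = HtmlTemplate(u'<html><body style="font-size:${fontsize}pt">${content}</body></html>')
#   html = tpl.substitute(u'<p>Hello</p>', title=u'Example page')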
| gpl-3.0 | 1,693,166,516,128,128,300 | 34.290323 | 79 | 0.579525 | false |
DanLindeman/memegen | tests/test_api_aliases.py | 1 | 1225 | # pylint: disable=unused-variable,misplaced-comparison-constant,expression-not-assigned
from expecter import expect
from .utils import load
def describe_get():
def it_requires_a_name_to_return_aliases(client):
status, data = load(client.get("/api/aliases/"))
expect(status) == 200
expect(data) == []
def it_redirects_with_param(client):
status, text = load(client.get("/api/aliases/?name=foo"), as_json=False)
expect(status) == 302
expect(text).contains('<a href="/api/aliases/foo">')
def describe_filter():
def with_single_match(client):
status, data = load(client.get("/api/aliases/sad-biden"))
expect(status) == 200
expect(data) == {
'sad-biden': {
'styles': [
'down',
'scowl',
'window',
],
'template': "http://localhost/api/templates/sad-biden"
}
}
def with_many_matches(client):
status, data = load(client.get("/api/aliases/votestakes"))
expect(status) == 200
expect(len(data)) == 5
| mit | -1,296,583,061,009,426,400 | 27.488372 | 87 | 0.516735 | false |
PoolC/pythonstudy | entity.py | 1 | 1669 | #coding:utf-8
import types
class Channel(object):
def __init__(self, owner):
self._owner = owner
def _process_message(self, sender, msg, msgargs):
return self._owner._handle(sender, msg, msgargs)
def sendto(self, receiver, msg, msgargs = None):
return receiver._process_message(self, msg, msgargs)
class Entity(object):
def __init__(self):
self.channel = Channel(self)
self._nextcomp = None
self._children = []
def attach(self, comp):
assert comp._nextcomp is None
c = self
while c._nextcomp is not None:
c = c._nextcomp
c._nextcomp = comp
comp.channel = self.channel
return self
def _handle_attach(self, sender, msgargs):
self.attach(msgargs)
def components(self):
c = self
while c._nextcomp is not None:
yield c._nextcomp
c = c._nextcomp
def add(self, entity):
self._children.append(entity)
def _handle(self, sender, msg, msgargs):
c = self
while c is not None:
try:
handler = getattr(c, '_handle_' + msg)
except AttributeError:
handler = None
if handler:
return handler(sender, msgargs)
c = c._nextcomp
return None
def tick(compcls):
for comp in compcls.instances:
comp.tick()
def tickall():
for compcls in component_classes:
tick(compcls)
def send(sender, receiver, msg, msgargs):
    # route the message between entities through their channels
    return sender.channel.sendto(receiver.channel, msg, msgargs)
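# Illustrative demo (not part of the original study code): shows the
# attach/_handle_* dispatch pattern defined above; the Health component is a
# hypothetical example.
if __name__ == '__main__':
    class Health(Entity):
        def _handle_damage(self, sender, amount):
            print('took %s damage' % amount)
    player = Entity()
    player.attach(Health())
    player.channel.sendto(player.channel, 'damage', 10)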
| gpl-3.0 | -5,939,575,256,779,529,000 | 24.287879 | 60 | 0.537448 | false |
keenondrums/sovrin-node | sovrin_client/agent/agent_issuer.py | 1 | 3182 | import json
from plenum.common.types import f
from anoncreds.protocol.issuer import Issuer
from anoncreds.protocol.types import ID
from anoncreds.protocol.types import ClaimRequest
from sovrin_client.agent.constants import EVENT_NOTIFY_MSG, CLAIMS_LIST_FIELD
from sovrin_client.agent.msg_constants import CLAIM, CLAIM_REQ_FIELD, CLAIM_FIELD, \
AVAIL_CLAIM_LIST, REVOC_REG_SEQ_NO, SCHEMA_SEQ_NO, ISSUER_DID
from sovrin_common.identity import Identity
from plenum.common.constants import DATA
from sovrin_client.client.wallet.attribute import Attribute
class AgentIssuer:
def __init__(self, issuer: Issuer):
self.issuer = issuer
async def processReqAvailClaims(self, msg):
body, (frm, ha) = msg
link = self.verifyAndGetLink(msg)
data = {
CLAIMS_LIST_FIELD: self.get_available_claim_list(link)
}
resp = self.getCommonMsg(AVAIL_CLAIM_LIST, data)
self.signAndSend(resp, link.localIdentifier, frm)
async def processReqClaim(self, msg):
body, (frm, _) = msg
link = self.verifyAndGetLink(msg)
if not link:
raise NotImplementedError
claimReqDetails = body[DATA]
schemaId = ID(schemaId=claimReqDetails[SCHEMA_SEQ_NO])
schema = await self.issuer.wallet.getSchema(schemaId)
if not self.is_claim_available(link, schema.name):
self.notifyToRemoteCaller(
EVENT_NOTIFY_MSG, "This claim is not yet available.",
self.wallet.defaultId, frm,
origReqId=body.get(f.REQ_ID.nm))
return
public_key = await self.issuer.wallet.getPublicKey(schemaId)
claimReq = ClaimRequest.from_str_dict(
claimReqDetails[CLAIM_REQ_FIELD], public_key.N)
self._add_attribute(
schemaKey=schema.getKey(),
proverId=claimReq.userId,
link=link)
claim_signature, claim_attributes = await self.issuer.issueClaim(schemaId, claimReq)
claimDetails = {
f.SIG.nm: claim_signature.to_str_dict(),
ISSUER_DID: schema.issuerId,
CLAIM_FIELD: json.dumps({k: v.to_str_dict() for k, v in claim_attributes.items()}),
REVOC_REG_SEQ_NO: None,
SCHEMA_SEQ_NO: claimReqDetails[SCHEMA_SEQ_NO]
}
resp = self.getCommonMsg(CLAIM, claimDetails)
self.signAndSend(resp, link.localIdentifier, frm,
origReqId=body.get(f.REQ_ID.nm))
def _add_attribute(self, schemaKey, proverId, link):
attr = self.issuer_backend.get_record_by_internal_id(link.internalId)
self.issuer._attrRepo.addAttributes(schemaKey=schemaKey,
userId=proverId,
attributes=attr)
def publish_trust_anchor(self, idy: Identity):
self.wallet.addTrustAnchoredIdentity(idy)
reqs = self.wallet.preparePending()
self.client.submitReqs(*reqs)
def publish_trust_anchor_attribute(self, attrib: Attribute):
self.wallet.addAttribute(attrib)
reqs = self.wallet.preparePending()
self.client.submitReqs(*reqs)
| apache-2.0 | 4,596,627,729,626,327,000 | 37.337349 | 95 | 0.641735 | false |
jbeezley/girder | girder/__init__.py | 1 | 11346 | # -*- coding: utf-8 -*-
from pkg_resources import DistributionNotFound, get_distribution
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
__version__ = None
__license__ = 'Apache 2.0'
import cherrypy
import functools
import logging
import logging.handlers
import os
import six
import sys
import traceback
from girder.constants import LOG_ROOT, MAX_LOG_SIZE, LOG_BACKUP_COUNT, TerminalColor
from girder.utility import config, mkdir
from girder.utility._cache import cache, requestCache, rateLimitBuffer
_quiet = False
_originalStdOut = sys.stdout
_originalStdErr = sys.stderr
auditLogger = logging.getLogger('girder_audit')
auditLogger.setLevel(logging.INFO)
logger = logging.getLogger('girder')
logger.setLevel(logging.DEBUG) # Pass everything; let filters handle level-based filtering
config.loadConfig() # Populate the config info at import time
class LogLevelFilter(object):
"""
Filter log records based on whether they are between a min and max level.
"""
def __init__(self, min, max):
self.minLevel = min
self.maxLevel = max
def filter(self, logRecord):
level = logRecord.levelno
return self.maxLevel >= level >= self.minLevel
class LogFormatter(logging.Formatter):
"""
Custom formatter that adds useful information about the request to the logs
when an exception happens. Cherrypy access logs are passed through without
change.
"""
def formatException(self, exc):
info = '\n'.join((
' Request URL: %s %s' % (cherrypy.request.method.upper(), cherrypy.url()),
' Query string: ' + cherrypy.request.query_string,
' Remote IP: ' + cherrypy.request.remote.ip,
' Request UID: ' + getattr(cherrypy.request, 'girderRequestUid', '[none]')
))
return ('%s\n'
'Additional info:\n'
'%s' % (logging.Formatter.formatException(self, exc), info))
def format(self, record, *args, **kwargs):
if hasattr(record, 'name') and hasattr(record, 'message'):
if (record.name.startswith('cherrypy.access')
or record.name.startswith('cherrypy.error')):
return record.message
return super(LogFormatter, self).format(record, *args, **kwargs)
class StreamToLogger(object):
"""
Redirect a file-like stream to a logger.
"""
def __init__(self, stream, logger, level):
self.stream = stream
self.logger = logger
self.level = level
self.logger._girderLogHandlerOutput = False
# This class is intended to override a default stream like sys.stdout
# and sys.stderr and send that information to both the original stream
# and the logger method. However, we want to preserve as much
# functionality for stdout and stderr as possible, so that other
# modules that send data to them can do so without a problem. The only
# method we really need to override is write, but we cannot mutate the
# write method on the stream itself, so we replace the stream with this
# custom class. To preserve the stream methods, all of them get added
# to our class instance except private and built-in methods, which, in
# python, begin with _.
# Fundamentally, this lets our stream replacement handle functions
# flush, writeline, and others without having to enumerate them
# individually.
for key in dir(stream):
# It's possible for a file-like object to have name appear in dir(stream) but not
# actually be an attribute, thus using a default with getattr is required.
# See https://github.com/GrahamDumpleton/mod_wsgi/issues/184 for more.
if (key != 'write'
and not key.startswith('_')
and (callable(getattr(stream, key, None))
or isinstance(
getattr(stream, key, None),
(six.binary_type, six.string_types, six.integer_types, bool)))):
setattr(self, key, getattr(stream, key))
def write(self, buf):
if not self.logger._girderLogHandlerOutput:
self.logger._girderLogHandlerOutput = True
self.stream.write(buf)
for line in buf.rstrip().splitlines():
self.logger.log(self.level, line.rstrip())
self.logger._girderLogHandlerOutput = False
def getLogPaths():
"""
Return the paths to the error and info log files. These are returned as
a dict with "error" and "info" keys that point to the respective file,
as well as a "root" key pointing to the log root directory.
"""
cfg = config.getConfig()
logCfg = cfg.get('logging', {})
root = os.path.expanduser(logCfg.get('log_root', LOG_ROOT))
return {
'root': root,
'error': logCfg.get('error_log_file', os.path.join(root, 'error.log')),
'info': logCfg.get('info_log_file', os.path.join(root, 'info.log'))
}
def _attachFileLogHandlers():
"""
Sets up the Girder logger.
"""
global _quiet
cfg = config.getConfig()
logCfg = cfg.get('logging', {})
# If we are asked to be quiet, set a global flag so that logprint doesn't
# have to get the configuration settings every time it is used.
if logCfg.get('log_quiet') is True:
_quiet = True
logPaths = getLogPaths()
# Ensure log paths are valid
logDirs = [
logPaths['root'],
os.path.dirname(logPaths['info']),
os.path.dirname(logPaths['error'])
]
for logDir in logDirs:
mkdir(logDir)
# Allow minimum log level to be set via config file
level = logging.INFO
if logCfg.get('log_level') and isinstance(getattr(logging, logCfg['log_level'], None), int):
level = getattr(logging, logCfg['log_level'])
logSize = MAX_LOG_SIZE
if logCfg.get('log_max_size'):
sizeValue = logCfg['log_max_size']
sizeUnits = {'kb': 1024, 'Mb': 1024 ** 2, 'Gb': 1024 ** 3}
if sizeValue[-2:] in sizeUnits:
logSize = int(sizeValue[:-2].strip()) * sizeUnits[sizeValue[-2:]]
else:
logSize = int(sizeValue)
backupCount = int(logCfg.get('log_backup_count', LOG_BACKUP_COUNT))
fmt = LogFormatter('[%(asctime)s] %(levelname)s: %(message)s')
infoMaxLevel = logging.INFO
# Create log handlers
if logPaths['error'] != logPaths['info']:
eh = logging.handlers.RotatingFileHandler(
logPaths['error'], maxBytes=logSize, backupCount=backupCount)
eh.setLevel(level)
eh.addFilter(LogLevelFilter(min=logging.WARNING, max=logging.CRITICAL))
eh._girderLogHandler = 'error'
eh.setFormatter(fmt)
logger.addHandler(eh)
# Record cherrypy errors in our logs, too
cherrypy.log.error_log.addHandler(eh)
else:
infoMaxLevel = logging.CRITICAL
if isinstance(getattr(logging, logCfg.get('log_max_info_level', ''), None), int):
infoMaxLevel = getattr(logging, logCfg['log_max_info_level'])
ih = logging.handlers.RotatingFileHandler(
logPaths['info'], maxBytes=logSize, backupCount=backupCount)
ih.setLevel(level)
ih.addFilter(LogLevelFilter(min=logging.DEBUG, max=infoMaxLevel))
ih._girderLogHandler = 'info'
ih.setFormatter(fmt)
logger.addHandler(ih)
# Record cherrypy errors in our logs, too
cherrypy.log.error_log.addHandler(ih)
# Log http accesses to the screen and/or the info log.
accessLog = logCfg.get('log_access', 'screen')
if not isinstance(accessLog, (tuple, list, set)):
accessLog = [accessLog]
if _quiet or ('screen' not in accessLog and 'stdout' not in accessLog):
cherrypy.config.update({'log.screen': False})
if 'info' in accessLog:
cherrypy.log.access_log.addHandler(ih)
return logger
def logStdoutStderr(force=False):
if _originalStdOut == sys.stdout or force:
sys.stdout = StreamToLogger(_originalStdOut, logger, logging.INFO)
sys.stderr = StreamToLogger(_originalStdErr, logger, logging.ERROR)
def logprint(*args, **kwargs):
"""
Send a message to both stdout and the appropriate logs. This behaves like
Python3's print statement, plus takes additional named parameters:
:param level: the log level. This determines which log handlers will store
the log message. The log is always sent to stdout.
:param color: one of the constants.TerminalColor values or None.
:param exc_info: None to not print exception information. True for the
last exception, or a tuple of exception information.
"""
data = six.StringIO()
kwargs = (kwargs or {}).copy()
level = kwargs.pop('level', logging.DEBUG)
color = kwargs.pop('color', None)
exc_info = kwargs.pop('exc_info', None)
kwargs['file'] = data
six.print_(*args, **kwargs)
data = data.getvalue().rstrip()
if exc_info and not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
data += '\n' + ''.join(traceback.format_exception(*exc_info)).rstrip()
logger.log(level, data)
if not _quiet:
if color:
data = getattr(TerminalColor, color)(data)
_originalStdOut.write('%s\n' % data)
_originalStdOut.flush()
def _setupCache():
"""
Setup caching based on configuration file.
Cache backends are forcibly replaced because Girder initially configures
the regions with the null backends.
"""
curConfig = config.getConfig()
if curConfig['cache']['enabled']:
# Replace existing backend, this is necessary
# because they're initially configured with the null backend
cacheConfig = {
'cache.global.replace_existing_backend': True,
'cache.request.replace_existing_backend': True
}
curConfig['cache'].update(cacheConfig)
cache.configure_from_config(curConfig['cache'], 'cache.global.')
requestCache.configure_from_config(curConfig['cache'], 'cache.request.')
else:
# Reset caches back to null cache (in the case of server teardown)
cache.configure(backend='dogpile.cache.null', replace_existing_backend=True)
requestCache.configure(backend='dogpile.cache.null', replace_existing_backend=True)
# Although the rateLimitBuffer has no pre-existing backend, this method may be called multiple
# times in testing (where caches were already configured)
rateLimitBuffer.configure(backend='dogpile.cache.memory', replace_existing_backend=True)
# Expose common logging levels and colors as methods of logprint.
logprint.info = functools.partial(logprint, level=logging.INFO, color='info')
logprint.warning = functools.partial(
logprint, level=logging.WARNING, color='warning')
logprint.error = functools.partial(
logprint, level=logging.ERROR, color='error')
logprint.success = functools.partial(
logprint, level=logging.INFO, color='success')
logprint.critical = functools.partial(
logprint, level=logging.CRITICAL, color='error')
logprint.debug = logprint
logprint.exception = functools.partial(
logprint, level=logging.ERROR, color='error', exc_info=True)
| apache-2.0 | -6,225,005,307,584,097,000 | 37.591837 | 98 | 0.654328 | false |
zhewang/lcvis | main.py | 1 | 3262 | import json
import numpy as np
import pca
import process_object as po
from flask import Flask, request, jsonify, render_template, url_for
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/supernova')
def supernova():
return render_template('supernova.html')
@app.route('/new')
def splinenew():
return render_template('index_new.html')
@app.route('/plotusers', methods=['post'])
def plotusers():
data = request.get_json()
position = po.project(data['lc'], data['p'])
return jsonify(position)
@app.route('/fastpca')
def fastpca():
return render_template('fastpca.html')
@app.route('/calculatepca', methods=['post'])
def calculatepca():
global LAST_PCA
uids = request.get_json()
idlist, matrix, status = get_data_by_id(uids)
pca_result = {}
if status == 'ok':
pca_result = pca.calculate(idlist, matrix, LAST_PCA)
LAST_PCA = pca_result
final_result = [[{} for x in range(len(pca_result[0]))] for x in range(len(pca_result))]
for i in range(len(pca_result)):
for j in range(len(pca_result[0])):
final_result[i][j] = {'count':pca_result[i][j]}
return jsonify({'status':status, 'data':final_result})
@app.route('/calculate_average_lc', methods=['post'])
def calculate_average_lc():
global LCDATA, LCPHASE
uids = request.get_json()
# TODO: band as parameter
band = 'V'
matrix = []
for i in uids:
i = str(i)
if i in LCDATA:
if band in LCDATA[i]['bands']:
vec = LCDATA[i][band]['mag']
if len(vec) > 0: # some id may not have lc data
matrix.append(LCDATA[i][band]['mag'])
mean = np.mean(np.array(matrix), axis=0)
std = np.std(np.array(matrix), axis=0)
return jsonify({'mean':mean.tolist(),
'std':std.tolist(),
'phase':LCPHASE})
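# Illustrative request sketch (not part of the original source): the endpoint
# above expects a JSON list of object ids in the POST body; the ids here are
# placeholders.
#
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '[10001, 10002]' http://localhost:8080/calculate_average_lc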
def load_lc_data():
lcdata = {}
lcphase = [] # Assume all phase for different surveys are the same
surveys = json.load(open("./static/data_ogle/list.json"))['surveys']
for s in surveys:
path = "./static/data_ogle/lightcurves/{}/fit.json".format(s)
data = json.load(open(path))
lcphase = data['phase']
for objid in data['data']:
lcdata[objid] = data['data'][objid]
return lcdata, lcphase
def get_data_by_id(ids, band='V'):
global LCDATA
ids_exist = [] # some id may not have lc data
matrix = []
status = 'ok'
for i in ids:
i = str(i)
if i in LCDATA:
if band in LCDATA[i]['bands']:
vec = LCDATA[i][band]['mag']
if len(vec) > 0:
ids_exist.append(i)
matrix.append(vec)
if len(matrix) == 0:
return ids_exist, matrix, 'no light curve data'
c_length = len(matrix[0])
matrix = np.array(matrix)
if len(matrix) < c_length:
status = "numrows < numcols"
else:
try:
matrix.shape = (len(matrix), c_length)
except:
status = "not all rows have same numcols"
return ids_exist, matrix, status
LCDATA, LCPHASE = load_lc_data()
LAST_PCA = []
app.run(port=8080, debug=True)
| gpl-2.0 | 1,719,386,529,822,531,600 | 26.411765 | 92 | 0.57572 | false |
nicememory/pie | pyglet/examples/opengl_3.py | 1 | 1655 | #!/usr/bin/python
# $Id:$
'''In order to use the new features of OpenGL 3, you must explicitly create
an OpenGL 3 context. You can do this by supplying the `major_version` and
`minor_version` attributes for a GL Config.
This example creates an OpenGL 3 context, prints the version string to stdout,
and exits.
At time of writing, only the beta nvidia driver on Windows and Linux supports
OpenGL 3, and requires an 8-series or higher card.
On Windows, OpenGL 3 API must be explicitly enabled using the nvemulate tool
[1]. Additionally, at time of writing the latest driver did not yet support
forward compatible or debug contexts.
On Linux, the only driver that currently exposes the required GLX extensions
is 177.61.02 -- later drivers (177.67, 177.68, 177.7*, 177.8*, 180.06) seem to
be missing the extensions.
[1] http://developer.nvidia.com/object/nvemulate.html
'''
from __future__ import print_function
import pyglet
# Specify the OpenGL version explicitly to request 3.0 features, including
# GLSL 1.3.
#
# Some other attributes relevant to OpenGL 3:
# forward_compatible = True To request a context without deprecated
# functionality
# debug = True To request a debug context
config = pyglet.gl.Config(major_version=3, minor_version=0)
# Create a context matching the above configuration. Will fail if
# OpenGL 3 is not supported by the driver.
window = pyglet.window.Window(config=config, visible=False)
# Print the version of the context created.
print('OpenGL version:', window.context.get_info().get_version())
window.close()
| apache-2.0 | 8,591,080,027,478,945,000 | 34.777778 | 78 | 0.712387 | false |
datascopeanalytics/sensor_fusion | main.py | 1 | 6487 | import copy
import datetime
import math
import random
from collections import defaultdict
import matplotlib as mpl
import matplotlib.animation
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import scipy
import seaborn as sns
import traces
# local imports
from kalman import Estimate, Reading
from matplotlib import pyplot as plt
from sensor import Sensor
from traincar import TrainCar
class SensorAnimation(matplotlib.animation.FuncAnimation):
def __init__(self, time_array, truth, reading_array, estimate_array):
self.fig, (self.ax2, self.ax1) = plt.subplots(
1, 2, sharey=True,
gridspec_kw={"width_ratios":[3, 1]},
figsize=(8, 4)
)
plt.tight_layout(pad=2.0)
self.time_array = time_array
self.estimate_array = estimate_array
self.ax1.set_ylim(0, 120)
self.ax1.set_xlim(0, 20)
self.ax1.set_xlabel("Probability")
self.ax1.xaxis.set_major_formatter(FormatStrFormatter('%d%%'))
self.estimate_line = self.ax1.plot(
[], [], color='purple', label='estimate')
self.lines = []
for sensor in reading_array:
self.lines += self.ax1.plot(
[], [], color=sensor.color, label=sensor.name)
self.truth_line = self.ax1.hlines(truth[0], 0, 20, color='red', label='Occupancy')
self.ax1.legend()
self.ax2.plot(time_array, truth, color='red', label='Occupancy')
# self.ax2.set_ylim(0, 150)
self.ax2.set_title("Train car occupancy over time")
self.ax2.set_xlabel("Time (minutes)")
self.ax2.set_ylabel("Occupants")
self.estimate_ts = self.ax2.plot(
[], [], color='purple', label='estimate')
self.fill_lines = self.ax2.fill_between(
[], [], color='purple', alpha=0.5)
self.truth = truth
self.reading_array = reading_array
super().__init__(
self.fig, self.update,
frames=len(time_array),
blit=True
)
def update(self, i):
"""updates frame i of the animation"""
self.ax1.set_title("{}".format(
datetime.timedelta(minutes=self.time_array[i]))
)
for sensor, line in zip(self.reading_array.keys(), self.lines):
reading = self.reading_array.get(sensor)[i]
x, y = reading.vectorize(self.ax1.get_ylim())
line.set_data(y, x)
estimate = self.estimate_array[i]
self.estimate_line[0].set_data(
estimate.vectorize(self.ax1.get_ylim())[1],
estimate.vectorize(self.ax1.get_ylim())[0],
)
self.truth_line.remove()
        self.truth_line = self.ax1.hlines(self.truth[i], 0, 20, color='red', label='Occupancy')
self.estimate_ts[0].set_data(
self.time_array[:i], self.estimate_array[:i])
self.fill_lines.remove()
self.fill_lines = self.ax2.fill_between(
self.time_array[:i],
[e.mu - 2 * e.sigma for e in self.estimate_array[:i]],
[e.mu + 2 * e.sigma for e in self.estimate_array[:i]],
color='purple',
alpha=0.5
)
return tuple(self.lines + self.estimate_line + self.estimate_ts + [self.fill_lines] + [self.truth_line])
if __name__ == "__main__":
# create some crappy sensors
co2_sensor = Sensor("CO$_2$", intersect=350, slope=15,
sigma=10, round_level=500, proc_sigma=30, units="ppm")
# sigma=500, round_level=500, proc_sigma=0)
temp_sensor = Sensor("Temperature", intersect=0, slope=0.25,
sigma=5, round_level=10, proc_sigma=5, units="$^{\circ}$C")
# put the sensors on a train car
train_car = TrainCar(sensor_array=[co2_sensor, temp_sensor])
# run some experiments to model/calibrate the sensors
train_car.run_experiment(datapoints=250)
train_car.plot_experiment(path="experiment_plots")
# generate some "real" occupancy data
train_car.generate_occupancy() # defaults to 5 stations and 30 minutes
time_array = np.arange(0, 30, 1.0 / 10)
reading_array = defaultdict(list)
truth = []
estimate_array = []
estimate = Estimate()
for t in time_array:
for reading in train_car.read_sensors(t):
reading_array[reading.sensor].append(reading)
estimate.add_reading(reading)
estimate_array.append(copy.deepcopy(estimate))
# if the last point was in a station
if truth and train_car.occupants_trace[t] != truth[-1]:
estimate = Estimate()
truth.append(train_car.occupants_trace[t])
# plt.clf()
# plt.plot(time_array, reading_array[co2_sensor])
# plt.savefig("co2.png")
plt.clf()
animation = SensorAnimation(
time_array, truth, reading_array, estimate_array
)
animation.save("30minutes.mp4", fps=10, bitrate=1024)
plt.clf()
plt.xlabel("Number of people in the train car")
plt.ylabel("Probability")
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%.1f%%'))
reading_1 = Reading(co2_sensor, 60)
print("reading_1 = ", (reading_1.value, reading_1.mu))
plt.plot(*reading_1.vectorize((0,120)), color=co2_sensor.color, label="CO$_2$ sensor")
plt.vlines(reading_1, 0, max(reading_1.vectorize((0,120))[1]), linestyles='dashed')
plt.legend()
plt.savefig("reading_plots/1_co2.svg")
reading_2 = Reading(co2_sensor, 60)
print("reading_2 = ", (reading_2.value, reading_2.mu))
plt.plot(*reading_2.vectorize((0,120)), color=co2_sensor.color)
plt.vlines(reading_2, 0, max(reading_2.vectorize((0,120))[1]), linestyles='dashed')
plt.savefig("reading_plots/2_co2.svg")
estimate = Estimate()
estimate.add_reading(reading_1)
estimate.add_reading(reading_2)
estimate_line = plt.plot(*estimate.vectorize((0,120)), color='purple', label="Estimate")
plt.legend()
plt.savefig("reading_plots/3_co2.svg")
reading_3 = Reading(temp_sensor, 60)
print("reading_3 = ", (reading_3.value, reading_3.mu))
plt.plot(*reading_3.vectorize((0,120)), color=temp_sensor.color, label="Temperature sensor")
plt.vlines(reading_3, 0, max(reading_3.vectorize((0,120))[1]), linestyles='dashed')
estimate.add_reading(reading_3)
estimate_line[0].remove()
estimate_line = plt.plot(*estimate.vectorize((0,120)), color='purple', label="Estimate")
plt.legend()
plt.savefig("reading_plots/4_co2.svg")
| unlicense | -2,849,462,378,540,954,000 | 34.839779 | 112 | 0.617389 | false |
minrk/sympy | sympy/tensor/indexed.py | 1 | 13604 | """Module that defines indexed objects
The classes IndexedBase, Indexed and Idx would represent a matrix element
M[i, j] as in the following graph::
1) The Indexed class represents the entire indexed object.
|
___|___
' '
M[i, j]
/ \__\______
| |
| |
| 2) The Idx class represent indices and each Idx can
| optionally contain information about its range.
|
3) IndexedBase represents the `stem' of an indexed object, here `M'.
The stem used by itself is usually taken to represent the entire
array.
There can be any number of indices on an Indexed object. No transformation
properties are implemented in these Base objects, but implicit contraction
of repeated indices is supported.
Note that the support for complicated (i.e. non-atomic) integer expressions
as indices is limited. (This should be improved in future releases.)
Examples
========
To express the above matrix element example you would write:
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> M = IndexedBase('M')
>>> i, j = map(Idx, ['i', 'j'])
>>> M[i, j]
M[i, j]
Repeated indices in a product imply a summation, so to express a
matrix-vector product in terms of Indexed objects:
>>> x = IndexedBase('x')
>>> M[i, j]*x[j]
M[i, j]*x[j]
If the indexed objects will be converted to component based arrays, e.g.
with the code printers or the autowrap framework, you also need to provide
(symbolic or numerical) dimensions. This can be done by passing an
optional shape parameter to IndexedBase upon construction:
>>> dim1, dim2 = symbols('dim1 dim2', integer=True)
>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2))
>>> A.shape
Tuple(dim1, 2*dim1, dim2)
>>> A[i, j, 3].shape
Tuple(dim1, 2*dim1, dim2)
If an IndexedBase object has no shape information, it is assumed that the
array is as large as the ranges of its indices:
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> M[i, j].shape
Tuple(m, n)
>>> M[i, j].ranges
[Tuple(0, m - 1), Tuple(0, n - 1)]
The above can be compared with the following:
>>> A[i, 2, j].shape
Tuple(dim1, 2*dim1, dim2)
>>> A[i, 2, j].ranges
[Tuple(0, m - 1), None, Tuple(0, n - 1)]
To analyze the structure of indexed expressions, you can use the methods
get_indices() and get_contraction_structure():
>>> from sympy.tensor import get_indices, get_contraction_structure
>>> get_indices(A[i, j, j])
(set([i]), {})
>>> get_contraction_structure(A[i, j, j])
{(j,): set([A[i, j, j]])}
See the appropriate docstrings for a detailed explanation of the output.
"""
# TODO: (some ideas for improvement)
#
# o test and guarantee numpy compatibility
# - implement full support for broadcasting
# - strided arrays
#
# o more functions to analyze indexed expressions
# - identify standard constructs, e.g matrix-vector product in a subexpression
#
# o functions to generate component based arrays (numpy and sympy.Matrix)
# - generate a single array directly from Indexed
# - convert simple sub-expressions
#
# o sophisticated indexing (possibly in subclasses to preserve simplicity)
# - Idx with range smaller than dimension of Indexed
# - Idx with stepsize != 1
# - Idx with step determined by function call
from sympy.core import Expr, Basic, Tuple, Symbol, Integer, sympify, S
from sympy.core.compatibility import ordered_iter
class IndexException(Exception):
pass
class IndexedBase(Expr):
"""Represent the base or stem of an indexed object
    The IndexedBase class represents an array that contains elements. The main purpose
of this class is to allow the convenient creation of objects of the Indexed
class. The __getitem__ method of IndexedBase returns an instance of
Indexed. Alone, without indices, the IndexedBase class can be used as a
notation for e.g. matrix equations, resembling what you could do with the
Symbol class. But, the IndexedBase class adds functionality that is not
available for Symbol instances:
- An IndexedBase object can optionally store shape information. This can
be used in to check array conformance and conditions for numpy
broadcasting. (TODO)
- An IndexedBase object implements syntactic sugar that allows easy symbolic
representation of array operations, using implicit summation of
repeated indices.
- The IndexedBase object symbolizes a mathematical structure equivalent
to arrays, and is recognized as such for code generation and automatic
compilation and wrapping.
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> A = IndexedBase('A'); A
A
>>> type(A)
<class 'sympy.tensor.indexed.IndexedBase'>
    When an IndexedBase object receives indices, it returns an array with named
axes, represented by an Indexed object:
>>> i, j = symbols('i j', integer=True)
>>> A[i, j, 2]
A[i, j, 2]
>>> type(A[i, j, 2])
<class 'sympy.tensor.indexed.Indexed'>
The IndexedBase constructor takes an optional shape argument. If given,
it overrides any shape information in the indices. (But not the index
ranges!)
>>> m, n, o, p = symbols('m n o p', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> A[i, j].shape
Tuple(m, n)
>>> B = IndexedBase('B', shape=(o, p))
>>> B[i, j].shape
Tuple(o, p)
"""
is_commutative = False
def __new__(cls, label, shape=None, **kw_args):
if isinstance(label, basestring):
label = Symbol(label)
obj = Expr.__new__(cls, label, **kw_args)
if ordered_iter(shape):
obj._shape = Tuple(*shape)
else:
obj._shape = shape
return obj
@property
def args(self):
if self._shape:
return self._args + (self._shape,)
else:
return self._args
def _hashable_content(self):
return Expr._hashable_content(self) + (self._shape,)
def __getitem__(self, indices, **kw_args):
if ordered_iter(indices):
# Special case needed because M[*my_tuple] is a syntax error.
if self.shape and len(self.shape) != len(indices):
raise IndexException("Rank mismatch")
return Indexed(self, *indices, **kw_args)
else:
if self.shape and len(self.shape) != 1:
raise IndexException("Rank mismatch")
return Indexed(self, indices, **kw_args)
@property
def shape(self):
return self._shape
@property
def label(self):
return self.args[0]
def _sympystr(self, p):
return p.doprint(self.label)
class Indexed(Expr):
"""Represents a mathematical object with indices.
>>> from sympy.tensor import Indexed, IndexedBase, Idx
>>> from sympy import symbols
>>> i, j = map(Idx, ['i', 'j'])
>>> Indexed('A', i, j)
A[i, j]
It is recommended that Indexed objects are created via IndexedBase:
>>> A = IndexedBase('A')
>>> Indexed('A', i, j) == A[i, j]
True
"""
is_commutative = False
def __new__(cls, base, *args, **kw_args):
if not args: raise IndexException("Indexed needs at least one index")
if isinstance(base, (basestring, Symbol)):
base = IndexedBase(base)
elif not isinstance(base, IndexedBase):
raise TypeError("Indexed expects string, Symbol or IndexedBase as base")
return Expr.__new__(cls, base, *args, **kw_args)
@property
def base(self):
return self.args[0]
@property
def indices(self):
return self.args[1:]
@property
def rank(self):
"""returns the number of indices"""
return len(self.args)-1
@property
def shape(self):
"""returns a list with dimensions of each index.
Dimensions is a property of the array, not of the indices. Still, if
the IndexedBase does not define a shape attribute, it is assumed that
the ranges of the indices correspond to the shape of the array.
>>> from sympy.tensor.indexed import IndexedBase, Idx
>>> from sympy import symbols
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', m)
>>> A = IndexedBase('A', shape=(n, n))
>>> B = IndexedBase('B')
>>> A[i, j].shape
Tuple(n, n)
>>> B[i, j].shape
Tuple(m, m)
"""
if self.base.shape:
return self.base.shape
try:
return Tuple(*[i.upper - i.lower + 1 for i in self.indices])
except AttributeError:
raise IndexException("Range is not defined for all indices in: %s" % self)
except TypeError:
raise IndexException("Shape cannot be inferred from Idx with undefined range: %s"%self)
@property
def ranges(self):
"""returns a list of tuples with lower and upper range of each index
If an index does not define the data members upper and lower, the
corresponding slot in the list contains ``None'' instead of a tuple.
"""
ranges = []
for i in self.indices:
try:
ranges.append(Tuple(i.lower, i.upper))
except AttributeError:
ranges.append(None)
return ranges
def _sympystr(self, p):
indices = map(p.doprint, self.indices)
return "%s[%s]" % (p.doprint(self.base), ", ".join(indices))
class Idx(Expr):
"""Represents an index, either symbolic or integer.
There are a number of ways to create an Idx object. The constructor
takes two arguments:
``label``
An integer or a symbol that labels the index.
``range``
Optionally you can specify a range as either
- Symbol or integer: This is interpreted as dimension. lower and
upper ranges are set to 0 and range-1
- tuple: This is interpreted as the lower and upper bounds in the
range.
Note that the Idx constructor is rather pedantic, and will not accept
non-integer symbols. The only exception is that you can use oo and -oo to
specify an unbounded range. For all other cases, both label and bounds
    must be declared as integers, in the sense that for an index label n,
n.is_integer must return True.
For convenience, if the label is given as a string, it is automatically
converted to an integer symbol. (Note that this conversion is not done for
range or dimension arguments.)
:Examples:
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols, oo
>>> n, i, L, U = symbols('n i L U', integer=True)
0) Construction from a string. An integer symbol is created from the
string.
>>> Idx('qwerty')
qwerty
1) Both upper and lower bound specified
>>> idx = Idx(i, (L, U)); idx
i
>>> idx.lower, idx.upper
(L, U)
2) Only dimension specified, lower bound defaults to 0
>>> idx = Idx(i, n); idx.lower, idx.upper
(0, n - 1)
>>> idx = Idx(i, 4); idx.lower, idx.upper
(0, 3)
>>> idx = Idx(i, oo); idx.lower, idx.upper
(0, oo)
3) No bounds given, interpretation of this depends on context.
>>> idx = Idx(i); idx.lower, idx.upper
(None, None)
4) for a literal integer instead of a symbolic label the bounds are still
there:
>>> idx = Idx(2, n); idx.lower, idx.upper
(0, n - 1)
"""
is_integer = True
def __new__(cls, label, range=None, **kw_args):
if isinstance(label, basestring):
label = Symbol(label, integer=True)
label, range = map(sympify, (label, range))
if not label.is_integer:
raise TypeError("Idx object requires an integer label")
elif ordered_iter(range):
assert len(range) == 2, "Idx got range tuple with wrong length"
for bound in range:
if not (bound.is_integer or abs(bound) is S.Infinity):
raise TypeError("Idx object requires integer bounds")
args = label, Tuple(*range)
elif isinstance(range, Expr):
if not (range.is_integer or range is S.Infinity):
raise TypeError("Idx object requires an integer dimension")
args = label, Tuple(S.Zero, range-S.One)
elif range:
raise TypeError("range must be ordered iterable or integer sympy expression")
else:
args = label,
obj = Expr.__new__(cls, *args, **kw_args)
return obj
@property
def label(self):
"""Returns the name/label of the index, or it's integer value"""
return self.args[0]
@property
def lower(self):
"""Returns the lower bound of the index"""
try:
return self.args[1][0]
except IndexError:
return
@property
def upper(self):
"""Returns the upper bound of the index"""
try:
return self.args[1][1]
except IndexError:
return
def _sympystr(self, p):
return p.doprint(self.label)
| bsd-3-clause | -3,039,205,285,116,504,000 | 31.623501 | 99 | 0.60394 | false |
1503051/webhookshipping | app.py | 1 | 1887 | #!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
from pymongo import MongoClient
# Flask app should start in global layout
app = Flask(__name__)
client = MongoClient('mongodb://localhost:27017')
@app.route('/star', methods=['POST'])
def get_one_star():
db = client['hrvisual']
collect=db['ORG_DEPT_EMP_2016']
post1 = collect.find_one({'emp_number': '1503051'})
if post1 is None:
return 'no record'
else:
return post1['emp_name']
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") != "branchcontact":
return {}
result = req.get("result")
parameters = result.get("parameters")
contact = {'Narl':'02-6630-0151', 'Ndl':'03-5726100', 'Nchc':'03-5776085', 'Cic':'03-7777777', '1503051':'0911111111'}
speech ="unknow"
branch = parameters.get("branch")
if branch is not None:
speech = "The contact information for " + branch + " is " + contact[branch]
anytxt = parameters.get("any")
if anytxt is not None:
speech = "The contact information for " + anytxt + " is " + contact[anytxt]
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-onlinestore-shipping"
}
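# Illustrative only: a minimal request body this webhook expects from API.AI
# (the field values are made up; real ones come from the configured intent):
#
#   {
#     "result": {
#       "action": "branchcontact",
#       "parameters": { "branch": "Narl" }
#     }
#   }
#
# which can be exercised locally with something like:
#
#   curl -X POST http://localhost:5000/webhook \
#        -H "Content-Type: application/json" \
#        -d '{"result": {"action": "branchcontact", "parameters": {"branch": "Narl"}}}'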
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
app.run(debug=True, port=port, host='0.0.0.0')
| apache-2.0 | -2,935,963,759,269,244,400 | 24.849315 | 126 | 0.612083 | false |
rajanandakumar/DIRAC | DataManagementSystem/Client/ReplicaManager.py | 1 | 113878 | """
:mod: ReplicaManager
.. module: ReplicaManager
:synopsis: ReplicaManager links the functionalities of StorageElement and FileCatalog.
This module consists ReplicaManager and related classes.
OBSOLETED !!! DO NOT USE THIS ANYMORE!!!! USE THE DataManager CLASS
"""
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
from types import StringTypes, ListType, DictType, StringType, TupleType
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import randomize
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite, isSameSiteSE, getSEsForCountry
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
class CatalogBase( object ):
"""
.. class:: CatalogBase
This class stores the two wrapper functions for interacting with the FileCatalog.
"""
def __init__( self ):
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
self.useCatalogPFN = Operations().getValue( 'DataManagement/UseCatalogPFN', True )
def _callFileCatalogFcnSingleFile( self, lfn, method, argsDict = None, catalogs = None ):
""" A wrapper around :CatalogBase._callFileCatalogFcn: for a single file. It parses
    the output of :CatalogBase._callFileCatalogFcn: for the first file provided as input.
If this file is found in::
* res['Value']['Successful'] an S_OK() is returned with the value.
* res['Value']['Failed'] an S_ERROR() is returned with the error message.
:warning: this function is executed only for the first LFN provided, in case of dict of LFNs
    the order of keys is NOT preserved, so the output is undefined
:param self: self reference
:param mixed lfn: LFN as string or list with LFNs or dict with LFNs as keys
:param str method: :FileCatalog: method name
:param dict argsDict: kwargs for method
:param list catalogs: list with catalog names
"""
# # default values
argsDict = argsDict if argsDict else dict()
catalogs = catalogs if catalogs else list()
    # # check type
if not lfn or type( lfn ) not in StringTypes + ( ListType, DictType ):
return S_ERROR( "wrong type (%s) for argument 'lfn'" % type( lfn ) )
singleLfn = lfn
if type( lfn ) == ListType:
singleLfn = lfn[0]
elif type( lfn ) == DictType:
singleLfn = lfn.keys()[0]
# # call only for single lfn
res = self._callFileCatalogFcn( lfn, method, argsDict, catalogs = catalogs )
if not res["OK"]:
return res
elif singleLfn in res["Value"]["Failed"]:
return S_ERROR( res["Value"]["Failed"][singleLfn] )
if not singleLfn in res["Value"]["Successful"]:
result = S_OK( {} )
for catalog in catalogs:
result['Value'][catalog] = 'OK'
return result
return S_OK( res["Value"]["Successful"][singleLfn] )
def _callFileCatalogFcn( self, lfn, method, argsDict = None, catalogs = None ):
""" A simple wrapper around the file catalog functionality
This is a wrapper around the available :FileCatalog: functions.
The :lfn: and :method: arguments must be provided.
:param self: self reference
:param mixed lfn: a single LFN string or a list of LFNs or dictionary with LFNs stored as keys.
:param str method: name of the FileCatalog function to be invoked
    :param dict argsDict: additional keyword arguments that are required for the :method:
:param list catalogs: list of catalogs the operation is to be performed on, by default this
is all available catalogs; examples are 'LcgFileCatalogCombined', 'BookkeepingDB',
'ProductionDB' etc.
"""
# # default values
argsDict = argsDict if argsDict else dict()
catalogs = catalogs if catalogs else list()
lfns = None
if not lfn or type( lfn ) not in StringTypes + ( ListType, DictType ):
errStr = "_callFileCatalogFcn: Wrong 'lfn' argument."
self.log.error( errStr )
return S_ERROR( errStr )
elif type( lfn ) in StringTypes:
lfns = { lfn : False }
elif type( lfn ) == ListType:
lfns = dict.fromkeys( lfn, False )
elif type( lfn ) == DictType:
lfns = lfn.copy()
# # lfns supplied?
if not lfns:
errMsg = "_callFileCatalogFcn: No lfns supplied."
self.log.error( errMsg )
return S_ERROR( errMsg )
self.log.debug( "_callFileCatalogFcn: Will execute '%s' method with %s lfns." % ( method, len( lfns ) ) )
# # create FileCatalog instance
fileCatalog = FileCatalog( catalogs = catalogs )
if not fileCatalog.isOK():
return S_ERROR( "Can't get FileCatalogs %s" % catalogs )
# # get symbol
fcFcn = getattr( fileCatalog, method ) if hasattr( fileCatalog, method ) else None
# # check if it is callable
fcFcn = fcFcn if callable( fcFcn ) else None
if not fcFcn:
errMsg = "_callFileCatalogFcn: '%s' isn't a member function in FileCatalog." % method
self.log.error( errMsg )
return S_ERROR( errMsg )
# # call it at least
res = fcFcn( lfns, **argsDict )
if not res["OK"]:
self.log.error( "_callFileCatalogFcn: Failed to execute '%s'." % method, res["Message"] )
return res
def _fcFuncWrapper( self, singleFile = False ):
""" choose wrapper to call
:param self: self reference
:param bool singleFile: flag to choose wrapper function, default :False: will
execute :FileCatalog._callFileCatalogFcn:
"""
return { True: self._callFileCatalogFcnSingleFile,
False: self._callFileCatalogFcn }[singleFile]
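# Illustrative sketch of how the dispatch above is used by the mixin classes
# below (the LFN is a placeholder; catalogs default to all configured ones):
#
#   cat = CatalogFile()
#   # bulk form: S_OK( { "Successful" : {...}, "Failed" : {...} } )
#   res = cat._callFileCatalogFcn( [ "/vo/user/some.file" ], "exists" )
#   # single-file form: unwraps the first LFN into a plain S_OK / S_ERROR
#   res = cat._callFileCatalogFcnSingleFile( "/vo/user/some.file", "exists" )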
class CatalogFile( CatalogBase ):
"""
.. class:: CatalogFile
  Wrappers for various :FileCatalog: methods concerning operations on files.
"""
def __init__( self ):
""" c'tor """
CatalogBase.__init__( self )
def getCatalogExists( self, lfn, singleFile = False, catalogs = None ):
""" determine whether the path is registered in the :FileCatalog: by calling
:FileCatalog.exists: method.
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "exists", catalogs = catalogs )
def getCatalogIsFile( self, lfn, singleFile = False, catalogs = None ):
""" determine whether the path is registered as a file in the :FileCatalog:
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "isFile", catalogs = catalogs )
def getCatalogFileMetadata( self, lfn, singleFile = False, catalogs = None ):
""" get the metadata associated to the LFN in the :FileCatalog:
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getFileMetadata", catalogs = catalogs )
def getCatalogFileSize( self, lfn, singleFile = False, catalogs = None ):
""" get the size registered for files in the FileCatalog
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getFileSize", catalogs = catalogs )
def getCatalogReplicas( self, lfn, allStatus = False, singleFile = False, catalogs = None ):
""" Get the replicas registered for files in the FileCatalog
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool allStatus: ???
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getReplicas", argsDict = { "allStatus" : allStatus },
catalogs = catalogs )
def getCatalogLFNForPFN( self, pfn, singleFile = False, catalogs = None ):
""" get the LFNs registered with the supplied PFNs from the FileCatalog
:param self: self reference
:param mixed pfn: the files to obtain (can be a single PFN or list of PFNs)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( pfn, 'getLFNForPFN', catalogs = catalogs )
def addCatalogFile( self, lfn, singleFile = False, catalogs = None ):
""" Add a new file to the FileCatalog
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "addFile", catalogs = catalogs )
def removeCatalogFile( self, lfn, singleFile = False, catalogs = None ):
""" remove a file from the FileCatalog
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
# # make sure lfns are sorted from the longest to the shortest
if type( lfn ) == ListType:
lfn = sorted( lfn, reverse = True )
return self._fcFuncWrapper( singleFile )( lfn, "removeFile", catalogs = catalogs )
class CatalogReplica( CatalogBase ):
"""
.. class:: CatalogReplica
  Wrappers for various :FileCatalog: methods concerning operations on replicas.
"""
def getCatalogReplicaStatus( self, lfn, singleFile = False, catalogs = None ):
""" get the status of the replica as registered in the :FileCatalog:
:param self: self reference
:param dict lfn: dict containing { LFN : SE }
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getReplicaStatus", catalogs = catalogs )
def addCatalogReplica( self, lfn, singleFile = False, catalogs = None ):
""" add a new replica to the :FileCatalog:
:param self: self reference
:param dict lfn: dictionary containing the replica properties
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "addReplica", catalogs = catalogs )
def removeCatalogReplica( self, lfn, singleFile = False, catalogs = None ):
""" remove a replica from the :FileCatalog:
:param self: self reference
:param mixed lfn: lfn to be removed
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "removeReplica", catalogs = catalogs )
def setCatalogReplicaStatus( self, lfn, singleFile = False, catalogs = None ):
""" Change the status for a replica in the :FileCatalog:
:param self: self reference
:param mixed lfn: dict with replica information to change
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "setReplicaStatus", catalogs = catalogs )
def setCatalogReplicaHost( self, lfn, singleFile = False, catalogs = None ):
""" change the registered SE for a replica in the :FileCatalog:
:param self: self reference
:param mixed lfn: dict with replica information to change
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "setReplicaHost", catalogs = catalogs )
class CatalogDirectory( CatalogBase ):
"""
.. class:: CatalogDirectory
  Wrappers for various :FileCatalog: methods concerning operations on folders.
"""
def __init__( self ):
""" c'tor """
CatalogBase.__init__( self )
def getCatalogIsDirectory( self, lfn, singleFile = False, catalogs = None ):
""" determine whether the path is registered as a directory in the :FileCatalog:
:param self: self reference
:param mixed lfn: files to check (can be a single file or list of lfns)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "isDirectory", catalogs = catalogs )
def getCatalogDirectoryMetadata( self, lfn, singleFile = False, catalogs = None ):
""" get the metadata associated to a directory in the :FileCatalog:
:param self: self reference
:param mixed lfn: folders to check (can be a single directory or list of directories)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getDirectoryMetadata", catalogs = catalogs )
def getCatalogDirectoryReplicas( self, lfn, singleFile = False, catalogs = None ):
""" get the replicas for the contents of a directory in the FileCatalog
:param self: self reference
:param mixed lfn: folders to check (can be a single directory or list of directories)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getDirectoryReplicas", catalogs = catalogs )
def getCatalogListDirectory( self, lfn, verbose = False, singleFile = False, catalogs = None ):
""" get the contents of a directory in the :FileCatalog:
:param self: self reference
:param mixed lfn: folders to check (can be a single directory or list of directories)
:param bool verbose: shout
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "listDirectory", argsDict = {"verbose": verbose},
catalogs = catalogs )
def getCatalogDirectorySize( self, lfn, singleFile = False, catalogs = None ):
""" get the size a directory in the :FileCatalog:
:param self: self reference
:param mixed lfn: folders to check (can be a single directory or list of directories)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getDirectorySize", catalogs = catalogs )
def createCatalogDirectory( self, lfn, singleFile = False, catalogs = None ):
""" mkdir in the :FileCatalog:
:param self: self reference
:param mixed lfn: the directory to create
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "createDirectory", catalogs = catalogs )
def removeCatalogDirectory( self, lfn, recursive = False, singleFile = False, catalogs = None ):
""" rmdir from the :FileCatalog:
:param self: self reference
:param mixed lfn: the directory to remove
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "removeDirectory", argsDict = {"recursive" : recursive},
catalogs = catalogs )
class CatalogLink( CatalogBase ):
"""
  .. class:: CatalogLink
  Wrappers for various :FileCatalog: methods concerning operations on links.
"""
def __init__( self ):
""" c'tor """
CatalogBase.__init__( self )
def getCatalogIsLink( self, lfn, singleFile = False, catalogs = None ):
""" determine whether the path is registered as a link in the :FileCatalog:
:param self: self reference
:param mixed lfn: path to be checked (string of list of strings)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "isLink", catalogs = catalogs )
def getCatalogReadLink( self, lfn, singleFile = False, catalogs = None ):
""" get the target of a link as registered in the :FileCatalog:
:param self: self reference
:param mixed lfn: path to be checked (string of list of strings)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "readLink", catalogs = catalogs )
def createCatalogLink( self, lfn, singleFile = False, catalogs = None ):
""" ln in the :FileCatalog: (create the link)
:param self: self reference
:param mixed lfn: link dictionary containing the target lfn and link name to create
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "createLink", catalogs = catalogs )
def removeCatalogLink( self, lfn, singleFile = False, catalogs = None ):
""" rm the link supplied from the :FileCatalog:
:param self: self reference
:param mixed lfn: link to be removed (string of list of strings)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
self._fcFuncWrapper( singleFile )( lfn, "removeLink", catalogs = catalogs )
class CatalogInterface( CatalogFile, CatalogReplica, CatalogDirectory, CatalogLink ):
"""
.. class:: CatalogInterface
Dummy class to expose all the methods of the CatalogInterface
"""
pass
class StorageBase( object ):
"""
.. class:: StorageBase
This class stores the two wrapper functions for interacting with the StorageElement.
"""
def __init__( self ):
""" c'tor """
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
def _callStorageElementFcnSingleFile( self, storageElementName, pfn, method, argsDict = None ):
""" wrapper around :StorageBase._callStorageElementFcn: for single file execution
It parses the output of :StorageBase._callStorageElementFcn: for the first pfn provided as input.
If this pfn is found in::
* res['Value']['Successful'] an S_OK() is returned with the value.
* res['Value']['Failed'] an S_ERROR() is returned with the error message.
:param self: self reference
:param str storageElementName: DIRAC SE name to be accessed e.g. CERN-DST
:param mixed pfn: contains a single PFN string or a list of PFNs or dictionary containing PFNs
:param str method: name of the :StorageElement: method to be invoked
:param dict argsDict: additional keyword arguments that are required for the :method:
"""
argsDict = argsDict if argsDict else {}
# # call wrapper
res = self._callStorageElementFcn( storageElementName, pfn, method, argsDict )
# # check type
if type( pfn ) == ListType:
pfn = pfn[0]
elif type( pfn ) == DictType:
pfn = pfn.keys()[0]
# # check results
if not res["OK"]:
return res
elif pfn in res["Value"]["Failed"]:
errorMessage = res["Value"]["Failed"][pfn]
return S_ERROR( errorMessage )
else:
return S_OK( res["Value"]["Successful"][pfn] )
def _callStorageElementFcn( self, storageElementName, pfn, method, argsDict = None ):
""" a simple wrapper around the :StorageElement: functionality
:param self: self reference
:param str storageElementName: DIRAC SE name to be accessed e.g. CERN-DST
:param mixed pfn: contains a single PFN string or a list of PFNs or dictionary containing PFNs
:param str method: name of the :StorageElement: method to be invoked
:param dict argsDict: additional keyword arguments that are required for the :method:
"""
argsDict = argsDict if argsDict else {}
# # check pfn type
if type( pfn ) in StringTypes:
pfns = {pfn : False}
elif type( pfn ) == ListType:
pfns = dict.fromkeys( pfn, False )
elif type( pfn ) == DictType:
pfns = pfn.copy()
else:
errStr = "_callStorageElementFcn: Supplied pfns must be a str, list of str or dict."
self.log.error( errStr )
return S_ERROR( errStr )
# # have we got some pfns?
if not pfns:
errMessage = "_callStorageElementFcn: No pfns supplied."
self.log.error( errMessage )
return S_ERROR( errMessage )
self.log.debug( "_callStorageElementFcn: Will execute '%s' with %s pfns." % ( method, len( pfns ) ) )
storageElement = StorageElement( storageElementName )
res = storageElement.isValid( method )
if not res['OK']:
errStr = "_callStorageElementFcn: Failed to instantiate Storage Element"
self.log.error( errStr, "for performing %s at %s." % ( method, storageElementName ) )
return res
    # # get symbol
fcFcn = getattr( storageElement, method ) if hasattr( storageElement, method ) else None
# # make sure it is callable
fcFcn = fcFcn if callable( fcFcn ) else None
if not fcFcn:
errMsg = "_callStorageElementFcn: '%s' isn't a member function in StorageElement." % method
self.log.error( errMsg )
return S_ERROR( errMsg )
# # call it at least
res = fcFcn( pfns, **argsDict )
# # return the output
if not res["OK"]:
errStr = "_callStorageElementFcn: Completely failed to perform %s." % method
self.log.error( errStr, '%s : %s' % ( storageElementName, res["Message"] ) )
return res
def _seFuncWrapper( self, singleFile = False ):
""" choose wrapper to call
:param self: self reference
:param bool singleFile: flag to choose wrapper function, default :False: will
execute :StorageBase._callStorageElementFcn:
"""
return { True: self._callStorageElementFcnSingleFile,
False: self._callStorageElementFcn }[singleFile]
def getPfnForLfn( self, lfns, storageElementName ):
""" get PFNs for supplied LFNs at :storageElementName: SE
:param self: self reference
:param list lfns: list of LFNs
    :param str storageElementName: DIRAC SE name
"""
if type( lfns ) == type( '' ):
lfns = [lfns]
storageElement = StorageElement( storageElementName )
res = storageElement.isValid( "getPfnForLfn" )
if not res['OK']:
self.log.error( "getPfnForLfn: Failed to instantiate StorageElement at %s" % storageElementName )
return res
retDict = { "Successful" : {}, "Failed" : {} }
for lfn in lfns:
res = storageElement.getPfnForLfn( lfn )
if res["OK"] and lfn in res['Value']['Successful']:
retDict["Successful"][lfn] = res["Value"]['Successful'][lfn]
else:
retDict["Failed"][lfn] = res.get( "Message", res.get( 'Value', {} ).get( 'Failed', {} ).get( lfn ) )
return S_OK( retDict )
def getLfnForPfn( self, pfns, storageElementName ):
""" get LFNs for supplied PFNs at :storageElementName: SE
:param self: self reference
:param list lfns: list of LFNs
    :param str storageElementName: DIRAC SE name
"""
storageElement = StorageElement( storageElementName )
res = storageElement.isValid( "getPfnPath" )
if not res['OK']:
self.log.error( "getLfnForPfn: Failed to instantiate StorageElement at %s" % storageElementName )
return res
retDict = { "Successful" : {}, "Failed" : {} }
for pfn in pfns:
res = storageElement.getPfnPath( pfn )
if res["OK"]:
retDict["Successful"][pfn] = res["Value"]
else:
retDict["Failed"][pfn] = res["Message"]
return S_OK( retDict )
def getPfnForProtocol( self, pfns, storageElementName, protocol = "SRM2", withPort = True ):
""" create PFNs strings at :storageElementName: SE using protocol :protocol:
:param self: self reference
:param list pfns: list of PFNs
:param str storageElementName: DIRAC SE name
:param str protocol: protocol name (default: 'SRM2')
:param bool withPort: flag to include port in PFN (default: True)
"""
storageElement = StorageElement( storageElementName )
res = storageElement.isValid( "getPfnForProtocol" )
if not res["OK"]:
self.log.error( "getPfnForProtocol: Failed to instantiate StorageElement at %s" % storageElementName )
return res
retDict = { "Successful" : {}, "Failed" : {}}
for pfn in pfns:
res = returnSingleResult( storageElement.getPfnForProtocol( pfn, protocol, withPort = withPort ) )
if res["OK"]:
retDict["Successful"][pfn] = res["Value"]
else:
retDict["Failed"][pfn] = res["Message"]
return S_OK( retDict )
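# Illustrative sketch (SE name and LFN are placeholders):
#
#   sb = StorageBase()
#   res = sb.getPfnForLfn( [ "/vo/user/some.file" ], "SOME-SE" )
#   # -> S_OK( { "Successful" : { lfn : pfn }, "Failed" : {} } )
#   res = sb.getPfnForProtocol( [ "srm://host/path/some.file" ], "SOME-SE",
#                               protocol = "SRM2", withPort = False )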
class StorageFile( StorageBase ):
"""
.. class:: StorageFile
  Wrappers for various :StorageElement: methods concerning operations on files.
"""
def __init__( self ):
""" c'tor """
StorageBase.__init__( self )
def getStorageFileExists( self, physicalFile, storageElementName, singleFile = False ):
""" determine the existance of the physical files
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "exists" )
def getStorageFileIsFile( self, physicalFile, storageElementName, singleFile = False ):
""" determine if supplied physical paths are files
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "isFile" )
def getStorageFileSize( self, physicalFile, storageElementName, singleFile = False ):
""" get the size of the physical files
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "getFileSize" )
def getStorageFileAccessUrl( self, physicalFile, storageElementName, protocol = None, singleFile = False ):
""" get the access url for a physical file
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
protocol = protocol if protocol else list()
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile,
"getAccessUrl", argsDict = {"protocol" : protocol} )
def getStorageFileMetadata( self, physicalFile, storageElementName, singleFile = False ):
""" get the metadatas for physical files
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "getFileMetadata" )
def removeStorageFile( self, physicalFile, storageElementName, singleFile = False ):
""" rm supplied physical files from :storageElementName: DIRAC SE
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "removeFile" )
def prestageStorageFile( self, physicalFile, storageElementName, lifetime = 86400, singleFile = False ):
""" prestage physical files
:param self: self reference
:param mixed physicalFile: PFNs to be prestaged
:param str storageElement: SE name
:param int lifetime: 24h in seconds
:param bool singleFile: flag to prestage only one file
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile,
"prestageFile", argsDict = {"lifetime" : lifetime} )
def getPrestageStorageFileStatus( self, physicalFile, storageElementName, singleFile = False ):
""" get the status of a pre-stage request
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "prestageFileStatus" )
def pinStorageFile( self, physicalFile, storageElementName, lifetime = 86400, singleFile = False ):
""" pin physical files with a given lifetime
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile,
"pinFile", argsDict = {"lifetime": lifetime} )
def releaseStorageFile( self, physicalFile, storageElementName, singleFile = False ):
""" release the pin on physical files
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "releaseFile" )
def getStorageFile( self, physicalFile, storageElementName, localPath = False, singleFile = False ):
""" create a local copy of a physical file
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
    :param mixed localPath: string with local path to use or False (if False, os.getcwd() will be used)
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile,
"getFile", argsDict = {"localPath": localPath} )
def putStorageFile( self, physicalFile, storageElementName, singleFile = False ):
""" put the local file to the storage element
:param self: self reference
:param mixed physicalFile: dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "putFile" )
def replicateStorageFile( self, physicalFile, size, storageElementName, singleFile = False ):
""" replicate a physical file to a storage element
:param self: self reference
:param mixed physicalFile: dictionary with PFN information
:param int size: size of PFN in bytes
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile,
'replicateFile', argsDict = {'sourceSize': size} )
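# Illustrative sketch (PFN and SE name are placeholders); the 'Cached' element
# of the metadata tells whether the file is staged on disk:
#
#   sf = StorageFile()
#   pfn = "srm://host/path/some.file"
#   res = sf.getStorageFileMetadata( pfn, "SOME-SE", singleFile = True )
#   if res["OK"] and res["Value"].get( "Cached" ):
#     sf.getStorageFile( pfn, "SOME-SE", localPath = "/tmp", singleFile = True )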
class StorageDirectory( StorageBase ):
"""
.. class:: StorageDirectory
  Wrappers for various :StorageElement: methods concerning operations on folders.
"""
def __init__( self ):
""" c'tor """
StorageBase.__init__( self )
def getStorageDirectoryIsDirectory( self, storageDirectory, storageElementName, singleDirectory = False ):
""" determine if the storage paths are directories
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory, "isDirectory" )
def getStorageDirectoryMetadata( self, storageDirectory, storageElementName, singleDirectory = False ):
""" get the metadata for storage directories
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory, "getDirectoryMetadata" )
def getStorageDirectorySize( self, storageDirectory, storageElementName, singleDirectory = False ):
""" get the size of the storage directories
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory, "getDirectorySize" )
def getStorageListDirectory( self, storageDirectory, storageElementName, singleDirectory = False ):
""" ls of a directory in the Storage Element
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory, "listDirectory" )
def getStorageDirectory( self, storageDirectory, storageElementName, localPath = False, singleDirectory = False ):
""" copy the contents of a directory from the Storage Element to local folder
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
    :param mixed localPath: destination folder, if False, os.getcwd() will be used
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory,
"getDirectory", argsDict = {'localPath': localPath} )
def putStorageDirectory( self, storageDirectory, storageElementName, singleDirectory = False ):
""" put the local directory to the storage element
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory, "putDirectory" )
def removeStorageDirectory( self, storageDirectory, storageElementName, recursive = False, singleDirectory = False ):
""" rmdir a directory from the storage element
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory,
"removeDirectory", argsDict = {"recursive": recursive} )
class StorageInterface( StorageFile, StorageDirectory ):
"""
.. class:: StorageInterface
Dummy class to expose all the methods of the StorageInterface
"""
def __init__( self ):
""" c'tor """
StorageFile.__init__( self )
StorageDirectory.__init__( self )
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
class CatalogToStorage( CatalogInterface, StorageInterface ):
"""
.. class:: CatalogToStorage
Collection of functions doing simple replica<-->Storage element operations.
"""
def __init__( self ):
""" c'tor """
CatalogInterface.__init__( self )
StorageInterface.__init__( self )
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
def _replicaSEFcnWrapper( self, singleFile = False ):
""" choose wrapper to call
:param self: self reference
:param bool singleFile: flag to choose wrapper function, default :False: will
execute :CatalogToStorage._callReplicaSEFcn:
"""
return { True: self._callReplicaSEFcnSingleFile,
False: self._callReplicaSEFcn }[singleFile]
def _callReplicaSEFcnSingleFile( self, storageElementName, lfn, method, argsDict = None ):
""" call :method: of StorageElement :storageElementName: for single :lfn: using :argsDict: kwargs
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: LFN
:param str method: StorageElement function name
:param dict argsDict: kwargs of :method:
"""
# # default value
argsDict = argsDict if argsDict else {}
# # get single LFN
singleLfn = lfn
if type( lfn ) == ListType:
singleLfn = lfn[0]
elif type( lfn ) == DictType:
singleLfn = lfn.keys()[0]
# # call method
res = self._callReplicaSEFcn( storageElementName, singleLfn, method, argsDict )
# # check results
if not res["OK"]:
return res
elif singleLfn in res["Value"]["Failed"]:
return S_ERROR( res["Value"]["Failed"][singleLfn] )
return S_OK( res["Value"]["Successful"][singleLfn] )
def _callReplicaSEFcn( self, storageElementName, lfn, method, argsDict = None ):
""" a simple wrapper that allows replica querying then perform the StorageElement operation
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
"""
# # default value
argsDict = argsDict if argsDict else {}
# # get replicas for lfn
res = self._callFileCatalogFcn( lfn, "getReplicas" )
if not res["OK"]:
errStr = "_callReplicaSEFcn: Completely failed to get replicas for LFNs."
self.log.error( errStr, res["Message"] )
return res
    # # returned dict, get failed replicas
retDict = { "Failed": res["Value"]["Failed"],
"Successful" : {} }
# # print errors
for lfn, reason in retDict["Failed"].items():
self.log.error( "_callReplicaSEFcn: Failed to get replicas for file.", "%s %s" % ( lfn, reason ) )
# # good replicas
lfnReplicas = res["Value"]["Successful"]
# # store PFN to LFN mapping
pfnDict = {}
for lfn, replicas in lfnReplicas.items():
if storageElementName in replicas:
useCatalogPFN = Operations().getValue( 'DataManagement/UseCatalogPFN', True )
if useCatalogPFN:
pfn = replicas[storageElementName]
else:
res = self.getPfnForLfn( lfn, storageElementName )
pfn = res.get( 'Value', {} ).get( 'Successful', {} ).get( lfn, replicas[storageElementName] )
pfnDict[pfn] = lfn
else:
errStr = "_callReplicaSEFcn: File hasn't got replica at supplied Storage Element."
self.log.error( errStr, "%s %s" % ( lfn, storageElementName ) )
retDict["Failed"][lfn] = errStr
# # call StorageElement function at least
res = self._callStorageElementFcn( storageElementName, pfnDict.keys(), method, argsDict )
# # check result
if not res["OK"]:
errStr = "_callReplicaSEFcn: Failed to execute %s StorageElement method." % method
self.log.error( errStr, res["Message"] )
return res
    # # filter out failed and successful
for pfn, pfnRes in res["Value"]["Successful"].items():
retDict["Successful"][pfnDict[pfn]] = pfnRes
for pfn, errorMessage in res["Value"]["Failed"].items():
retDict["Failed"][pfnDict[pfn]] = errorMessage
return S_OK( retDict )
def getReplicaIsFile( self, lfn, storageElementName, singleFile = False ):
""" determine whether the supplied lfns are files at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn, "isFile" )
def getReplicaSize( self, lfn, storageElementName, singleFile = False ):
""" get the size of files for the lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn, "getFileSize" )
def getReplicaAccessUrl( self, lfn, storageElementName, singleFile = False ):
""" get the access url for lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn, "getAccessUrl" )
def getReplicaMetadata( self, lfn, storageElementName, singleFile = False ):
""" get the file metadata for lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn, "getFileMetadata" )
def prestageReplica( self, lfn, storageElementName, lifetime = 86400, singleFile = False ):
""" issue a prestage requests for the lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn,
"prestageFile", argsDict = {"lifetime": lifetime} )
def getPrestageReplicaStatus( self, lfn, storageElementName, singleFile = False ):
""" This functionality is not supported.
Then what is it doing here? Not supported -> delete it!
"""
return S_ERROR( "Not supported functionality. Please use getReplicaMetadata and check the 'Cached' element." )
def pinReplica( self, lfn, storageElementName, lifetime = 86400, singleFile = False ):
""" pin the lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn,
"pinFile", argsDict = {"lifetime": lifetime} )
def releaseReplica( self, lfn, storageElementName, singleFile = False ):
""" release pins for the lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn, "releaseFile" )
def getReplica( self, lfn, storageElementName, localPath = False, singleFile = False ):
""" copy replicas from DIRAC SE to local directory
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param mixed localPath: path in the local file system, if False, os.getcwd() will be used
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn,
"getFile", argsDict = {"localPath": localPath} )
class ReplicaManager( CatalogToStorage ):
"""
.. class:: ReplicaManager
  A ReplicaManager brings together all available StorageElement and FileCatalog functionalities.
"""
def __init__( self ):
""" c'tor
:param self: self reference
"""
CatalogToStorage.__init__( self )
self.fileCatalogue = FileCatalog()
self.accountingClient = None
self.registrationProtocol = ['SRM2', 'DIP']
self.thirdPartyProtocols = ['SRM2', 'DIP']
self.resourceStatus = ResourceStatus()
self.ignoreMissingInFC = Operations().getValue( 'DataManagement/IgnoreMissingInFC', False )
def setAccountingClient( self, client ):
""" Set Accounting Client instance
"""
self.accountingClient = client
def __verifyOperationPermission( self, path ):
""" Check if we have write permission to the given directory
"""
if type( path ) in StringTypes:
paths = [ path ]
else:
paths = path
fc = FileCatalog()
res = fc.getPathPermissions( paths )
if not res['OK']:
return res
for path in paths:
if not res['Value']['Successful'].get( path, {} ).get( 'Write', False ):
return S_OK( False )
return S_OK( True )
##########################################################################
#
# These are the bulk removal methods
#
def cleanLogicalDirectory( self, lfnDir ):
""" Clean the logical directory from the catalog and storage
"""
if type( lfnDir ) in StringTypes:
lfnDir = [ lfnDir ]
retDict = { "Successful" : {}, "Failed" : {} }
for folder in lfnDir:
res = self.__cleanDirectory( folder )
if not res['OK']:
self.log.error( "Failed to clean directory.", "%s %s" % ( folder, res['Message'] ) )
retDict["Failed"][folder] = res['Message']
else:
self.log.info( "Successfully removed directory.", folder )
retDict["Successful"][folder] = res['Value']
return S_OK( retDict )
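  # Editor's sketch (directory name invented; assumes an initialised DIRAC environment with
  # write permission for the proxy). cleanLogicalDirectory removes the directory contents
  # from the catalog and from every SE listed under
  # Resources/StorageElementGroups/SE_Cleaning_List, reporting per-directory results:
  #   rm = ReplicaManager()
  #   res = rm.cleanLogicalDirectory( '/vo/user/a/alice/old_results' )
  #   if res['OK']:
  #     print res['Value']['Failed']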
def __cleanDirectory( self, folder ):
""" delete all files from directory :folder: in FileCatalog and StorageElement
:param self: self reference
:param str folder: directory name
"""
res = self.__verifyOperationPermission( folder )
if not res['OK']:
return res
if not res['Value']:
errStr = "__cleanDirectory: Write access not permitted for this credential."
self.log.error( errStr, folder )
return S_ERROR( errStr )
res = self.__getCatalogDirectoryContents( [ folder ] )
if not res['OK']:
return res
res = self.removeFile( res['Value'].keys() + [ '%s/dirac_directory' % folder ] )
if not res['OK']:
return res
for lfn, reason in res['Value']['Failed'].items():
self.log.error( "Failed to remove file found in the catalog", "%s %s" % ( lfn, reason ) )
storageElements = gConfig.getValue( 'Resources/StorageElementGroups/SE_Cleaning_List', [] )
failed = False
for storageElement in sorted( storageElements ):
res = self.__removeStorageDirectory( folder, storageElement )
if not res['OK']:
failed = True
if failed:
return S_ERROR( "Failed to clean storage directory at all SEs" )
res = self.removeCatalogDirectory( folder, recursive = True, singleFile = True )
if not res['OK']:
return res
return S_OK()
def __removeStorageDirectory( self, directory, storageElement ):
""" delete SE directory
:param self: self reference
:param str directory: folder to be removed
:param str storageElement: DIRAC SE name
"""
self.log.info( 'Removing the contents of %s at %s' % ( directory, storageElement ) )
res = self.getPfnForLfn( [directory], storageElement )
if not res['OK']:
self.log.error( "Failed to get PFN for directory", res['Message'] )
return res
for directory, error in res['Value']['Failed'].items():
self.log.error( 'Failed to obtain directory PFN from LFN', '%s %s' % ( directory, error ) )
if res['Value']['Failed']:
return S_ERROR( 'Failed to obtain directory PFN from LFNs' )
storageDirectory = res['Value']['Successful'].values()[0]
res = self.getStorageFileExists( storageDirectory, storageElement, singleFile = True )
if not res['OK']:
      self.log.error( "Failed to obtain existence of directory", res['Message'] )
return res
exists = res['Value']
if not exists:
self.log.info( "The directory %s does not exist at %s " % ( directory, storageElement ) )
return S_OK()
res = self.removeStorageDirectory( storageDirectory, storageElement, recursive = True, singleDirectory = True )
if not res['OK']:
self.log.error( "Failed to remove storage directory", res['Message'] )
return res
self.log.info( "Successfully removed %d files from %s at %s" % ( res['Value']['FilesRemoved'],
directory,
storageElement ) )
return S_OK()
  def __getCatalogDirectoryContents( self, directories ):
    """ recursively list all files found under the supplied directories
:param self: self reference
:param list directories: folder names
"""
self.log.info( 'Obtaining the catalog contents for %d directories:' % len( directories ) )
for directory in directories:
self.log.info( directory )
activeDirs = directories
allFiles = {}
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
res = self.getCatalogListDirectory( currentDir, singleFile = True )
activeDirs.remove( currentDir )
if not res['OK'] and res['Message'].endswith( 'The supplied path does not exist' ):
self.log.info( "The supplied directory %s does not exist" % currentDir )
elif not res['OK']:
self.log.error( 'Failed to get directory contents', '%s %s' % ( currentDir, res['Message'] ) )
else:
dirContents = res['Value']
activeDirs.extend( dirContents['SubDirs'] )
allFiles.update( dirContents['Files'] )
self.log.info( "Found %d files" % len( allFiles ) )
return S_OK( allFiles )
def getReplicasFromDirectory( self, directory ):
""" get all replicas from a given directory
:param self: self reference
:param mixed directory: list of directories or one directory
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
res = self.__getCatalogDirectoryContents( directories )
if not res['OK']:
return res
allReplicas = {}
for lfn, metadata in res['Value'].items():
allReplicas[lfn] = metadata['Replicas']
return S_OK( allReplicas )
def getFilesFromDirectory( self, directory, days = 0, wildcard = '*' ):
""" get all files from :directory: older than :days: days matching to :wildcard:
:param self: self reference
:param mixed directory: list of directories or directory name
:param int days: ctime days
:param str wildcard: pattern to match
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
self.log.info( "Obtaining the files older than %d days in %d directories:" % ( days, len( directories ) ) )
for folder in directories:
self.log.info( folder )
activeDirs = directories
allFiles = []
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
# We only need the metadata (verbose) if a limit date is given
res = self.getCatalogListDirectory( currentDir, verbose = ( days != 0 ), singleFile = True )
activeDirs.remove( currentDir )
if not res['OK']:
self.log.error( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] ) )
else:
dirContents = res['Value']
subdirs = dirContents['SubDirs']
files = dirContents['Files']
self.log.info( "%s: %d files, %d sub-directories" % ( currentDir, len( files ), len( subdirs ) ) )
for subdir in subdirs:
if ( not days ) or self.__isOlderThan( subdirs[subdir]['CreationDate'], days ):
if subdir[0] != '/':
subdir = currentDir + '/' + subdir
activeDirs.append( subdir )
for fileName in files:
fileInfo = files[fileName]
fileInfo = fileInfo.get( 'Metadata', fileInfo )
if ( not days ) or not fileInfo.get( 'CreationDate' ) or self.__isOlderThan( fileInfo['CreationDate'], days ):
if wildcard == '*' or fnmatch.fnmatch( fileName, wildcard ):
fileName = fileInfo.get( 'LFN', fileName )
allFiles.append( fileName )
return S_OK( allFiles )
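  # Editor's sketch (path invented, rm is a ReplicaManager instance): getFilesFromDirectory
  # walks the catalog recursively, keeping files whose CreationDate is older than 'days'
  # (0 disables the age cut) and whose name matches 'wildcard':
  #   res = rm.getFilesFromDirectory( '/vo/data/2012', days = 30, wildcard = '*.raw' )
  #   oldRawFiles = res['Value'] if res['OK'] else []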
def __isOlderThan( self, stringTime, days ):
timeDelta = timedelta( days = days )
maxCTime = datetime.utcnow() - timeDelta
# st = time.strptime( stringTime, "%a %b %d %H:%M:%S %Y" )
# cTimeStruct = datetime( st[0], st[1], st[2], st[3], st[4], st[5], st[6], None )
cTimeStruct = stringTime
if cTimeStruct < maxCTime:
return True
return False
##########################################################################
#
# These are the data transfer methods
#
def getFile( self, lfn, destinationDir = '' ):
""" Get a local copy of a LFN from Storage Elements.
'lfn' is the logical file name for the desired file
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "getFile: Supplied lfn must be string or list of strings."
self.log.error( errStr )
return S_ERROR( errStr )
self.log.verbose( "getFile: Attempting to get %s files." % len( lfns ) )
res = self.getActiveReplicas( lfns )
if not res['OK']:
return res
failed = res['Value']['Failed']
lfnReplicas = res['Value']['Successful']
res = self.getCatalogFileMetadata( lfnReplicas.keys() )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
fileMetadata = res['Value']['Successful']
successful = {}
for lfn in fileMetadata:
res = self.__getFile( lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir )
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = res['Value']
return S_OK( { 'Successful': successful, 'Failed' : failed } )
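  # Editor's sketch (LFN invented, rm is a ReplicaManager instance): getFile downloads the
  # first replica that passes the size and adler32 checks, trying SEs in the order given by
  # _getSEProximity (local site first, then same country, then the remaining SEs):
  #   res = rm.getFile( '/vo/user/a/alice/file.dat', destinationDir = '/tmp' )
  #   if res['OK']:
  #     localPath = res['Value']['Successful'].get( '/vo/user/a/alice/file.dat' )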
def __getFile( self, lfn, replicas, metadata, destinationDir ):
if not replicas:
self.log.error( "No accessible replicas found" )
return S_ERROR( "No accessible replicas found" )
# Determine the best replicas
res = self._getSEProximity( replicas.keys() )
if not res['OK']:
return res
for storageElementName in res['Value']:
physicalFile = replicas[storageElementName]
# print '__getFile', physicalFile, replicas[storageElementName]
res = self.getStorageFile( physicalFile,
storageElementName,
localPath = os.path.realpath( destinationDir ),
singleFile = True )
if not res['OK']:
self.log.error( "Failed to get %s from %s" % ( lfn, storageElementName ), res['Message'] )
else:
localFile = os.path.realpath( os.path.join( destinationDir, os.path.basename( lfn ) ) )
localAdler = fileAdler( localFile )
if ( metadata['Size'] != res['Value'] ):
self.log.error( "Size of downloaded file (%d) does not match catalog (%d)" % ( res['Value'],
metadata['Size'] ) )
elif ( metadata['Checksum'] ) and ( not compareAdler( metadata['Checksum'], localAdler ) ):
self.log.error( "Checksum of downloaded file (%s) does not match catalog (%s)" % ( localAdler,
metadata['Checksum'] ) )
else:
return S_OK( localFile )
self.log.error( "getFile: Failed to get local copy from any replicas.", lfn )
return S_ERROR( "ReplicaManager.getFile: Failed to get local copy from any replicas." )
def _getSEProximity( self, ses ):
""" get SE proximity """
siteName = DIRAC.siteName()
localSEs = [se for se in getSEsForSite( siteName )['Value'] if se in ses]
countrySEs = []
countryCode = str( siteName ).split( '.' )[-1]
res = getSEsForCountry( countryCode )
if res['OK']:
countrySEs = [se for se in res['Value'] if se in ses and se not in localSEs]
sortedSEs = randomize( localSEs ) + randomize( countrySEs )
sortedSEs += randomize( [se for se in ses if se not in sortedSEs] )
return S_OK( sortedSEs )
def putAndRegister( self, lfn, fileName, diracSE, guid = None, path = None, checksum = None, catalog = None, ancestors = None ):
""" Put a local file to a Storage Element and register in the File Catalogues
'lfn' is the file LFN
        'fileName' is the full path to the local file
'diracSE' is the Storage Element to which to put the file
'guid' is the guid with which the file is to be registered (if not provided will be generated)
'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
"""
ancestors = ancestors if ancestors else list()
res = self.__verifyOperationPermission( os.path.dirname( lfn ) )
if not res['OK']:
return res
if not res['Value']:
errStr = "putAndRegister: Write access not permitted for this credential."
self.log.error( errStr, lfn )
return S_ERROR( errStr )
# Instantiate the desired file catalog
if catalog:
self.fileCatalogue = FileCatalog( catalog )
if not self.fileCatalogue.isOK():
return S_ERROR( "Can't get FileCatalog %s" % catalog )
else:
self.fileCatalogue = FileCatalog()
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "putAndRegister: Supplied file does not exist."
self.log.error( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "putAndRegister: Supplied file is zero size."
self.log.error( errStr, fileName )
return S_ERROR( errStr )
# If the GUID is not given, generate it here
if not guid:
guid = makeGuid( fileName )
if not checksum:
self.log.info( "putAndRegister: Checksum information not provided. Calculating adler32." )
checksum = fileAdler( fileName )
self.log.info( "putAndRegister: Checksum calculated to be %s." % checksum )
res = self.fileCatalogue.exists( {lfn:guid} )
if not res['OK']:
errStr = "putAndRegister: Completely failed to determine existence of destination LFN."
self.log.error( errStr, lfn )
return res
if lfn not in res['Value']['Successful']:
errStr = "putAndRegister: Failed to determine existence of destination LFN."
self.log.error( errStr, lfn )
return S_ERROR( errStr )
if res['Value']['Successful'][lfn]:
if res['Value']['Successful'][lfn] == lfn:
errStr = "putAndRegister: The supplied LFN already exists in the File Catalog."
self.log.error( errStr, lfn )
else:
errStr = "putAndRegister: This file GUID already exists for another file. " \
"Please remove it and try again."
self.log.error( errStr, res['Value']['Successful'][lfn] )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Successful'][lfn] ) )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "putAndRegister: The storage element is not currently valid."
self.log.error( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
destinationSE = storageElement.getStorageElementName()['Value']
res = storageElement.getPfnForLfn( lfn )
if not res['OK'] or lfn not in res['Value']['Successful']:
errStr = "putAndRegister: Failed to generate destination PFN."
self.log.error( errStr, res.get( 'Message', res.get( 'Value', {} ).get( 'Failed', {} ).get( lfn ) ) )
return S_ERROR( errStr )
destPfn = res['Value']['Successful'][lfn]
fileDict = {destPfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
oDataOperation = self.__initialiseAccountingObject( 'putAndRegister', diracSE, 1 )
oDataOperation.setStartTime()
oDataOperation.setValueByKey( 'TransferSize', size )
startTime = time.time()
res = storageElement.putFile( fileDict, singleFile = True )
putTime = time.time() - startTime
oDataOperation.setValueByKey( 'TransferTime', putTime )
if not res['OK']:
errStr = "putAndRegister: Failed to put file to Storage Element."
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.info( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
self.log.error( errStr, "%s: %s" % ( fileName, res['Message'] ) )
return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
successful[lfn] = {'put': putTime}
###########################################################
# Perform the registration here
oDataOperation.setValueByKey( 'RegistrationTotal', 1 )
fileTuple = ( lfn, destPfn, size, destinationSE, guid, checksum )
registerDict = {'LFN':lfn, 'PFN':destPfn, 'Size':size, 'TargetSE':destinationSE, 'GUID':guid, 'Addler':checksum}
startTime = time.time()
res = self.registerFile( fileTuple, catalog = catalog )
registerTime = time.time() - startTime
oDataOperation.setValueByKey( 'RegistrationTime', registerTime )
if not res['OK']:
errStr = "putAndRegister: Completely failed to register file."
self.log.error( errStr, res['Message'] )
failed[lfn] = { 'register' : registerDict }
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
elif lfn in res['Value']['Failed']:
errStr = "putAndRegister: Failed to register file."
self.log.error( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
failed[lfn] = { 'register' : registerDict }
else:
successful[lfn]['register'] = registerTime
oDataOperation.setValueByKey( 'RegistrationOK', 1 )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.info( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
return S_OK( {'Successful': successful, 'Failed': failed } )
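  # Editor's sketch (paths, SE and LFN invented; assumes the proxy has write permission):
  # putAndRegister uploads a local file and registers it in the catalog in one call, and
  # sends a DataOperation accounting record covering both the transfer and the registration.
  # A file that was copied but not registered appears under 'Failed' with a 'register' dict
  # that can be used to replay the registration:
  #   rm = ReplicaManager()
  #   res = rm.putAndRegister( '/vo/user/a/alice/new.dat', '/home/alice/new.dat', 'EXAMPLE-USER-SE' )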
def replicateAndRegister( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' , catalog = '' ):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (if not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if it is to be different from the LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
successful = {}
failed = {}
self.log.verbose( "replicateAndRegister: Attempting to replicate %s to %s." % ( lfn, destSE ) )
startReplication = time.time()
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
replicationTime = time.time() - startReplication
if not res['OK']:
errStr = "ReplicaManager.replicateAndRegister: Completely failed to replicate file."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
if not res['Value']:
# The file was already present at the destination SE
self.log.info( "replicateAndRegister: %s already present at %s." % ( lfn, destSE ) )
successful[lfn] = { 'replicate' : 0, 'register' : 0 }
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
successful[lfn] = { 'replicate' : replicationTime }
destPfn = res['Value']['DestPfn']
destSE = res['Value']['DestSE']
self.log.verbose( "replicateAndRegister: Attempting to register %s at %s." % ( destPfn, destSE ) )
replicaTuple = ( lfn, destPfn, destSE )
startRegistration = time.time()
res = self.registerReplica( replicaTuple, catalog = catalog )
registrationTime = time.time() - startRegistration
if not res['OK']:
# Need to return to the client that the file was replicated but not registered
errStr = "replicateAndRegister: Completely failed to register replica."
self.log.error( errStr, res['Message'] )
failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
else:
if lfn in res['Value']['Successful']:
self.log.info( "replicateAndRegister: Successfully registered replica." )
successful[lfn]['register'] = registrationTime
else:
errStr = "replicateAndRegister: Failed to register replica."
self.log.info( errStr, res['Value']['Failed'][lfn] )
failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
return S_OK( {'Successful': successful, 'Failed': failed} )
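  # Editor's sketch (LFN and SE invented, rm is a ReplicaManager instance):
  # replicateAndRegister copies an existing file to an additional SE and registers the new
  # replica; per-step timings are returned under 'Successful', while a replica that was
  # copied but not registered shows up under 'Failed' with a 'Registration' dict:
  #   res = rm.replicateAndRegister( '/vo/user/a/alice/file.dat', 'EXAMPLE-TAPE-SE' )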
def replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
""" Replicate a LFN to a destination SE without registering the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (if not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if it is to be different from the LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
self.log.verbose( "replicate: Attempting to replicate %s to %s." % ( lfn, destSE ) )
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
if not res['OK']:
errStr = "replicate: Replication failed."
self.log.error( errStr, "%s %s" % ( lfn, destSE ) )
return res
if not res['Value']:
# The file was already present at the destination SE
self.log.info( "replicate: %s already present at %s." % ( lfn, destSE ) )
return res
return S_OK( lfn )
def __replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
""" Replicate a LFN to a destination SE.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (if not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if it is to be different from the LHCb convention
"""
###########################################################
# Check that we have write permissions to this directory.
res = self.__verifyOperationPermission( lfn )
if not res['OK']:
return res
if not res['Value']:
errStr = "__replicate: Write access not permitted for this credential."
self.log.error( errStr, lfn )
return S_ERROR( errStr )
self.log.verbose( "__replicate: Performing replication initialization." )
res = self.__initializeReplication( lfn, sourceSE, destSE )
if not res['OK']:
self.log.error( "__replicate: Replication initialisation failed.", lfn )
return res
destStorageElement = res['Value']['DestStorage']
lfnReplicas = res['Value']['Replicas']
destSE = res['Value']['DestSE']
catalogueSize = res['Value']['CatalogueSize']
###########################################################
# If the LFN already exists at the destination we have nothing to do
if destSE in lfnReplicas:
self.log.info( "__replicate: LFN is already registered at %s." % destSE )
return S_OK()
###########################################################
# Resolve the best source storage elements for replication
self.log.verbose( "__replicate: Determining the best source replicas." )
res = self.__resolveBestReplicas( lfn, sourceSE, lfnReplicas, catalogueSize )
if not res['OK']:
self.log.error( "__replicate: Best replica resolution failed.", lfn )
return res
replicaPreference = res['Value']
###########################################################
# Now perform the replication for the file
if destPath:
destPath = '%s/%s' % ( destPath, os.path.basename( lfn ) )
else:
destPath = lfn
res = destStorageElement.getPfnForLfn( destPath )
if not res['OK'] or destPath not in res['Value']['Successful']:
errStr = "__replicate: Failed to generate destination PFN."
self.log.error( errStr, res.get( 'Message', res.get( 'Value', {} ).get( 'Failed', {} ).get( destPath ) ) )
return S_ERROR( errStr )
destPfn = res['Value']['Successful'][destPath]
# Find out if there is a replica already at the same site
localReplicas = []
otherReplicas = []
for sourceSE, sourcePfn in replicaPreference:
if sourcePfn == destPfn:
continue
res = isSameSiteSE( sourceSE, destSE )
if res['OK'] and res['Value']:
localReplicas.append( ( sourceSE, sourcePfn ) )
else:
otherReplicas.append( ( sourceSE, sourcePfn ) )
replicaPreference = localReplicas + otherReplicas
for sourceSE, sourcePfn in replicaPreference:
self.log.verbose( "__replicate: Attempting replication from %s to %s." % ( sourceSE, destSE ) )
fileDict = {destPfn:sourcePfn}
if sourcePfn == destPfn:
continue
localFile = ''
#FIXME: this should not be hardcoded!!!
if sourcePfn.find( 'srm' ) == -1 or destPfn.find( 'srm' ) == -1:
# No third party transfer is possible, we have to replicate through the local cache
localDir = '.'
if localCache:
localDir = localCache
self.getFile( lfn, localDir )
localFile = os.path.join( localDir, os.path.basename( lfn ) )
fileDict = {destPfn:localFile}
res = destStorageElement.replicateFile( fileDict, catalogueSize, singleFile = True )
if localFile and os.path.exists( localFile ):
os.remove( localFile )
if res['OK']:
self.log.info( "__replicate: Replication successful." )
resDict = {'DestSE':destSE, 'DestPfn':destPfn}
return S_OK( resDict )
else:
errStr = "__replicate: Replication failed."
self.log.error( errStr, "%s from %s to %s." % ( lfn, sourceSE, destSE ) )
##########################################################
# If the replication failed for all sources give up
errStr = "__replicate: Failed to replicate with all sources."
self.log.error( errStr, lfn )
return S_ERROR( errStr )
def __initializeReplication( self, lfn, sourceSE, destSE ):
# Horrible, but kept to not break current log messages
logStr = "__initializeReplication:"
###########################################################
# Check the sourceSE if specified
self.log.verbose( "%s: Determining whether source Storage Element is sane." % logStr )
if sourceSE:
if not self.__SEActive( sourceSE ).get( 'Value', {} ).get( 'Read' ):
infoStr = "%s Supplied source Storage Element is not currently allowed for Read." % ( logStr )
self.log.info( infoStr, sourceSE )
return S_ERROR( infoStr )
###########################################################
# Check that the destination storage element is sane and resolve its name
self.log.verbose( "%s Verifying dest StorageElement validity (%s)." % ( logStr, destSE ) )
destStorageElement = StorageElement( destSE )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "%s The storage element is not currently valid." % logStr
self.log.error( errStr, "%s %s" % ( destSE, res['Message'] ) )
return S_ERROR( errStr )
destSE = destStorageElement.getStorageElementName()['Value']
self.log.info( "%s Destination Storage Element verified." % logStr )
###########################################################
# Check whether the destination storage element is banned
self.log.verbose( "%s Determining whether %s ( destination ) is Write-banned." % ( logStr, destSE ) )
if not self.__SEActive( destSE ).get( 'Value', {} ).get( 'Write' ):
infoStr = "%s Supplied destination Storage Element is not currently allowed for Write." % ( logStr )
self.log.info( infoStr, destSE )
return S_ERROR( infoStr )
###########################################################
# Get the LFN replicas from the file catalogue
self.log.verbose( "%s Attempting to obtain replicas for %s." % ( logStr, lfn ) )
res = self.getReplicas( lfn )
if not res[ 'OK' ]:
errStr = "%s Completely failed to get replicas for LFN." % logStr
self.log.error( errStr, "%s %s" % ( lfn, res['Message'] ) )
return res
if lfn not in res['Value']['Successful']:
errStr = "%s Failed to get replicas for LFN." % logStr
self.log.error( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Failed'][lfn] ) )
self.log.info( "%s Successfully obtained replicas for LFN." % logStr )
lfnReplicas = res['Value']['Successful'][lfn]
###########################################################
# Check the file is at the sourceSE
self.log.verbose( "%s: Determining whether source Storage Element is sane." % logStr )
if sourceSE and sourceSE not in lfnReplicas:
errStr = "%s LFN does not exist at supplied source SE." % logStr
self.log.error( errStr, "%s %s" % ( lfn, sourceSE ) )
return S_ERROR( errStr )
###########################################################
# If the file catalogue size is zero fail the transfer
self.log.verbose( "%s Attempting to obtain size for %s." % ( logStr, lfn ) )
res = self.getFileSize( lfn )
if not res['OK']:
errStr = "%s Completely failed to get size for LFN." % logStr
self.log.error( errStr, "%s %s" % ( lfn, res['Message'] ) )
return res
if lfn not in res['Value']['Successful']:
errStr = "%s Failed to get size for LFN." % logStr
self.log.error( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Failed'][lfn] ) )
catalogueSize = res['Value']['Successful'][lfn]
if catalogueSize == 0:
errStr = "%s Registered file size is 0." % logStr
self.log.error( errStr, lfn )
return S_ERROR( errStr )
self.log.info( "%s File size determined to be %s." % ( logStr, catalogueSize ) )
###########################################################
# Check whether the destination storage element is banned
self.log.verbose( "%s Determining whether %s ( destination ) is Write-banned." % ( logStr, destSE ) )
usableDestSE = self.resourceStatus.isUsableStorage( destSE, 'WriteAccess' )
if not usableDestSE:
infoStr = "%s Destination Storage Element is currently unusable for Write" % logStr
self.log.info( infoStr, destSE )
return S_ERROR( infoStr )
self.log.info( "%s Destination site not banned for Write." % logStr )
###########################################################
# Check whether the supplied source SE is sane
self.log.verbose( "%s: Determining whether source Storage Element is sane." % logStr )
if sourceSE:
usableSourceSE = self.resourceStatus.isUsableStorage( sourceSE, 'ReadAccess' )
if sourceSE not in lfnReplicas:
errStr = "%s LFN does not exist at supplied source SE." % logStr
self.log.error( errStr, "%s %s" % ( lfn, sourceSE ) )
return S_ERROR( errStr )
elif not usableSourceSE:
infoStr = "%s Supplied source Storage Element is currently unusable for Read." % logStr
self.log.info( infoStr, sourceSE )
return S_ERROR( infoStr )
self.log.info( "%s Replication initialization successful." % logStr )
resDict = {
'DestStorage' : destStorageElement,
'DestSE' : destSE,
'Replicas' : lfnReplicas,
'CatalogueSize' : catalogueSize
}
return S_OK( resDict )
def __resolveBestReplicas( self, lfn, sourceSE, lfnReplicas, catalogueSize ):
""" find best replicas """
###########################################################
# Determine the best replicas (remove banned sources, invalid storage elements and file with the wrong size)
logStr = "__resolveBestReplicas:"
replicaPreference = []
for diracSE, pfn in lfnReplicas.items():
if sourceSE and diracSE != sourceSE:
self.log.info( "%s %s replica not requested." % ( logStr, diracSE ) )
continue
usableDiracSE = self.resourceStatus.isUsableStorage( diracSE, 'ReadAccess' )
if not usableDiracSE:
self.log.info( "%s %s is currently unusable as a source." % ( logStr, diracSE ) )
# elif diracSE in bannedSources:
# self.log.info( "__resolveBestReplicas: %s is currently banned as a source." % diracSE )
else:
self.log.info( "%s %s is available for use." % ( logStr, diracSE ) )
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "%s The storage element is not currently valid." % logStr
self.log.error( errStr, "%s %s" % ( diracSE, res['Message'] ) )
else:
#useCatalogPFN = Operations().getValue( 'DataManagement/UseCatalogPFN', True )
#if not useCatalogPFN:
# pfn = storageElement.getPfnForLfn( lfn ).get( 'Value', pfn )
if storageElement.getRemoteProtocols()['Value']:
self.log.verbose( "%s Attempting to get source pfns for remote protocols." % logStr )
res = returnSingleResult( storageElement.getPfnForProtocol( pfn, self.thirdPartyProtocols ) )
if res['OK']:
sourcePfn = res['Value']
self.log.verbose( "%s Attempting to get source file size." % logStr )
res = storageElement.getFileSize( sourcePfn )
if res['OK']:
if sourcePfn in res['Value']['Successful']:
sourceFileSize = res['Value']['Successful'][sourcePfn]
self.log.info( "%s Source file size determined to be %s." % ( logStr, sourceFileSize ) )
if catalogueSize == sourceFileSize:
fileTuple = ( diracSE, sourcePfn )
replicaPreference.append( fileTuple )
else:
errStr = "%s Catalogue size and physical file size mismatch." % logStr
self.log.error( errStr, "%s %s" % ( diracSE, sourcePfn ) )
else:
errStr = "%s Failed to get physical file size." % logStr
self.log.error( errStr, "%s %s: %s" % ( sourcePfn, diracSE, res['Value']['Failed'][sourcePfn] ) )
else:
errStr = "%s Completely failed to get physical file size." % logStr
self.log.error( errStr, "%s %s: %s" % ( sourcePfn, diracSE, res['Message'] ) )
else:
errStr = "%s Failed to get PFN for replication for StorageElement." % logStr
self.log.error( errStr, "%s %s" % ( diracSE, res['Message'] ) )
else:
errStr = "%s Source Storage Element has no remote protocols." % logStr
self.log.info( errStr, diracSE )
if not replicaPreference:
errStr = "%s Failed to find any valid source Storage Elements." % logStr
self.log.error( errStr )
return S_ERROR( errStr )
else:
return S_OK( replicaPreference )
###################################################################
#
# These are the file catalog write methods
#
def registerFile( self, fileTuple, catalog = '' ):
""" Register a file or a list of files
:param self: self reference
:param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
:param str catalog: catalog name
"""
if type( fileTuple ) == ListType:
fileTuples = fileTuple
elif type( fileTuple ) == TupleType:
fileTuples = [fileTuple]
else:
      errStr = "registerFile: Supplied file info must be tuple or list of tuples."
self.log.error( errStr )
return S_ERROR( errStr )
self.log.verbose( "registerFile: Attempting to register %s files." % len( fileTuples ) )
res = self.__registerFile( fileTuples, catalog )
if not res['OK']:
errStr = "registerFile: Completely failed to register files."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
return res
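  # Editor's example of the expected fileTuple layout (all values invented): registerFile
  # accepts a single tuple or a list of tuples of the form
  # ( lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum ):
  #   fileTuple = ( '/vo/user/a/alice/file.dat',
  #                 'srm://se.example.org/vo/user/a/alice/file.dat',
  #                 1048576, 'EXAMPLE-USER-SE',
  #                 'A1B2C3D4-0000-0000-0000-000000000000', '2d15a49b' )
  #   res = rm.registerFile( fileTuple )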
  def __registerFile( self, fileTuples, catalog ):
    """ register file to catalogue """
seDict = {}
fileDict = {}
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
if storageElementName:
seDict.setdefault( storageElementName, [] ).append( ( lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum ) )
else:
# If no SE name, this could be just registration in a dummy catalog like LHCb bookkeeping
fileDict[lfn] = {'PFN':'', 'Size':fileSize, 'SE':storageElementName, 'GUID':fileGuid, 'Checksum':checksum}
failed = {}
for storageElementName, fileTuple in seDict.items():
destStorageElement = StorageElement( storageElementName )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "__registerFile: The storage element is not currently valid."
self.log.error( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuple:
failed[lfn] = errStr
else:
storageElementName = destStorageElement.getStorageElementName()['Value']
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuple:
res = returnSingleResult( destStorageElement.getPfnForProtocol( physicalFile, self.registrationProtocol, withPort = False ) )
if not res['OK']:
pfn = physicalFile
else:
pfn = res['Value']
# tuple = ( lfn, pfn, fileSize, storageElementName, fileGuid, checksum )
fileDict[lfn] = {'PFN':pfn, 'Size':fileSize, 'SE':storageElementName, 'GUID':fileGuid, 'Checksum':checksum}
self.log.verbose( "__registerFile: Resolved %s files for registration." % len( fileDict ) )
if catalog:
fileCatalog = FileCatalog( catalog )
if not fileCatalog.isOK():
return S_ERROR( "Can't get FileCatalog %s" % catalog )
res = fileCatalog.addFile( fileDict )
else:
res = self.fileCatalogue.addFile( fileDict )
if not res['OK']:
errStr = "__registerFile: Completely failed to register files."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def registerReplica( self, replicaTuple, catalog = '' ):
""" Register a replica (or list of) supplied in the replicaTuples.
'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [ replicaTuple ]
else:
      errStr = "registerReplica: Supplied file info must be tuple or list of tuples."
self.log.error( errStr )
return S_ERROR( errStr )
self.log.verbose( "registerReplica: Attempting to register %s replicas." % len( replicaTuples ) )
res = self.__registerReplica( replicaTuples, catalog )
if not res['OK']:
errStr = "registerReplica: Completely failed to register replicas."
self.log.error( errStr, res['Message'] )
return res
def __registerReplica( self, replicaTuples, catalog ):
""" register replica to catalogue """
seDict = {}
for lfn, pfn, storageElementName in replicaTuples:
seDict.setdefault( storageElementName, [] ).append( ( lfn, pfn ) )
failed = {}
replicaTuples = []
for storageElementName, replicaTuple in seDict.items():
destStorageElement = StorageElement( storageElementName )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "__registerReplica: The storage element is not currently valid."
self.log.error( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
for lfn, pfn in replicaTuple:
failed[lfn] = errStr
else:
storageElementName = destStorageElement.getStorageElementName()['Value']
for lfn, pfn in replicaTuple:
res = returnSingleResult( destStorageElement.getPfnForProtocol( pfn, self.registrationProtocol, withPort = False ) )
if not res['OK']:
failed[lfn] = res['Message']
else:
replicaTuple = ( lfn, res['Value'], storageElementName, False )
replicaTuples.append( replicaTuple )
self.log.verbose( "__registerReplica: Successfully resolved %s replicas for registration." % len( replicaTuples ) )
# HACK!
replicaDict = {}
for lfn, pfn, se, _master in replicaTuples:
replicaDict[lfn] = {'SE':se, 'PFN':pfn}
if catalog:
fileCatalog = FileCatalog( catalog )
res = fileCatalog.addReplica( replicaDict )
else:
res = self.fileCatalogue.addReplica( replicaDict )
if not res['OK']:
errStr = "__registerReplica: Completely failed to register replicas."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
def removeFile( self, lfn, force = None ):
""" Remove the file (all replicas) from Storage Elements and file catalogue
'lfn' is the file to be removed
"""
if force is None:
force = self.ignoreMissingInFC
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeFile: Supplied lfns must be string or list of strings."
self.log.error( errStr )
return S_ERROR( errStr )
# First check if the file exists in the FC
res = self.fileCatalogue.exists( lfns )
if not res['OK']:
return res
success = res['Value']['Successful']
lfns = [lfn for lfn in success if success[lfn] ]
if force:
# Files that don't exist are removed successfully
successful = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], True )
failed = {}
else:
successful = {}
failed = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], 'No such file or directory' )
# Check that we have write permissions to this directory.
if lfns:
res = self.__verifyOperationPermission( lfns )
if not res['OK']:
return res
if not res['Value']:
errStr = "removeFile: Write access not permitted for this credential."
self.log.error( errStr, lfns )
return S_ERROR( errStr )
self.log.verbose( "removeFile: Attempting to remove %s files from Storage and Catalogue. Get replicas first" % len( lfns ) )
res = self.fileCatalogue.getReplicas( lfns, True )
if not res['OK']:
errStr = "ReplicaManager.removeFile: Completely failed to get replicas for lfns."
self.log.error( errStr, res['Message'] )
return res
lfnDict = res['Value']['Successful']
for lfn, reason in res['Value'].get( 'Failed', {} ).items():
# Ignore files missing in FC if force is set
if reason == 'No such file or directory' and force:
successful[lfn] = True
elif reason == 'File has zero replicas':
lfnDict[lfn] = {}
else:
failed[lfn] = reason
res = self.__removeFile( lfnDict )
if not res['OK']:
errStr = "removeFile: Completely failed to remove files."
self.log.error( errStr, res['Message'] )
return res
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
gDataStoreClient.commit()
return S_OK( resDict )
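  # Editor's sketch (LFN invented, rm is a ReplicaManager instance): removeFile deletes every
  # replica from storage and then the catalog entry; with force=True (or the
  # DataManagement/IgnoreMissingInFC option set) files already absent from the catalog are
  # reported as successfully removed:
  #   res = rm.removeFile( [ '/vo/user/a/alice/file.dat' ], force = False )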
def __removeFile( self, lfnDict ):
""" remove file """
storageElementDict = {}
# # sorted and reversed
for lfn, repDict in sorted( lfnDict.items(), reverse = True ):
for se, pfn in repDict.items():
storageElementDict.setdefault( se, [] ).append( ( lfn, pfn ) )
failed = {}
successful = {}
for storageElementName in sorted( storageElementDict ):
fileTuple = storageElementDict[storageElementName]
res = self.__removeReplica( storageElementName, fileTuple )
if not res['OK']:
errStr = res['Message']
for lfn, pfn in fileTuple:
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
else:
for lfn, errStr in res['Value']['Failed'].items():
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
completelyRemovedFiles = []
for lfn in [lfn for lfn in lfnDict if lfn not in failed]:
completelyRemovedFiles.append( lfn )
if completelyRemovedFiles:
res = self.fileCatalogue.removeFile( completelyRemovedFiles )
if not res['OK']:
for lfn in completelyRemovedFiles:
failed[lfn] = "Failed to remove file from the catalog: %s" % res['Message']
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
  def removeReplica( self, storageElementName, lfn ):
    """ Remove the replica at the supplied Storage Element, first from the Storage Element and then from the file catalogue
        'storageElementName' is the storage from which the replica is to be removed
        'lfn' is the file whose replica is to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplica: Supplied lfns must be string or list of strings."
self.log.error( errStr )
return S_ERROR( errStr )
# Check that we have write permissions to this directory.
res = self.__verifyOperationPermission( lfns )
if not res['OK']:
return res
if not res['Value']:
      errStr = "removeReplica: Write access not permitted for this credential."
self.log.error( errStr, lfns )
return S_ERROR( errStr )
self.log.verbose( "removeReplica: Will remove catalogue entry for %s lfns at %s." % ( len( lfns ),
storageElementName ) )
res = self.fileCatalogue.getReplicas( lfns, True )
if not res['OK']:
errStr = "removeReplica: Completely failed to get replicas for lfns."
self.log.error( errStr, res['Message'] )
return res
failed = res['Value']['Failed']
successful = {}
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
elif len( repDict ) == 1:
# The file has only a single replica so don't remove
self.log.error( "The replica you are trying to remove is the only one.", "%s @ %s" % ( lfn,
storageElementName ) )
failed[lfn] = "Failed to remove sole replica"
else:
replicaTuples.append( ( lfn, repDict[storageElementName] ) )
res = self.__removeReplica( storageElementName, replicaTuples )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
gDataStoreClient.commit()
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
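  # Editor's sketch (names invented, rm is a ReplicaManager instance): removeReplica deletes
  # a single replica, physical copy first and catalog entry second, and refuses to remove
  # the last remaining replica of a file:
  #   res = rm.removeReplica( 'EXAMPLE-DISK-SE', '/vo/user/a/alice/file.dat' )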
def __removeReplica( self, storageElementName, fileTuple ):
""" remove replica """
lfnDict = {}
failed = {}
for lfn, pfn in fileTuple:
res = self.__verifyOperationPermission( lfn )
if not res['OK'] or not res['Value']:
errStr = "__removeReplica: Write access not permitted for this credential."
self.log.error( errStr, lfn )
failed[lfn] = errStr
else:
        # This is the PFN as recorded in the FC
lfnDict[lfn] = pfn
# Now we should use the constructed PFNs if needed, for the physical removal
# Reverse lfnDict into pfnDict with required PFN
if self.useCatalogPFN:
pfnDict = dict( zip( lfnDict.values(), lfnDict.keys() ) )
else:
pfnDict = dict( [ ( self.getPfnForLfn( lfn, storageElementName )['Value'].get( 'Successful', {} ).get( lfn, lfnDict[lfn] ), lfn ) for lfn in lfnDict] )
# removePhysicalReplicas is called with real PFN list
res = self.__removePhysicalReplica( storageElementName, pfnDict.keys() )
if not res['OK']:
errStr = "__removeReplica: Failed to remove catalog replicas."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
failed.update( dict( [( pfnDict[pfn], error ) for pfn, error in res['Value']['Failed'].items()] ) )
# Here we use the FC PFN...
    replicaTuples = [( pfnDict[pfn], lfnDict[pfnDict[pfn]], storageElementName ) for pfn in res['Value']['Successful']]
res = self.__removeCatalogReplica( replicaTuples )
if not res['OK']:
errStr = "__removeReplica: Completely failed to remove physical files."
self.log.error( errStr, res['Message'] )
failed.update( dict.fromkeys( [lfn for lfn, _pfn, _se in replicaTuples if lfn not in failed], res['Message'] ) )
successful = {}
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def removeReplicaFromCatalog( self, storageElementName, lfn ):
""" remove :lfn: replica from :storageElementName: SE
:param self: self reference
:param str storageElementName: SE name
:param mixed lfn: a single LFN or list of LFNs
"""
    # Remove the replica from the file catalog: 'lfn' is the file (or list of files)
    # whose replica is to be removed and 'storageElementName' is the storage it is registered at
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplicaFromCatalog: Supplied lfns must be string or list of strings."
self.log.error( errStr )
return S_ERROR( errStr )
self.log.verbose( "removeReplicaFromCatalog: Will remove catalogue entry for %s lfns at %s." % \
( len( lfns ), storageElementName ) )
res = self.getCatalogReplicas( lfns, allStatus = True )
if not res['OK']:
errStr = "removeReplicaFromCatalog: Completely failed to get replicas for lfns."
self.log.error( errStr, res['Message'] )
return res
failed = {}
successful = {}
for lfn, reason in res['Value']['Failed'].items():
if reason in ( 'No such file or directory', 'File has zero replicas' ):
successful[lfn] = True
else:
failed[lfn] = reason
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
replicaTuples.append( ( lfn, repDict[storageElementName], storageElementName ) )
self.log.verbose( "removeReplicaFromCatalog: Resolved %s pfns for catalog removal at %s." % ( len( replicaTuples ),
storageElementName ) )
res = self.__removeCatalogReplica( replicaTuples )
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def removeCatalogPhysicalFileNames( self, replicaTuple ):
""" Remove replicas from the file catalog specified by replica tuple
'replicaTuple' is a tuple containing the replica to be removed and is of the form ( lfn, pfn, se )
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [replicaTuple]
else:
errStr = "removeCatalogPhysicalFileNames: Supplied info must be tuple or list of tuples."
self.log.error( errStr )
return S_ERROR( errStr )
return self.__removeCatalogReplica( replicaTuples )
  def __removeCatalogReplica( self, replicaTuple ):
    """ remove replica from catalogue """
oDataOperation = self.__initialiseAccountingObject( 'removeCatalogReplica', '', len( replicaTuple ) )
oDataOperation.setStartTime()
start = time.time()
# HACK!
replicaDict = {}
for lfn, pfn, se in replicaTuple:
replicaDict[lfn] = {'SE':se, 'PFN':pfn}
res = self.fileCatalogue.removeReplica( replicaDict )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'RegistrationTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'RegistrationOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removeCatalogReplica: Completely failed to remove replica."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
success = res['Value']['Successful']
if success:
self.log.info( "__removeCatalogReplica: Removed %d replicas" % len( success ) )
for lfn in success:
self.log.debug( "__removeCatalogReplica: Successfully removed replica.", lfn )
for lfn, error in res['Value']['Failed'].items():
self.log.error( "__removeCatalogReplica: Failed to remove replica.", "%s %s" % ( lfn, error ) )
oDataOperation.setValueByKey( 'RegistrationOK', len( success ) )
gDataStoreClient.addRegister( oDataOperation )
return res
def removePhysicalReplica( self, storageElementName, lfn ):
""" Remove replica from Storage Element.
        'lfn' is the file or list of files to be removed
'storageElementName' is the storage where the file is to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removePhysicalReplica: Supplied lfns must be string or list of strings."
self.log.error( errStr )
return S_ERROR( errStr )
# Check that we have write permissions to this directory.
res = self.__verifyOperationPermission( lfns )
if not res['OK']:
return res
if not res['Value']:
errStr = "removePhysicalReplica: Write access not permitted for this credential."
self.log.error( errStr, lfns )
return S_ERROR( errStr )
self.log.verbose( "removePhysicalReplica: Attempting to remove %s lfns at %s." % ( len( lfns ),
storageElementName ) )
self.log.verbose( "removePhysicalReplica: Attempting to resolve replicas." )
res = self.getReplicas( lfns )
if not res['OK']:
errStr = "removePhysicalReplica: Completely failed to get replicas for lfns."
self.log.error( errStr, res['Message'] )
return res
failed = res['Value']['Failed']
successful = {}
pfnDict = {}
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
sePfn = repDict[storageElementName]
pfnDict[sePfn] = lfn
self.log.verbose( "removePhysicalReplica: Resolved %s pfns for removal at %s." % ( len( pfnDict ),
storageElementName ) )
res = self.__removePhysicalReplica( storageElementName, pfnDict.keys() )
for pfn, error in res['Value']['Failed'].items():
failed[pfnDict[pfn]] = error
for pfn in res['Value']['Successful']:
successful[pfnDict[pfn]] = True
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
def __removePhysicalReplica( self, storageElementName, pfnsToRemove ):
""" remove replica from storage element """
self.log.verbose( "__removePhysicalReplica: Attempting to remove %s pfns at %s." % ( len( pfnsToRemove ),
storageElementName ) )
storageElement = StorageElement( storageElementName )
res = storageElement.isValid()
if not res['OK']:
errStr = "__removePhysicalReplica: The storage element is not currently valid."
self.log.error( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
return S_ERROR( errStr )
oDataOperation = self.__initialiseAccountingObject( 'removePhysicalReplica',
storageElementName,
len( pfnsToRemove ) )
oDataOperation.setStartTime()
start = time.time()
res = storageElement.removeFile( pfnsToRemove )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'TransferTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removePhysicalReplica: Failed to remove replicas."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
else:
for surl, value in res['Value']['Failed'].items():
if 'No such file or directory' in value:
res['Value']['Successful'][surl] = surl
res['Value']['Failed'].pop( surl )
for surl in res['Value']['Successful']:
ret = returnSingleResult( storageElement.getPfnForProtocol( surl, self.registrationProtocol, withPort = False ) )
if not ret['OK']:
res['Value']['Successful'][surl] = surl
else:
res['Value']['Successful'][surl] = ret['Value']
oDataOperation.setValueByKey( 'TransferOK', len( res['Value']['Successful'] ) )
gDataStoreClient.addRegister( oDataOperation )
infoStr = "__removePhysicalReplica: Successfully issued accounting removal request."
self.log.verbose( infoStr )
return res
#########################################################################
#
# File transfer methods
#
def put( self, lfn, fileName, diracSE, path = None ):
""" Put a local file to a Storage Element
:param self: self reference
:param str lfn: LFN
:param str fileName: the full path to the local file
:param str diracSE: the Storage Element to which to put the file
:param str path: the path on the storage where the file will be put (if not provided the LFN will be used)
"""
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "put: Supplied file does not exist."
self.log.error( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "put: Supplied file is zero size."
self.log.error( errStr, fileName )
return S_ERROR( errStr )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "put: The storage element is not currently valid."
self.log.error( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
res = storageElement.getPfnForLfn( lfn )
    if not res['OK'] or lfn not in res['Value']['Successful']:
errStr = "put: Failed to generate destination PFN."
self.log.error( errStr, res.get( 'Message', res.get( 'Value', {} ).get( 'Failed', {} ).get( lfn ) ) )
return S_ERROR( errStr )
destPfn = res['Value']['Successful'][lfn]
fileDict = {destPfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
startTime = time.time()
res = storageElement.putFile( fileDict, singleFile = True )
putTime = time.time() - startTime
if not res['OK']:
errStr = "put: Failed to put file to Storage Element."
failed[lfn] = res['Message']
self.log.error( errStr, "%s: %s" % ( fileName, res['Message'] ) )
else:
self.log.info( "put: Put file to storage in %s seconds." % putTime )
successful[lfn] = destPfn
resDict = {'Successful': successful, 'Failed':failed}
return S_OK( resDict )
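  # Editor's sketch (paths invented, rm is a ReplicaManager instance): put only copies the
  # file to the SE; unlike putAndRegister it does not touch the catalog, so the caller is
  # expected to register the returned PFN afterwards if needed:
  #   res = rm.put( '/vo/user/a/alice/new.dat', '/home/alice/new.dat', 'EXAMPLE-USER-SE' )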
# def removeReplica(self,lfn,storageElementName,singleFile=False):
# def putReplica(self,lfn,storageElementName,singleFile=False):
# def replicateReplica(self,lfn,size,storageElementName,singleFile=False):
def getActiveReplicas( self, lfns ):
""" Get all the replicas for the SEs which are in Active status for reading.
"""
res = self.getReplicas( lfns, allStatus = False )
if not res['OK']:
return res
replicas = res['Value']
return self.checkActiveReplicas( replicas )
def checkActiveReplicas( self, replicaDict ):
""" Check a replica dictionary for active replicas
"""
if type( replicaDict ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict ) )
for key in [ 'Successful', 'Failed' ]:
if not key in replicaDict:
return S_ERROR( 'Missing key "%s" in replica dictionary' % key )
if type( replicaDict[key] ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict[key] ) )
seReadStatus = {}
for lfn, replicas in replicaDict['Successful'].items():
if type( replicas ) != DictType:
del replicaDict['Successful'][ lfn ]
replicaDict['Failed'][lfn] = 'Wrong replica info'
continue
for se in replicas.keys():
if se not in seReadStatus:
res = self.getSEStatus( se )
if res['OK']:
seReadStatus[se] = res['Value']['Read']
else:
seReadStatus[se] = False
if not seReadStatus[se]:
replicas.pop( se )
return S_OK( replicaDict )
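  # Editor's sketch (LFN invented, rm is a ReplicaManager instance): getActiveReplicas
  # combines a catalog lookup with checkActiveReplicas, which drops replicas hosted on SEs
  # whose read access is neither 'Active' nor 'Degraded' according to ResourceStatus:
  #   res = rm.getActiveReplicas( [ '/vo/user/a/alice/file.dat' ] )
  #   usable = res['Value']['Successful'] if res['OK'] else {}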
  def getSEStatus( self, se ):
    """ check if the SE is active """
result = StorageFactory().getStorageName( se )
if not result['OK']:
return S_ERROR( 'SE not known' )
resolvedName = result['Value']
res = self.resourceStatus.getStorageElementStatus( resolvedName, default = None )
if not res[ 'OK' ]:
return S_ERROR( 'SE not known' )
seStatus = { 'Read' : True, 'Write' : True }
if res['Value'][se].get( 'ReadAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Read' ] = False
if res['Value'][se].get( 'WriteAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Write' ] = False
return S_OK( seStatus )
  def __initialiseAccountingObject( self, operation, se, files ):
    """ create accounting record """
accountingDict = {}
accountingDict['OperationType'] = operation
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'ReplicaManager'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['Destination'] = se
accountingDict['TransferTotal'] = files
accountingDict['TransferOK'] = files
accountingDict['TransferSize'] = files
accountingDict['TransferTime'] = 0.0
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = DIRAC.siteName()
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict( accountingDict )
return oDataOperation
##########################################
#
  # Defunct methods kept only for backward compatibility
#
def onlineRetransfer( self, storageElementName, physicalFile ):
""" Requests the online system to re-transfer files
        'storageElementName' is the storage element hosting the file
        'physicalFile' is the physical file (or list of files) to re-transfer
"""
return self._callStorageElementFcn( storageElementName, physicalFile, 'retransferOnlineFile' )
def getReplicas( self, lfns, allStatus = True ):
""" get replicas from catalogue """
res = self.getCatalogReplicas( lfns, allStatus = allStatus )
if not self.useCatalogPFN:
if res['OK']:
se_lfn = {}
catalogReplicas = res['Value']['Successful']
# We group the query to getPfnForLfn by storage element to gain in speed
for lfn in catalogReplicas:
for se in catalogReplicas[lfn]:
se_lfn.setdefault( se, [] ).append( lfn )
for se in se_lfn:
succPfn = self.getPfnForLfn( se_lfn[se], se ).get( 'Value', {} ).get( 'Successful', {} )
for lfn in succPfn:
            # catalogReplicas still points to res['Value']['Successful'], so res will be updated
catalogReplicas[lfn][se] = succPfn[lfn]
return res
def getFileSize( self, lfn ):
""" get file size from catalogue """
return self.getCatalogFileSize( lfn )
| gpl-3.0 | -7,696,063,295,908,667,000 | 43.10457 | 157 | 0.646042 | false |
level12/blazeweb | blazeweb/pytest_plugin.py | 2 | 1203 | def pytest_addoption(parser):
parser.addoption("--blazeweb_package", action="store",
help="blazeweb-package: app module to run for tests")
parser.addoption("--blazeweb_profile", action="store", default="Test",
help="blazeweb-profile: app settings profile to use (default is Test)")
def pytest_configure(config):
from blazeutils import tolist
from blazeweb.events import signal
from blazeweb.globals import ag, settings
from blazeweb.hierarchy import findobj
from blazeweb.scripting import load_current_app
_, _, _, wsgiapp = load_current_app(config.getoption('blazeweb_package'),
config.getoption('blazeweb_profile'))
# make the app available to the tests
ag.wsgi_test_app = wsgiapp
# an application can define functions to be called after the app
# is initialized but before any test inspection is done or tests
# are ran. We call those functions here:
for callstring in tolist(settings.testing.init_callables):
tocall = findobj(callstring)
tocall()
# we also support events for pre-test setup
signal('blazeweb.pre_test_init').send()
| bsd-3-clause | 7,284,290,946,232,201,000 | 41.964286 | 92 | 0.671654 | false |
matrix-org/synapse | tests/storage/test_devices.py | 1 | 5336 | # Copyright 2016-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import synapse.api.errors
from tests.unittest import HomeserverTestCase
class DeviceStoreTestCase(HomeserverTestCase):
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
def test_store_new_device(self):
self.get_success(
self.store.store_device("user_id", "device_id", "display_name")
)
res = self.get_success(self.store.get_device("user_id", "device_id"))
self.assertDictContainsSubset(
{
"user_id": "user_id",
"device_id": "device_id",
"display_name": "display_name",
},
res,
)
def test_get_devices_by_user(self):
self.get_success(
self.store.store_device("user_id", "device1", "display_name 1")
)
self.get_success(
self.store.store_device("user_id", "device2", "display_name 2")
)
self.get_success(
self.store.store_device("user_id2", "device3", "display_name 3")
)
res = self.get_success(self.store.get_devices_by_user("user_id"))
self.assertEqual(2, len(res.keys()))
self.assertDictContainsSubset(
{
"user_id": "user_id",
"device_id": "device1",
"display_name": "display_name 1",
},
res["device1"],
)
self.assertDictContainsSubset(
{
"user_id": "user_id",
"device_id": "device2",
"display_name": "display_name 2",
},
res["device2"],
)
def test_count_devices_by_users(self):
self.get_success(
self.store.store_device("user_id", "device1", "display_name 1")
)
self.get_success(
self.store.store_device("user_id", "device2", "display_name 2")
)
self.get_success(
self.store.store_device("user_id2", "device3", "display_name 3")
)
res = self.get_success(self.store.count_devices_by_users())
self.assertEqual(0, res)
res = self.get_success(self.store.count_devices_by_users(["unknown"]))
self.assertEqual(0, res)
res = self.get_success(self.store.count_devices_by_users(["user_id"]))
self.assertEqual(2, res)
res = self.get_success(
self.store.count_devices_by_users(["user_id", "user_id2"])
)
self.assertEqual(3, res)
def test_get_device_updates_by_remote(self):
device_ids = ["device_id1", "device_id2"]
# Add two device updates with a single stream_id
self.get_success(
self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
)
# Get all device updates ever meant for this remote
now_stream_id, device_updates = self.get_success(
self.store.get_device_updates_by_remote("somehost", -1, limit=100)
)
# Check original device_ids are contained within these updates
self._check_devices_in_updates(device_ids, device_updates)
def _check_devices_in_updates(self, expected_device_ids, device_updates):
"""Check that an specific device ids exist in a list of device update EDUs"""
self.assertEqual(len(device_updates), len(expected_device_ids))
received_device_ids = {
update["device_id"] for edu_type, update in device_updates
}
self.assertEqual(received_device_ids, set(expected_device_ids))
def test_update_device(self):
self.get_success(
self.store.store_device("user_id", "device_id", "display_name 1")
)
res = self.get_success(self.store.get_device("user_id", "device_id"))
self.assertEqual("display_name 1", res["display_name"])
# do a no-op first
self.get_success(self.store.update_device("user_id", "device_id"))
res = self.get_success(self.store.get_device("user_id", "device_id"))
self.assertEqual("display_name 1", res["display_name"])
# do the update
self.get_success(
self.store.update_device(
"user_id", "device_id", new_display_name="display_name 2"
)
)
# check it worked
res = self.get_success(self.store.get_device("user_id", "device_id"))
self.assertEqual("display_name 2", res["display_name"])
def test_update_unknown_device(self):
exc = self.get_failure(
self.store.update_device(
"user_id", "unknown_device_id", new_display_name="display_name 2"
),
synapse.api.errors.StoreError,
)
self.assertEqual(404, exc.value.code)
| apache-2.0 | 3,141,021,267,772,333,600 | 34.573333 | 88 | 0.587706 | false |
JQIamo/artiq | artiq/coredevice/comm_moninj.py | 1 | 2537 | import asyncio
import logging
import struct
from enum import Enum
__all__ = ["TTLProbe", "TTLOverride", "CommMonInj"]
logger = logging.getLogger(__name__)
class TTLProbe(Enum):
level = 0
oe = 1
class TTLOverride(Enum):
en = 0
level = 1
oe = 2
class CommMonInj:
def __init__(self, monitor_cb, injection_status_cb, disconnect_cb=None):
self.monitor_cb = monitor_cb
self.injection_status_cb = injection_status_cb
self.disconnect_cb = disconnect_cb
async def connect(self, host, port=1383):
self._reader, self._writer = await asyncio.open_connection(host, port)
try:
self._writer.write(b"ARTIQ moninj\n")
self._receive_task = asyncio.ensure_future(self._receive_cr())
except:
self._writer.close()
del self._reader
del self._writer
raise
async def close(self):
self.disconnect_cb = None
try:
self._receive_task.cancel()
try:
await asyncio.wait_for(self._receive_task, None)
except asyncio.CancelledError:
pass
finally:
self._writer.close()
del self._reader
del self._writer
def monitor(self, enable, channel, probe):
packet = struct.pack(">bblb", 0, enable, channel, probe)
self._writer.write(packet)
def inject(self, channel, override, value):
packet = struct.pack(">blbb", 1, channel, override, value)
self._writer.write(packet)
def get_injection_status(self, channel, override):
packet = struct.pack(">blb", 2, channel, override)
self._writer.write(packet)
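    # A minimal usage sketch (the hostname and channel below are assumed example
    # values, not part of the original module):
    #
    #   async def watch(channel):
    #       conn = CommMonInj(monitor_cb=print, injection_status_cb=print)
    #       await conn.connect("kasli.lan")
    #       conn.monitor(True, channel, TTLProbe.level.value)
    #       await asyncio.sleep(10.)
    #       await conn.close()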
async def _receive_cr(self):
try:
while True:
ty = await self._reader.read(1)
if not ty:
return
if ty == b"\x00":
payload = await self._reader.read(9)
channel, probe, value = struct.unpack(">lbl", payload)
self.monitor_cb(channel, probe, value)
elif ty == b"\x01":
payload = await self._reader.read(6)
channel, override, value = struct.unpack(">lbb", payload)
self.injection_status_cb(channel, override, value)
else:
raise ValueError("Unknown packet type", ty)
finally:
if self.disconnect_cb is not None:
self.disconnect_cb()
| lgpl-3.0 | 838,825,745,491,158,500 | 29.202381 | 78 | 0.546709 | false |
hzlf/openbroadcast.ch | app/media_embed/utils.py | 1 | 1679 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
EMBED_SERVICE_PROVIDERS = [
# Video
'Youtube',
'Vimeo',
# Audio
]
def process_provider_url(url, exclude_providers=[]):
provider = None
object_id = None
# youtube
if not 'youtube' in exclude_providers:
if '//youtube.com' in url or '//www.youtube.com' in url or '//youtu.be' in url:
provider = 'youtube'
object_id = get_youtube_id_by_url(url)
# vimeo
if not 'vimeo' in exclude_providers:
if '//vimeo.com' in url:
provider = 'vimeo'
object_id = get_vimeo_id_by_url(url)
return provider, object_id
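# A small, self-contained usage sketch of process_provider_url(); the URLs are
# illustrative examples and not part of the original module.
def _example_process_provider_url():
    provider, object_id = process_provider_url("https://www.youtube.com/watch?v=SA2iWivDJiE")
    assert provider == "youtube" and object_id == "SA2iWivDJiE"
    provider, object_id = process_provider_url("https://vimeo.com/178240219")
    assert provider == "vimeo" and object_id == "178240219"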
def get_youtube_id_by_url(url):
"""
examples:
- http://youtu.be/SA2iWivDJiE
- http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
- http://www.youtube.com/embed/SA2iWivDJiE
- http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
"""
query = urlparse(url)
if query.hostname == 'youtu.be':
return query.path[1:]
if query.hostname in ('www.youtube.com', 'youtube.com', 'm.youtube.com'):
if query.path == '/watch':
p = parse_qs(query.query)
return p['v'][0]
if query.path[:7] == '/embed/':
return query.path.split('/')[2]
if query.path[:3] == '/v/':
return query.path.split('/')[2]
return None
def get_vimeo_id_by_url(url):
"""
examples:
- https://vimeo.com/178240219
"""
query = urlparse(url)
return query.path.split('/')[1]
| gpl-3.0 | 296,941,797,632,779,200 | 23.691176 | 87 | 0.57832 | false |
prats226/python-amazon-product-api-0.2.8 | tests/utils.py | 1 | 2424 |
from lxml import objectify
import re
try: # make it python2.4/2.5 compatible!
from urlparse import urlparse, parse_qs
except ImportError: # pragma: no cover
from urlparse import urlparse
from cgi import parse_qs
def convert_camel_case(operation):
"""
Converts ``CamelCaseOperationName`` into ``python_style_method_name``.
"""
return re.sub('([a-z])([A-Z])', r'\1_\2', operation).lower()
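# A quick illustration of the conversion (example operation names assumed, not
# taken from the WSDL):
#   convert_camel_case("ItemSearch")        -> "item_search"
#   convert_camel_case("CartModifyRequest") -> "cart_modify_request"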
def extract_operations_from_wsdl(path):
"""
Extracts operations from Amazon's WSDL file.
"""
root = objectify.parse(open(path)).getroot()
wsdlns = 'http://schemas.xmlsoap.org/wsdl/'
return set(root.xpath('//ws:operation/@name', namespaces={'ws' : wsdlns}))
#: list of changeable and/or sensitive (thus ignorable) request arguments
IGNORABLE_ARGUMENTS = ('Signature', 'AWSAccessKeyId', 'Timestamp', 'AssociateTag')
def arguments_from_cached_xml(xml):
"""
Extracts request arguments from cached response file. (Almost) any request
sent to the API will be answered with an XML response containing the
arguments originally used in XML elements ::
<OperationRequest>
<Arguments>
<Argument Name="Service" Value="AWSECommerceService"/>
<Argument Name="Signature" Value="XXXXXXXXXXXXXXX"/>
<Argument Name="Operation" Value="BrowseNodeLookup"/>
<Argument Name="BrowseNodeId" Value="927726"/>
<Argument Name="AWSAccessKeyId" Value="XXXXXXXXXXXXXXX"/>
<Argument Name="Timestamp" Value="2010-10-15T22:09:00Z"/>
<Argument Name="Version" Value="2009-10-01"/>
</Arguments>
</OperationRequest>
"""
root = objectify.fromstring(xml).getroottree().getroot()
return dict((arg.get('Name'), arg.get('Value'))
for arg in root.OperationRequest.Arguments.Argument
if arg.get('Name') not in IGNORABLE_ARGUMENTS)
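# A minimal, runnable sketch of arguments_from_cached_xml(); the XML snippet is a
# hand-written stand-in for a cached response, not taken from the test data.
def _example_arguments_from_cached_xml():
    xml = (
        '<ItemLookupResponse><OperationRequest><Arguments>'
        '<Argument Name="Service" Value="AWSECommerceService"/>'
        '<Argument Name="Signature" Value="XXXXXXXXXXXXXXX"/>'
        '<Argument Name="ItemId" Value="0976925524"/>'
        '</Arguments></OperationRequest></ItemLookupResponse>'
    )
    # 'Signature' is dropped because it appears in IGNORABLE_ARGUMENTS.
    return arguments_from_cached_xml(xml)  # {'Service': 'AWSECommerceService', 'ItemId': '0976925524'}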
def arguments_from_url(url):
"""
Extracts request arguments from URL.
"""
params = parse_qs(urlparse(url).query)
for key, val in params.items():
# turn everything into unicode
if type(val) == list:
val = map(lambda x: unicode(x, encoding='utf-8'), val)
# reduce lists to single value
if type(val) == list and len(val) == 1:
params[key] = val[0]
if key in IGNORABLE_ARGUMENTS:
del params[key]
return params
| bsd-3-clause | 8,023,360,274,834,195,000 | 35.727273 | 82 | 0.636964 | false |
imiyoo2010/mitmproxy | doc-src/index.py | 1 | 2510 | import os, sys, datetime
import countershape
from countershape import Page, Directory, PythonModule, markup, model
import countershape.template
sys.path.insert(0, "..")
from libmproxy import filt, version
MITMPROXY_SRC = os.environ.get("MITMPROXY_SRC", os.path.abspath(".."))
ns.VERSION = version.VERSION
if ns.options.website:
ns.idxpath = "doc/index.html"
this.layout = countershape.Layout("_websitelayout.html")
else:
ns.idxpath = "index.html"
this.layout = countershape.Layout("_layout.html")
ns.title = countershape.template.Template(None, "<h1>@!this.title!@</h1>")
this.titlePrefix = "%s - " % version.NAMEVERSION
this.markup = markup.Markdown(extras=["footnotes"])
ns.docMaintainer = "Aldo Cortesi"
ns.docMaintainerEmail = "[email protected]"
ns.copyright = u"\u00a9 mitmproxy project, %s" % datetime.date.today().year
def mpath(p):
p = os.path.join(MITMPROXY_SRC, p)
return os.path.expanduser(p)
with open(mpath("README.mkd")) as f:
readme = f.read()
ns.index_contents = readme.split("\n", 1)[1] #remove first line (contains build status)
def example(s):
d = file(mpath(s)).read().rstrip()
extemp = """<div class="example">%s<div class="example_legend">(%s)</div></div>"""
return extemp%(countershape.template.Syntax("py")(d), s)
ns.example = example
filt_help = []
for i in filt.filt_unary:
filt_help.append(
("~%s"%i.code, i.help)
)
for i in filt.filt_rex:
filt_help.append(
("~%s regex"%i.code, i.help)
)
for i in filt.filt_int:
filt_help.append(
("~%s int"%i.code, i.help)
)
filt_help.sort()
filt_help.extend(
[
("!", "unary not"),
("&", "and"),
("|", "or"),
("(...)", "grouping"),
]
)
ns.filt_help = filt_help
def nav(page, current, state):
if current.match(page, False):
pre = '<li class="active">'
else:
pre = "<li>"
p = state.application.getPage(page)
return pre + '<a href="%s">%s</a></li>'%(model.UrlTo(page), p.title)
ns.nav = nav
ns.navbar = countershape.template.File(None, "_nav.html")
pages = [
Page("index.html", "Introduction"),
Page("install.html", "Installation"),
Page("mitmproxy.html", "mitmproxy"),
Page("mitmdump.html", "mitmdump"),
Page("howmitmproxy.html", "How mitmproxy works"),
Page("ssl.html", "Overview"),
Directory("certinstall"),
Directory("scripting"),
Directory("tutorials"),
Page("transparent.html", "Overview"),
Directory("transparent"),
]
| mit | -5,763,400,009,474,492,000 | 27.202247 | 95 | 0.627888 | false |
e7dal/hexy | setup.py | 1 | 1766 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import imp
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('readme.txt').read()
history = open('history.txt').read().replace('.. :changelog:', '')
curr_path = os.path.dirname(os.path.realpath(__file__))
deps = os.path.join(curr_path, 'requirements.in')
dev_deps = os.path.join(curr_path, 'dev_requirements.in')
requirements = open(deps).read()
test_requirements = open(dev_deps).read()
CODE_DIRECTORY = 'hexy'
metadata = imp.load_source(
'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
#orderdict needed for structlog
sys_version_str='.'.join((str(s) for s in sys.version_info[0:3]))
setup(
name=metadata.package,
version=metadata.version,
author=metadata.authors[0],
author_email=metadata.emails[0],
maintainer=metadata.authors[0],
maintainer_email=metadata.emails[0],
url=metadata.url,
description=metadata.description,
long_description=readme + '\n\n' + history,
packages=[
'hexy',
'hexy.util',
'hexy.commands'
],
package_dir={'hexy':
'hexy'},
py_modules=['hexy'],
include_package_data=True,
install_requires=requirements,
license="GPL-3.0",
zip_safe=False,
keywords='hexy, ascii,hexagonal,drawing,toolkit,widgets',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
entry_points='''
[console_scripts]
hexy = hexy.cli:cli
'''
)
| gpl-3.0 | -8,694,179,322,663,520,000 | 25.757576 | 66 | 0.637599 | false |
evernote/pootle | pootle/core/views.py | 1 | 13622 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
# Copyright 2013-2014 Evernote Corporation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import json
import operator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db.models import ObjectDoesNotExist, ProtectedError, Q
from django.forms.models import modelform_factory
from django.http import Http404, HttpResponse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.defaults import (permission_denied as django_403,
page_not_found as django_404,
server_error as django_500)
from django.views.generic import View
from pootle_misc.util import PootleJSONEncoder, ajax_required, jsonify
class SuperuserRequiredMixin(object):
"""Require users to have the `is_superuser` bit set."""
def dispatch(self, request, *args, **kwargs):
if not request.user.is_superuser:
msg = _('You do not have rights to administer Pootle.')
raise PermissionDenied(msg)
return super(SuperuserRequiredMixin, self) \
.dispatch(request, *args, **kwargs)
class LoginRequiredMixin(object):
"""Require a logged-in user."""
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class TestUserFieldMixin(LoginRequiredMixin):
"""Require a field from the URL pattern to match a field of the
current user.
The URL pattern field used for comparing against the current user
    can be customized by setting the `test_user_field` attribute.
    Note that superusers are always allowed through.
"""
test_user_field = 'username'
def dispatch(self, *args, **kwargs):
user = self.request.user
url_field_value = kwargs[self.test_user_field]
field_value = getattr(user, self.test_user_field, '')
can_access = user.is_superuser or str(field_value) == url_field_value
if not can_access:
raise PermissionDenied(_('You cannot access this page.'))
return super(TestUserFieldMixin, self).dispatch(*args, **kwargs)
class NoDefaultUserMixin(object):
"""Removes the `default` special user from views."""
def dispatch(self, request, *args, **kwargs):
username = kwargs.get('username', None)
if username is not None and username == 'default':
raise Http404
return super(NoDefaultUserMixin, self) \
.dispatch(request, *args, **kwargs)
class AjaxResponseMixin(object):
"""Mixin to add AJAX support to a form.
This needs to be used with a `FormView`.
"""
@method_decorator(ajax_required)
def dispatch(self, *args, **kwargs):
return super(AjaxResponseMixin, self).dispatch(*args, **kwargs)
def render_to_json_response(self, context, **response_kwargs):
data = jsonify(context)
response_kwargs['content_type'] = 'application/json'
return HttpResponse(data, **response_kwargs)
def form_invalid(self, form):
response = super(AjaxResponseMixin, self).form_invalid(form)
return self.render_to_json_response(form.errors, status=400)
def form_valid(self, form):
response = super(AjaxResponseMixin, self).form_valid(form)
return self.render_to_json_response({})
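# A minimal sketch of combining the mixin with a ``FormView``; the form and URL
# below are hypothetical and not part of Pootle itself.
#
#     from django import forms
#     from django.views.generic import FormView
#
#     class RenameForm(forms.Form):
#         name = forms.CharField(max_length=50)
#
#     class RenameView(AjaxResponseMixin, FormView):
#         form_class = RenameForm
#         success_url = '/done/'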
class APIView(View):
"""View to implement internal RESTful APIs.
Based on djangbone https://github.com/af/djangbone
"""
# Model on which this view operates. Setting this is required
model = None
# Base queryset for accessing data. If `None`, model's default manager
# will be used
base_queryset = None
# Set this to restrict the view to a subset of the available methods
restrict_to_methods = None
# Field names to be included
fields = ()
# Individual forms to use for each method. By default it'll
# auto-populate model forms built using `self.model` and `self.fields`
add_form_class = None
edit_form_class = None
# Tuple of sensitive field names that will be excluded from any
# serialized responses
sensitive_field_names = ('password', 'pw')
# Set to an integer to enable GET pagination
page_size = None
# HTTP GET parameter to use for accessing pages
page_param_name = 'p'
# HTTP GET parameter to use for search queries
search_param_name = 'q'
# Field names in which searching will be allowed
search_fields = None
# Override these if you have custom JSON encoding/decoding needs
json_encoder = PootleJSONEncoder()
json_decoder = json.JSONDecoder()
@property
def allowed_methods(self):
methods = [m for m in self.http_method_names if hasattr(self, m)]
if self.restrict_to_methods is not None:
restricted_to = map(lambda x: x.lower(), self.restrict_to_methods)
methods = filter(lambda x: x in restricted_to, methods)
return methods
def __init__(self, *args, **kwargs):
if self.model is None:
raise ValueError('No model class specified.')
self.pk_field_name = self.model._meta.pk.name
if self.base_queryset is None:
self.base_queryset = self.model._default_manager
self._init_fields()
self._init_forms()
return super(APIView, self).__init__(*args, **kwargs)
def _init_fields(self):
if len(self.fields) < 1:
form = self.add_form_class or self.edit_form_class
if form is not None:
self.fields = form._meta.fields
else: # Assume all fields by default
self.fields = (f.name for f in self.model._meta.fields)
self.serialize_fields = (f for f in self.fields if
f not in self.sensitive_field_names)
def _init_forms(self):
if 'post' in self.allowed_methods and self.add_form_class is None:
self.add_form_class = modelform_factory(self.model,
fields=self.fields)
if 'put' in self.allowed_methods and self.edit_form_class is None:
self.edit_form_class = modelform_factory(self.model,
fields=self.fields)
def dispatch(self, request, *args, **kwargs):
if request.method.lower() in self.allowed_methods:
handler = getattr(self, request.method.lower(),
self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
"""GET handler."""
if kwargs.get(self.pk_field_name, None) is not None:
return self.get_single_item(request, *args, **kwargs)
return self.get_collection(request, *args, **kwargs)
def get_single_item(self, request, *args, **kwargs):
"""Returns a single model instance."""
try:
qs = self.base_queryset.filter(pk=kwargs[self.pk_field_name])
assert len(qs) == 1
except AssertionError:
raise Http404
return self.json_response(self.serialize_qs(qs))
def get_collection(self, request, *args, **kwargs):
"""Retrieve a full collection."""
return self.json_response(self.serialize_qs(self.base_queryset))
def post(self, request, *args, **kwargs):
"""Creates a new model instance.
The form to be used can be customized by setting
`self.add_form_class`. By default a model form will be used with
the fields from `self.fields`.
"""
try:
request_dict = self.json_decoder.decode(request.body)
except ValueError:
return self.status_msg('Invalid JSON data', status=400)
form = self.add_form_class(request_dict)
if form.is_valid():
new_object = form.save()
# Serialize the new object to json using our built-in methods.
# The extra DB read here is not ideal, but it keeps the code
# DRY:
wrapper_qs = self.base_queryset.filter(pk=new_object.pk)
return self.json_response(
self.serialize_qs(wrapper_qs, single_object=True)
)
return self.form_invalid(form)
def put(self, request, *args, **kwargs):
"""Update the current model."""
if self.pk_field_name not in kwargs:
return self.status_msg('PUT is not supported for collections',
status=405)
try:
request_dict = self.json_decoder.decode(request.body)
instance = self.base_queryset.get(pk=kwargs[self.pk_field_name])
except ValueError:
return self.status_msg('Invalid JSON data', status=400)
except ObjectDoesNotExist:
raise Http404
form = self.edit_form_class(request_dict, instance=instance)
if form.is_valid():
item = form.save()
wrapper_qs = self.base_queryset.filter(id=item.id)
return self.json_response(
self.serialize_qs(wrapper_qs, single_object=True)
)
return self.form_invalid(form)
def delete(self, request, *args, **kwargs):
"""Delete the model and return its JSON representation."""
if self.pk_field_name not in kwargs:
return self.status_msg('DELETE is not supported for collections',
status=405)
qs = self.base_queryset.filter(id=kwargs[self.pk_field_name])
if qs:
output = self.serialize_qs(qs)
obj = qs[0]
try:
obj.delete()
return self.json_response(output)
except ProtectedError as e:
return self.status_msg(e[0], status=405)
raise Http404
def serialize_qs(self, queryset, single_object=False):
"""Serialize a queryset into a JSON object.
:param single_object: if `True` (or the URL specified an id), it
will return a single JSON object.
If `False`, a JSON object is returned with an array of objects
in `models` and the total object count in `count`.
"""
if single_object or self.kwargs.get(self.pk_field_name):
values = queryset.values(*self.serialize_fields)
# For single-item requests, convert ValuesQueryset to a dict simply
# by slicing the first item
serialize_values = values[0]
else:
search_keyword = self.request.GET.get(self.search_param_name, None)
if search_keyword is not None:
filter_by = self.get_search_filter(search_keyword)
queryset = queryset.filter(filter_by)
values = queryset.values(*self.serialize_fields)
# Process pagination options if they are enabled
if isinstance(self.page_size, int):
try:
page_param = self.request.GET.get(self.page_param_name, 1)
page_number = int(page_param)
offset = (page_number - 1) * self.page_size
except ValueError:
offset = 0
values = values[offset:offset+self.page_size]
serialize_values = {
'models': list(values),
'count': queryset.count(),
}
return self.json_encoder.encode(serialize_values)
def get_search_filter(self, keyword):
search_fields = getattr(self, 'search_fields', None)
if search_fields is None:
search_fields = self.fields # Assume all fields
field_queries = list(
zip(map(lambda x: '%s__icontains' % x, search_fields),
(keyword,)*len(search_fields))
)
lookups = [Q(x) for x in field_queries]
return reduce(operator.or_, lookups)
def status_msg(self, msg, status=400):
data = self.json_encoder.encode({'msg': msg})
return self.json_response(data, status=status)
def form_invalid(self, form):
data = self.json_encoder.encode({'errors': form.errors})
return self.json_response(data, status=400)
def json_response(self, output, **response_kwargs):
response_kwargs['content_type'] = 'application/json'
return HttpResponse(output, **response_kwargs)
def permission_denied(request):
return django_403(request, template_name='errors/403.html')
def page_not_found(request):
return django_404(request, template_name='errors/404.html')
def server_error(request):
return django_500(request, template_name='errors/500.html')
| gpl-2.0 | -2,622,846,277,347,944,000 | 35.132626 | 79 | 0.624284 | false |
projectatomic/atomic-reactor | tests/constants.py | 1 | 4756 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals, absolute_import
import os
from textwrap import dedent
HERE = os.path.dirname(__file__)
FILES = os.path.join(HERE, 'files')
MOCK = os.environ.get('NOMOCK') is None
INPUT_IMAGE = "busybox:latest"
DOCKERFILE_FILENAME = 'Dockerfile'
DOCKERFILE_GIT = "https://github.com/TomasTomecek/docker-hello-world.git"
DOCKERFILE_SHA1 = "6e592f1420efcd331cd28b360a7e02f669caf540"
DOCKERFILE_OK_PATH = os.path.join(FILES, 'docker-hello-world')
DOCKERFILE_MULTISTAGE_PATH = os.path.join(FILES, 'docker-hello-world-multistage')
DOCKERFILE_MULTISTAGE_SCRATCH_PATH = os.path.join(FILES, 'docker-hello-world-multistage-scratch')
DOCKERFILE_MULTISTAGE_CUSTOM_PATH = os.path.join(FILES, 'docker-hello-world-multistage-custom')
DOCKERFILE_MULTISTAGE_CUSTOM_BAD_PATH =\
os.path.join(FILES, 'docker-hello-world-multistage-custom_multiple')
DOCKERFILE_ERROR_BUILD_PATH = os.path.join(FILES, 'docker-hello-world-error-build')
SOURCE_CONFIG_ERROR_PATH = os.path.join(FILES, 'docker-hello-world-error-config')
DOCKERFILE_SUBDIR_PATH = os.path.join(FILES, 'df-in-subdir')
FLATPAK_GIT = "git://pkgs.fedoraproject.org/modules/eog.git"
FLATPAK_SHA1 = "603bb298c8fb60936590e159b7a6387d6e090a09"
SOURCE = {
'provider': 'git',
'uri': DOCKERFILE_GIT,
'provider_params': {
'git_commit': 'master',
}
}
MOCK_SOURCE = {'provider': 'git', 'uri': 'asd'}
REGISTRY_PORT = "5000"
DOCKER0_IP = "172.17.42.1"
TEST_IMAGE_NAME = "atomic-reactor-test-image:latest"
TEST_IMAGE = "atomic-reactor-test-image"
LOCALHOST_REGISTRY = "localhost:%s" % REGISTRY_PORT
DOCKER0_REGISTRY = "%s:%s" % (DOCKER0_IP, REGISTRY_PORT)
LOCALHOST_REGISTRY_HTTP = "http://%s" % LOCALHOST_REGISTRY
DOCKER0_REGISTRY_HTTP = "http://%s" % DOCKER0_REGISTRY
COMMAND = "eporeporjgpeorjgpeorjgpeorjgpeorjgpeorjg"
IMPORTED_IMAGE_ID = 'eee28534d167d7b3297eace1fc32c46aabedc40696e48ae04c7654f974700cc2'
IMAGE_RAISE_RETRYGENERATOREXCEPTION = 'registry.example.com/non-existing-parent-image'
REACTOR_CONFIG_MAP = dedent("""\
version: 1
koji:
hub_url: https://koji.example.com/hub
root_url: https://koji.example.com/root
auth:
proxyuser: proxyuser
krb_principal: krb_principal
krb_keytab_path: /tmp/krb_keytab
odcs:
api_url: https://odcs.example.com/api/1
auth:
ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
insecure: True
signing_intents:
- name: release
keys: [R123]
- name: beta
keys: [R123, B456]
- name: unsigned
keys: []
default_signing_intent: default
smtp:
host: smtp.example.com
from_address: [email protected]
error_addresses:
- [email protected]
domain: example.com
send_to_submitter: True
send_to_pkg_owner: True
arrangement_version: 6
artifacts_allowed_domains:
- download.example.com/released
- download.example.com/candidates
yum_repo_allowed_domains:
- repo1.example.com
- repo2.example.com
image_labels:
vendor: "Spam Inc."
authoritative-source-url: registry.public.example.com
distribution-scope: public
image_label_info_url_format: "https://catalog.example.com/{com.redhat.component}:{name}-{version}"
image_equal_labels:
- [description, io.k8s.description]
openshift:
url: https://openshift.example.com
auth:
enable: True
ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
insecure: True
build_json_dir: /usr/share/osbs/
group_manifests: False
platform_descriptors:
- platform: x86_64
architecture: amd64
content_versions:
- v2
registries:
- url: https://container-registry.example.com/v2
auth:
cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg
- url: https://another-container-registry.example.com
insecure: True
- url: https://better-container-registry.example.com/v2
expected_media_types:
- application/json
source_registry:
url: https://registry.private.example.com
insecure: True
auth:
cfg_path: /var/run/secrets/atomic-reactor/private-registry-dockercfg
sources_command: "fedpkg sources"
required_secrets:
- kojisecret
- odcssecret
- v2-registry-dockercfg
worker_token_secrets:
- x86-64-worker-1
- x86-64-worker-2
prefer_schema1_digest: True
yum_proxy: http://proxy.example.com
hide_files:
tmpdir: /tmp
files:
- /etc/yum.repos.d/repo_ignore_1.repo
- /etc/yum.repos.d/repo_ignore_2.repo
skip_koji_check_for_base_image: False
deep_manifest_list_inspection: True
fail_on_digest_mismatch: True
clusters:
foo:
- name: blah
max_concurrent_builds: 1
""")
| bsd-3-clause | 720,660,319,337,230,600 | 25.276243 | 98 | 0.719512 | false |
simonbr73/nyc-subway-finder | interfaces.py | 1 | 4396 | import sys
class TextInterface:
"""Text-based interface that contains methods for getting input from the user and displaying search
results.
"""
def __init__(self, trains_list):
"""Initializes the TextInterface object with a list of all train objects in the simulation.
"""
self.trains = trains_list
def getUserInput(self):
"""This method prompts the user to specify a direction of travel and a station at which
they want to find trains. If a user types a direction other than n or s, the program will
ask them to provide a valid input before continuing.
"""
print "\nWelcome! This program lets you search for New York City subway trains running on the 1, 2, 3, 4, 5, 6, or S lines."
print "Note that for S trains, northbound is eastbound in real life and southbound is westbound in real life."
print "\nFirst, choose a direction - northbound or southbound. Type n for northbound or s for southbound."
# valid_input will remain False until either n or s is typed
valid_input = False
while valid_input == False:
direction = raw_input()
if direction == "n":
valid_input = True
direction = 'northbound'
elif direction == 's':
valid_input = True
direction = 'southbound'
# If you really don't like our program, you can quit by typing q
elif direction == 'q':
sys.exit()
else:
print "We didn't understand that. Please try again."
print "\nNow, search for the station you want trains from."
station = raw_input()
return direction, station
def showApproachingTrains(self, station, list_of_trains):
"""Takes 2 arguments, the station at which the user is looking for trains and a list of
trains currently approaching that station, where each item in the list is formatted
[train_index, stop_number, arrival_time]. If the list is empty, it informs the user that
no trains are near the station. Otherwise, it looks up information about each train in
the list and displays it to the user.
"""
print "..."
if len(list_of_trains) == 0:
print "Sorry, there aren't any trains currently approaching", station
else:
print "Here is a list of trains arriving at or departing from", station, "in the next 30 minutes:\n"
for train_list in list_of_trains:
train_number = train_list[0] # Used to look up train object in the master list of trains
stop_number = train_list[1]
if int(self.trains[train_number].getArrivalTime(stop_number)) <= 30:
self.trains[train_number].showInfo(stop_number)
print ""
def showStationSearchResults(self, results_list):
"""Takes 1 argument, a list of possible station results. If there is only one possible
result, this function will never be called, so it only has to handle list of length 0 or >1.
If the length of the list is 0, the program will ask the user whether they want to do
another search or quit. Otherwise, all possible results will be displayed next to a unique
integer, and the user will be asked to type in an integer to choose the station they want.
"""
print "..."
if len(results_list) == 0:
print "Sorry, we couldn't find a station with that name.\n"
self.againOrQuit()
else:
print "We found several stations with that name. Please choose one from the list below."
for i in range(len(results_list)):
print (i+1), ': ', results_list[i]
choice = int(raw_input("Type the number of the station you want: "))
return results_list[choice-1]
def againOrQuit(self):
"""Asks the user whether they want to perform a new search or quit the program.
"""
print "Type n to do a new search or q to exit the program."
choice = raw_input()
if choice == "n":
return True
if choice == "q":
return False
else:
print "We didn't understand that. Please try again."
return self.againOrQuit()
| mit | -7,588,274,881,581,400,000 | 48.954545 | 132 | 0.613285 | false |
etianen/moody-templates | src/moody/loader.py | 1 | 5355 | """A caching template loader that allows disk-based templates to be used."""
import os, sys
from abc import ABCMeta, abstractmethod
class TemplateDoesNotExist(Exception):
"""A named template could not be found."""
class Source(metaclass=ABCMeta):
"""A source of template data."""
__slots__ = ()
@abstractmethod
def load_source(self, template_name):
"""
Loads the template source code for the template of the given name.
If no source code can be found, returns None.
"""
class MemorySource(Source):
"""A template loader that loads from memory."""
__slots__ = ("templates",)
def __init__(self, templates):
"""Initializes the MemorySource from a dict of template source strings."""
self.templates = templates
def load_source(self, template_name):
"""Loads the source from the memory template dict."""
return self.templates.get(template_name)
def __str__(self):
"""Returns a string representation."""
return "<memory>"
class DirectorySource(Source):
"""A template loader that loads from a directory on disk."""
__slots__ = ("dirname")
def __init__(self, dirname):
"""
Initializes the DirectorySource.
On windows, the dirname should be specified using forward-slashes.
"""
self.dirname = dirname
def load_source(self, template_name):
"""Loads the source from disk."""
template_path = os.path.normpath(os.path.join(self.dirname, template_name))
if os.path.exists(template_path):
with open(template_path, "r") as template_file:
return template_file.read()
return None
def __str__(self):
"""Returns a string representation."""
return self.dirname
class DebugLoader:
"""
A template loader that doesn't cache compiled templates.
Terrible performance, but great for debugging.
"""
__slots__ = ("_sources", "_parser",)
def __init__(self, sources, parser):
"""
Initializes the Loader.
        When specifying template_dirs on Windows, the forward slash '/' should be used as a path separator.
"""
self._sources = list(reversed(sources))
self._parser = parser
def compile(self, template, name="__string__", params=None, meta=None):
"""Compiles the given template source."""
default_meta = {
"__loader__": self
}
default_meta.update(meta or {})
return self._parser.compile(template, name, params, default_meta)
def _load_all(self, template_name):
"""Loads and returns all the named templates from the sources."""
# Load from all the template sources.
templates = []
for source in self._sources:
template_src = source.load_source(template_name)
if template_src is not None:
meta = {
"__super__": templates and templates[-1] or None,
}
templates.append(self.compile(template_src, template_name, {}, meta))
return templates
def load(self, *template_names):
"""
Loads and returns the named template.
If more than one template name is given, then the first template that exists will be used.
On Windows, the forward slash '/' should be used as a path separator.
"""
if not template_names:
raise ValueError("You must specify at least one template name.")
for template_name in template_names:
templates = self._load_all(template_name)
if templates:
return templates[-1]
# Raise an error.
template_name_str = ", ".join(repr(template_name) for template_name in template_names)
source_name_str = ", ".join(str(source) for source in self._sources)
raise TemplateDoesNotExist("Could not find a template named {} in any of {}.".format(template_name_str, source_name_str))
def render(self, *template_names, **params):
"""
Loads and renders the named template.
If more than one template name is given, then the first template that exists will be used.
On Windows, the forward slash '/' should be used as a path separator.
"""
return self.load(*template_names).render(**params)
class Loader(DebugLoader):
"""
A template loader.
Compiled templates are cached for performance.
"""
__slots__ = ("_cache",)
def __init__(self, sources, parser):
"""Initializes the loader."""
super(Loader, self).__init__(sources, parser)
self._cache = {}
def clear_cache(self, ):
"""Clears the template cache."""
self._cache.clear()
def _load_all(self, template_name):
"""A caching version of the debug loader's load method."""
if template_name in self._cache:
return self._cache[template_name]
template = super(Loader, self)._load_all(template_name)
self._cache[template_name] = template
return template | bsd-3-clause | -6,064,786,215,887,489,000 | 30.692308 | 129 | 0.580579 | false |
ewjoachim/pyler | pyler/euler_test_base.py | 1 | 2100 | import signal
import unittest
import time
from . import website as w
class EulerProblem(unittest.TestCase):
problem_id = None
def solver(self, input_val):
raise NotImplementedError()
simple_input = None
simple_output = None
real_input = None
def solve_real(self):
"""
Returns the solution of the Problem for the real input
"""
return self.solver(self.real_input)
def solve_simple(self):
"""
Returns the solution of the Problem for the simple input
"""
return self.solver(self.simple_input)
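    # A hypothetical concrete problem, shown only to illustrate the expected
    # subclass layout (problem id, inputs and outputs below are made up):
    #
    #   class ProblemExample(EulerProblem):
    #       problem_id = 0
    #       simple_input = 10
    #       simple_output = 23
    #       real_input = 1000
    #
    #       def solver(self, input_val):
    #           return sum(i for i in range(input_val) if i % 3 == 0 or i % 5 == 0)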
@classmethod
def setUpClass(cls):
if cls.solver is EulerProblem.solver:
raise unittest.SkipTest(
"Not running the tests for a not implemented problem")
def test_simple(self):
"""
Checks the simple example
"""
self.assertEqual(self.solve_simple(), self.simple_output)
def test_real(self):
"""
Checks the real problem against the website
"""
website = w.Website()
real_output = self.solve_real()
self.assertTrue(w.check_solution(
website, self.problem_id, solution=real_output))
# Windows has no Alarm signal. Sorry pal.
use_signal = hasattr(signal, "SIGALRM")
def test_time(self):
"""
Checks that the real problem runs under a minute
"""
time_limit = 60
try:
if self.use_signal:
def handler(signum, frame): # pylint: disable=unused-argument
raise TimeoutError()
old_handler = signal.signal(signal.SIGALRM, handler)
signal.alarm(time_limit)
before = time.time()
self.solve_real()
after = time.time()
if after - before > time_limit:
raise TimeoutError()
except TimeoutError:
self.fail("Test failed to end in less than a minute.")
finally:
if self.use_signal:
signal.signal(signal.SIGALRM, old_handler)
| mit | 8,948,021,599,226,098,000 | 26.631579 | 78 | 0.57381 | false |
almeidapaulopt/frappe | frappe/model/db_query.py | 1 | 21364 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six import iteritems, string_types
"""build query for doclistview and return results"""
import frappe, json, copy, re
import frappe.defaults
import frappe.share
import frappe.permissions
from frappe.utils import flt, cint, getdate, get_datetime, get_time, make_filter_tuple, get_filter, add_to_date
from frappe import _
from frappe.model import optional_fields
from frappe.model.utils.user_settings import get_user_settings, update_user_settings
from datetime import datetime
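# A minimal usage sketch of DatabaseQuery.execute(); the doctype, fields and
# filter values are illustrative and assume a working Frappe site context.
def _example_todo_query():
	return DatabaseQuery("ToDo").execute(
		fields=["name", "description", "owner"],
		filters={"status": "Open"},
		order_by="modified desc",
		limit_page_length=20,
	)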
class DatabaseQuery(object):
def __init__(self, doctype):
self.doctype = doctype
self.tables = []
self.conditions = []
self.or_conditions = []
self.fields = None
self.user = None
self.ignore_ifnull = False
self.flags = frappe._dict()
def execute(self, query=None, fields=None, filters=None, or_filters=None,
docstatus=None, group_by=None, order_by=None, limit_start=False,
limit_page_length=None, as_list=False, with_childnames=False, debug=False,
ignore_permissions=False, user=None, with_comment_count=False,
join='left join', distinct=False, start=None, page_length=None, limit=None,
ignore_ifnull=False, save_user_settings=False, save_user_settings_fields=False,
update=None, add_total_row=None, user_settings=None):
if not ignore_permissions and not frappe.has_permission(self.doctype, "read", user=user):
frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(self.doctype))
raise frappe.PermissionError(self.doctype)
		# filters and fields swappable
		# it's hard to remember what comes first
if (isinstance(fields, dict)
or (isinstance(fields, list) and fields and isinstance(fields[0], list))):
# if fields is given as dict/list of list, its probably filters
filters, fields = fields, filters
elif fields and isinstance(filters, list) \
and len(filters) > 1 and isinstance(filters[0], string_types):
# if `filters` is a list of strings, its probably fields
filters, fields = fields, filters
if fields:
self.fields = fields
else:
self.fields = ["`tab{0}`.`name`".format(self.doctype)]
if start: limit_start = start
if page_length: limit_page_length = page_length
if limit: limit_page_length = limit
self.filters = filters or []
self.or_filters = or_filters or []
self.docstatus = docstatus or []
self.group_by = group_by
self.order_by = order_by
self.limit_start = 0 if (limit_start is False) else cint(limit_start)
self.limit_page_length = cint(limit_page_length) if limit_page_length else None
self.with_childnames = with_childnames
self.debug = debug
self.join = join
self.distinct = distinct
self.as_list = as_list
self.ignore_ifnull = ignore_ifnull
self.flags.ignore_permissions = ignore_permissions
self.user = user or frappe.session.user
self.update = update
self.user_settings_fields = copy.deepcopy(self.fields)
if user_settings:
self.user_settings = json.loads(user_settings)
if query:
result = self.run_custom_query(query)
else:
result = self.build_and_run()
if with_comment_count and not as_list and self.doctype:
self.add_comment_count(result)
if save_user_settings:
self.save_user_settings_fields = save_user_settings_fields
self.update_user_settings()
return result
def build_and_run(self):
args = self.prepare_args()
args.limit = self.add_limit()
if args.conditions:
args.conditions = "where " + args.conditions
if self.distinct:
args.fields = 'distinct ' + args.fields
query = """select %(fields)s from %(tables)s %(conditions)s
%(group_by)s %(order_by)s %(limit)s""" % args
return frappe.db.sql(query, as_dict=not self.as_list, debug=self.debug, update=self.update)
def prepare_args(self):
self.parse_args()
self.sanitize_fields()
self.extract_tables()
self.set_optional_columns()
self.build_conditions()
args = frappe._dict()
if self.with_childnames:
for t in self.tables:
if t != "`tab" + self.doctype + "`":
self.fields.append(t + ".name as '%s:name'" % t[4:-1])
# query dict
args.tables = self.tables[0]
# left join parent, child tables
for child in self.tables[1:]:
args.tables += " {join} {child} on ({child}.parent = {main}.name)".format(join=self.join,
child=child, main=self.tables[0])
if self.grouped_or_conditions:
self.conditions.append("({0})".format(" or ".join(self.grouped_or_conditions)))
args.conditions = ' and '.join(self.conditions)
if self.or_conditions:
args.conditions += (' or ' if args.conditions else "") + \
' or '.join(self.or_conditions)
self.set_field_tables()
args.fields = ', '.join(self.fields)
self.set_order_by(args)
self.validate_order_by_and_group_by(args.order_by)
args.order_by = args.order_by and (" order by " + args.order_by) or ""
self.validate_order_by_and_group_by(self.group_by)
args.group_by = self.group_by and (" group by " + self.group_by) or ""
return args
def parse_args(self):
"""Convert fields and filters from strings to list, dicts"""
if isinstance(self.fields, string_types):
if self.fields == "*":
self.fields = ["*"]
else:
try:
self.fields = json.loads(self.fields)
except ValueError:
self.fields = [f.strip() for f in self.fields.split(",")]
for filter_name in ["filters", "or_filters"]:
filters = getattr(self, filter_name)
if isinstance(filters, string_types):
filters = json.loads(filters)
if isinstance(filters, dict):
fdict = filters
filters = []
for key, value in iteritems(fdict):
filters.append(make_filter_tuple(self.doctype, key, value))
setattr(self, filter_name, filters)
def sanitize_fields(self):
'''
regex : ^.*[,();].*
purpose : The regex will look for malicious patterns like `,`, '(', ')', ';' in each
field which may leads to sql injection.
example :
field = "`DocType`.`issingle`, version()"
As field contains `,` and mysql function `version()`, with the help of regex
the system will filter out this field.
'''
regex = re.compile('^.*[,();].*')
blacklisted_keywords = ['select', 'create', 'insert', 'delete', 'drop', 'update', 'case']
blacklisted_functions = ['concat', 'concat_ws', 'if', 'ifnull', 'nullif', 'coalesce',
'connection_id', 'current_user', 'database', 'last_insert_id', 'session_user',
'system_user', 'user', 'version']
def _raise_exception():
frappe.throw(_('Cannot use sub-query or function in fields'), frappe.DataError)
for field in self.fields:
if regex.match(field):
if any(keyword in field.lower() for keyword in blacklisted_keywords):
_raise_exception()
if any("{0}(".format(keyword) in field.lower() \
for keyword in blacklisted_functions):
_raise_exception()
def extract_tables(self):
"""extract tables from fields"""
self.tables = ['`tab' + self.doctype + '`']
# add tables from fields
if self.fields:
for f in self.fields:
if ( not ("tab" in f and "." in f) ) or ("locate(" in f) or ("count(" in f):
continue
table_name = f.split('.')[0]
if table_name.lower().startswith('group_concat('):
table_name = table_name[13:]
if table_name.lower().startswith('ifnull('):
table_name = table_name[7:]
if not table_name[0]=='`':
table_name = '`' + table_name + '`'
if not table_name in self.tables:
self.append_table(table_name)
def append_table(self, table_name):
self.tables.append(table_name)
doctype = table_name[4:-1]
if (not self.flags.ignore_permissions) and (not frappe.has_permission(doctype)):
frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(doctype))
raise frappe.PermissionError(doctype)
def set_field_tables(self):
		'''If there is more than one table, the fieldname must not be ambiguous.
If the fieldname is not explicitly mentioned, set the default table'''
if len(self.tables) > 1:
for i, f in enumerate(self.fields):
if '.' not in f:
self.fields[i] = '{0}.{1}'.format(self.tables[0], f)
def set_optional_columns(self):
"""Removes optional columns like `_user_tags`, `_comments` etc. if not in table"""
columns = frappe.db.get_table_columns(self.doctype)
# remove from fields
to_remove = []
for fld in self.fields:
for f in optional_fields:
if f in fld and not f in columns:
to_remove.append(fld)
for fld in to_remove:
del self.fields[self.fields.index(fld)]
# remove from filters
to_remove = []
for each in self.filters:
if isinstance(each, string_types):
each = [each]
for element in each:
if element in optional_fields and element not in columns:
to_remove.append(each)
for each in to_remove:
if isinstance(self.filters, dict):
del self.filters[each]
else:
self.filters.remove(each)
def build_conditions(self):
self.conditions = []
self.grouped_or_conditions = []
self.build_filter_conditions(self.filters, self.conditions)
self.build_filter_conditions(self.or_filters, self.grouped_or_conditions)
# match conditions
if not self.flags.ignore_permissions:
match_conditions = self.build_match_conditions()
if match_conditions:
self.conditions.append("(" + match_conditions + ")")
def build_filter_conditions(self, filters, conditions, ignore_permissions=None):
"""build conditions from user filters"""
if ignore_permissions is not None:
self.flags.ignore_permissions = ignore_permissions
if isinstance(filters, dict):
filters = [filters]
for f in filters:
if isinstance(f, string_types):
conditions.append(f)
else:
conditions.append(self.prepare_filter_condition(f))
def prepare_filter_condition(self, f):
"""Returns a filter condition in the format:
ifnull(`tabDocType`.`fieldname`, fallback) operator "value"
"""
f = get_filter(self.doctype, f)
tname = ('`tab' + f.doctype + '`')
if not tname in self.tables:
self.append_table(tname)
if 'ifnull(' in f.fieldname:
column_name = f.fieldname
else:
column_name = '{tname}.{fname}'.format(tname=tname,
fname=f.fieldname)
can_be_null = True
# prepare in condition
if f.operator.lower() in ('in', 'not in'):
values = f.value or ''
if not isinstance(values, (list, tuple)):
values = values.split(",")
fallback = "''"
value = (frappe.db.escape((v or '').strip(), percent=False) for v in values)
value = '("{0}")'.format('", "'.join(value))
else:
df = frappe.get_meta(f.doctype).get("fields", {"fieldname": f.fieldname})
df = df[0] if df else None
if df and df.fieldtype in ("Check", "Float", "Int", "Currency", "Percent"):
can_be_null = False
if f.operator.lower() == 'between' and \
(f.fieldname in ('creation', 'modified') or (df and (df.fieldtype=="Date" or df.fieldtype=="Datetime"))):
value = get_between_date_filter(f.value, df)
fallback = "'0000-00-00 00:00:00'"
elif df and df.fieldtype=="Date":
value = getdate(f.value).strftime("%Y-%m-%d")
fallback = "'0000-00-00'"
elif (df and df.fieldtype=="Datetime") or isinstance(f.value, datetime):
value = get_datetime(f.value).strftime("%Y-%m-%d %H:%M:%S.%f")
fallback = "'0000-00-00 00:00:00'"
elif df and df.fieldtype=="Time":
value = get_time(f.value).strftime("%H:%M:%S.%f")
fallback = "'00:00:00'"
elif f.operator.lower() in ("like", "not like") or (isinstance(f.value, string_types) and
(not df or df.fieldtype not in ["Float", "Int", "Currency", "Percent", "Check"])):
value = "" if f.value==None else f.value
fallback = '""'
if f.operator.lower() in ("like", "not like") and isinstance(value, string_types):
# because "like" uses backslash (\) for escaping
value = value.replace("\\", "\\\\").replace("%", "%%")
else:
value = flt(f.value)
fallback = 0
# put it inside double quotes
if isinstance(value, string_types) and not f.operator.lower() == 'between':
value = '"{0}"'.format(frappe.db.escape(value, percent=False))
if (self.ignore_ifnull
or not can_be_null
or (f.value and f.operator.lower() in ('=', 'like'))
or 'ifnull(' in column_name.lower()):
condition = '{column_name} {operator} {value}'.format(
column_name=column_name, operator=f.operator,
value=value)
else:
condition = 'ifnull({column_name}, {fallback}) {operator} {value}'.format(
column_name=column_name, fallback=fallback, operator=f.operator,
value=value)
return condition
def build_match_conditions(self, as_condition=True):
"""add match conditions if applicable"""
self.match_filters = []
self.match_conditions = []
only_if_shared = False
if not self.user:
self.user = frappe.session.user
if not self.tables: self.extract_tables()
meta = frappe.get_meta(self.doctype)
role_permissions = frappe.permissions.get_role_permissions(meta, user=self.user)
self.shared = frappe.share.get_shared(self.doctype, self.user)
if not meta.istable and not role_permissions.get("read") and not self.flags.ignore_permissions:
only_if_shared = True
if not self.shared:
frappe.throw(_("No permission to read {0}").format(self.doctype), frappe.PermissionError)
else:
self.conditions.append(self.get_share_condition())
else:
# apply user permissions?
if role_permissions.get("apply_user_permissions", {}).get("read"):
# get user permissions
user_permissions = frappe.permissions.get_user_permissions(self.user)
self.add_user_permissions(user_permissions,
user_permission_doctypes=role_permissions.get("user_permission_doctypes").get("read"))
if role_permissions.get("if_owner", {}).get("read"):
self.match_conditions.append("`tab{0}`.owner = '{1}'".format(self.doctype,
frappe.db.escape(self.user, percent=False)))
if as_condition:
conditions = ""
if self.match_conditions:
# will turn out like ((blog_post in (..) and blogger in (...)) or (blog_category in (...)))
conditions = "((" + ") or (".join(self.match_conditions) + "))"
doctype_conditions = self.get_permission_query_conditions()
if doctype_conditions:
conditions += (' and ' + doctype_conditions) if conditions else doctype_conditions
# share is an OR condition, if there is a role permission
if not only_if_shared and self.shared and conditions:
conditions = "({conditions}) or ({shared_condition})".format(
conditions=conditions, shared_condition=self.get_share_condition())
return conditions
else:
return self.match_filters
def get_share_condition(self):
return """`tab{0}`.name in ({1})""".format(self.doctype, ", ".join(["'%s'"] * len(self.shared))) % \
tuple([frappe.db.escape(s, percent=False) for s in self.shared])
def add_user_permissions(self, user_permissions, user_permission_doctypes=None):
user_permission_doctypes = frappe.permissions.get_user_permission_doctypes(user_permission_doctypes, user_permissions)
meta = frappe.get_meta(self.doctype)
for doctypes in user_permission_doctypes:
match_filters = {}
match_conditions = []
# check in links
for df in meta.get_fields_to_check_permissions(doctypes):
user_permission_values = user_permissions.get(df.options, [])
cond = 'ifnull(`tab{doctype}`.`{fieldname}`, "")=""'.format(doctype=self.doctype, fieldname=df.fieldname)
if user_permission_values:
if not cint(frappe.get_system_settings("apply_strict_user_permissions")):
condition = cond + " or "
else:
condition = ""
condition += """`tab{doctype}`.`{fieldname}` in ({values})""".format(
doctype=self.doctype, fieldname=df.fieldname,
values=", ".join([('"'+frappe.db.escape(v, percent=False)+'"') for v in user_permission_values]))
else:
condition = cond
match_conditions.append("({condition})".format(condition=condition))
match_filters[df.options] = user_permission_values
if match_conditions:
self.match_conditions.append(" and ".join(match_conditions))
if match_filters:
self.match_filters.append(match_filters)
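	# Hypothetical example of the clause built above: with user permissions
	# {'Company': ['Alpha']} on a link field `company` of doctype 'Task' (and strict
	# user permissions disabled), the appended match condition is roughly
	#   (ifnull(`tabTask`.`company`, "")="" or `tabTask`.`company` in ("Alpha"))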
def get_permission_query_conditions(self):
condition_methods = frappe.get_hooks("permission_query_conditions", {}).get(self.doctype, [])
if condition_methods:
conditions = []
for method in condition_methods:
c = frappe.call(frappe.get_attr(method), self.user)
if c:
conditions.append(c)
return " and ".join(conditions) if conditions else None
def run_custom_query(self, query):
if '%(key)s' in query:
query = query.replace('%(key)s', 'name')
return frappe.db.sql(query, as_dict = (not self.as_list))
def set_order_by(self, args):
meta = frappe.get_meta(self.doctype)
if self.order_by:
args.order_by = self.order_by
else:
args.order_by = ""
# don't add order by from meta if a mysql group function is used without group by clause
group_function_without_group_by = (len(self.fields)==1 and
( self.fields[0].lower().startswith("count(")
or self.fields[0].lower().startswith("min(")
or self.fields[0].lower().startswith("max(")
) and not self.group_by)
if not group_function_without_group_by:
sort_field = sort_order = None
if meta.sort_field and ',' in meta.sort_field:
# multiple sort given in doctype definition
# Example:
# `idx desc, modified desc`
				# will convert to
# `tabItem`.`idx` desc, `tabItem`.`modified` desc
args.order_by = ', '.join(['`tab{0}`.`{1}` {2}'.format(self.doctype,
f.split()[0].strip(), f.split()[1].strip()) for f in meta.sort_field.split(',')])
else:
sort_field = meta.sort_field or 'modified'
sort_order = (meta.sort_field and meta.sort_order) or 'desc'
args.order_by = "`tab{0}`.`{1}` {2}".format(self.doctype, sort_field or "modified", sort_order or "desc")
# draft docs always on top
if meta.is_submittable:
args.order_by = "`tab{0}`.docstatus asc, {1}".format(self.doctype, args.order_by)
def validate_order_by_and_group_by(self, parameters):
"""Check order by, group by so that atleast one column is selected and does not have subquery"""
if not parameters:
return
_lower = parameters.lower()
if 'select' in _lower and ' from ' in _lower:
frappe.throw(_('Cannot use sub-query in order by'))
for field in parameters.split(","):
if "." in field and field.strip().startswith("`tab"):
tbl = field.strip().split('.')[0]
if tbl not in self.tables:
if tbl.startswith('`'):
tbl = tbl[4:-1]
frappe.throw(_("Please select atleast 1 column from {0} to sort/group").format(tbl))
def add_limit(self):
if self.limit_page_length:
return 'limit %s, %s' % (self.limit_start, self.limit_page_length)
else:
return ''
def add_comment_count(self, result):
for r in result:
if not r.name:
continue
r._comment_count = 0
if "_comments" in r:
r._comment_count = len(json.loads(r._comments or "[]"))
def update_user_settings(self):
# update user settings if new search
user_settings = json.loads(get_user_settings(self.doctype))
if hasattr(self, 'user_settings'):
user_settings.update(self.user_settings)
if self.save_user_settings_fields:
user_settings['fields'] = self.user_settings_fields
update_user_settings(self.doctype, user_settings)
def get_order_by(doctype, meta):
order_by = ""
sort_field = sort_order = None
if meta.sort_field and ',' in meta.sort_field:
# multiple sort given in doctype definition
# Example:
# `idx desc, modified desc`
		# will convert to
# `tabItem`.`idx` desc, `tabItem`.`modified` desc
order_by = ', '.join(['`tab{0}`.`{1}` {2}'.format(doctype,
f.split()[0].strip(), f.split()[1].strip()) for f in meta.sort_field.split(',')])
else:
sort_field = meta.sort_field or 'modified'
sort_order = (meta.sort_field and meta.sort_order) or 'desc'
order_by = "`tab{0}`.`{1}` {2}".format(doctype, sort_field or "modified", sort_order or "desc")
# draft docs always on top
if meta.is_submittable:
order_by = "`tab{0}`.docstatus asc, {1}".format(doctype, order_by)
return order_by
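# Example of the default ordering (hypothetical doctype 'Item' with no sort_field
# defined): get_order_by() returns "`tabItem`.`modified` desc", and for submittable
# doctypes "`tabItem`.docstatus asc, `tabItem`.`modified` desc".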
@frappe.whitelist()
def get_list(doctype, *args, **kwargs):
'''wrapper for DatabaseQuery'''
kwargs.pop('cmd', None)
return DatabaseQuery(doctype).execute(None, *args, **kwargs)
def is_parent_only_filter(doctype, filters):
	# check if filters contain only the parent doctype
only_parent_doctype = True
if isinstance(filters, list):
for flt in filters:
if doctype not in flt:
only_parent_doctype = False
if 'Between' in flt:
flt[3] = get_between_date_filter(flt[3])
return only_parent_doctype
def get_between_date_filter(value, df=None):
'''
	return the formatted date as per the given example
[u'2017-11-01', u'2017-11-03'] => '2017-11-01 00:00:00.000000' AND '2017-11-04 00:00:00.000000'
'''
from_date = None
to_date = None
date_format = "%Y-%m-%d %H:%M:%S.%f"
if df:
date_format = "%Y-%m-%d %H:%M:%S.%f" if df.fieldtype == 'Datetime' else "%Y-%m-%d"
if value and isinstance(value, (list, tuple)):
if len(value) >= 1: from_date = value[0]
if len(value) >= 2: to_date = value[1]
if not df or (df and df.fieldtype == 'Datetime'):
to_date = add_to_date(to_date,days=1)
data = "'%s' AND '%s'" % (
get_datetime(from_date).strftime(date_format),
get_datetime(to_date).strftime(date_format))
return data
| mit | -5,802,090,927,492,263,000 | 32.43349 | 120 | 0.66921 | false |
rdkit/rdkit-orig | Code/ML/FeatureSelect/Wrap/testCMIM.py | 2 | 1730 | from rdkit.ML import FeatureSelect as FS
from rdkit import DataStructs as DS
from rdkit import RDConfig
import unittest
class TestCase(unittest.TestCase):
def setUp(self) :
pass
def test0FromList(self) :
examples = []
bv = DS.ExplicitBitVect(5)
bv.SetBitsFromList([0,2,4])
examples.append([0,bv,0])
bv = DS.ExplicitBitVect(5)
bv.SetBitsFromList([0,2,4])
examples.append([0,bv,0])
bv = DS.ExplicitBitVect(5)
bv.SetBitsFromList([0,3,4])
examples.append([0,bv,1])
bv = DS.ExplicitBitVect(5)
bv.SetBitsFromList([0,2,4])
examples.append([0,bv,0])
bv = DS.ExplicitBitVect(5)
bv.SetBitsFromList([0,2])
examples.append([0,bv,1])
r = FS.selectCMIM(examples,2)
self.failUnlessEqual(r,(2,4))
r = FS.selectCMIM(examples,1)
self.failUnlessEqual(r,(2,))
r = FS.selectCMIM(examples,3)
self.failUnlessEqual(r,(2,4,-1))
def test1FromList(self) :
examples = []
bv = DS.SparseBitVect(5)
bv.SetBitsFromList([0,2,4])
examples.append([0,bv,0])
bv = DS.SparseBitVect(5)
bv.SetBitsFromList([0,2,4])
examples.append([0,bv,0])
bv = DS.SparseBitVect(5)
bv.SetBitsFromList([0,3,4])
examples.append([0,bv,1])
bv = DS.SparseBitVect(5)
bv.SetBitsFromList([0,2,4])
examples.append([0,bv,0])
bv = DS.SparseBitVect(5)
bv.SetBitsFromList([0,2])
examples.append([0,bv,1])
r = FS.selectCMIM(examples,2)
self.failUnlessEqual(r,(2,4))
r = FS.selectCMIM(examples,1)
self.failUnlessEqual(r,(2,))
r = FS.selectCMIM(examples,3)
self.failUnlessEqual(r,(2,4,-1))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -1,545,998,874,891,628,800 | 21.763158 | 40 | 0.610405 | false |
thalesians/tsa | src/test/python/thalesians/tsa/q/testq.py | 1 | 5227 | import datetime
import unittest
import cPickle  # needed by the testBlob test below
import thalesians.tsa.q as kdb
class TestDataTypes(unittest.TestCase):
def setUp(self):
self.conn = kdb.q('localhost', 41822)
def testInteger(self):
self.conn.k('{[x]test::x}', (15,))
self.assertEqual(self.conn.k('test'), 15)
self.conn.k('test:2')
self.assertEqual(self.conn.k('test'), 2)
def testFloat(self):
self.conn.k('{[x]test::x}', (15.,))
self.assertEqual(self.conn.k('test'), 15.)
self.conn.k('test:2f')
self.assertEqual(self.conn.k('test'), 2.0)
def testMonth(self):
self.conn.k('{[x]test::x}', (kdb.Month(1),))
self.assertEqual(self.conn.k('test').i, kdb.Month(1).i)
self.conn.k('test:2008.09m')
self.assertEqual(str(self.conn.k('test')), '2008-09')
def testSecond(self):
self.conn.k('{[x]test::x}', (kdb.Second(61),))
self.assertEqual(self.conn.k('test').i, kdb.Second(61).i)
self.conn.k('test:00:01:01')
self.assertEqual(str(self.conn.k('test')), '00:01:01')
self.assertEqual(self.conn.k('test'), kdb.Second(61))
def testMinute(self):
self.conn.k('{[x]test::x}', (kdb.Minute(61),))
self.assertEqual(self.conn.k('test').i, kdb.Minute(61).i)
self.conn.k('test:01:01')
self.assertEqual(str(self.conn.k('test')), '01:01')
self.assertEqual(self.conn.k('test'), kdb.Minute(61))
def testDate(self):
d = datetime.date(2000,1,1)
self.conn.k('{[x;y]test::y}', [0,d])
self.assertEqual(self.conn.k('test'), d)
self.conn.k('test:2008.09.09')
self.assertEqual(str(self.conn.k('test')), '2008-09-09')
self.conn.k('test:1908.09.09')
self.assertEqual(str(self.conn.k('test')), '1908-09-09')
self.assertEqual(self.conn.k('{x}',[datetime.date(2009,7,12)]), datetime.date(2009,7,12))
def testDateTime(self):
dt = datetime.datetime(2000,1,1,12,00)
self.conn.k('{[x]test::x}', (dt,))
self.assertEqual(self.conn.k('test'), dt)
self.conn.k('{[x;y]test::y}', [0,dt])
self.assertEqual(self.conn.k('test'), dt)
self.conn.k('test:2008.09.09T01:01:01.001')
self.assertEqual(str(self.conn.k('test')), '2008-09-09 01:01:01.001000')
self.conn.k('test:1999.09.09T01:01:01.001')
self.assertEqual(str(self.conn.k('test')), '1999-09-09 01:01:01.001000')
self.conn.k('test:1908.09.13T01:01:01.005')
self.assertEqual(str(self.conn.k('test')), '1908-09-13 01:01:01.005000')
def testTime(self):
t = datetime.datetime(2000,1,1,12,00).time()
self.conn.k('{[x]test::x}', (t,))
self.assertEqual(self.conn.k('test'), t)
self.conn.k('test:01:01:01.001')
self.assertEqual(str(self.conn.k('test')), '01:01:01.001000')
self.conn.k('test:15:30:15.001')
self.assertEqual(str(self.conn.k('test')), '15:30:15.001000')
def testString(self):
string = 'teststring'
self.conn.k('{[x]test::x}', (string,))
self.assertEqual(self.conn.k('test'), string)
self.conn.k('test:`$"'+string+'"')
self.assertEqual(str(self.conn.k('test')), string)
def testChar(self):
string = ['t','e']
self.conn.k('{[x]test::x}', (string,))
self.assertEqual(self.conn.k('test'), string)
self.conn.k('test:"'+"".join(string)+'"')
self.assertEqual(self.conn.k('test'), string)
string = 't'
self.conn.k('{[x]test::x}', (string,))
self.assertEqual(self.conn.k('test'), string)
self.conn.k('test:"'+"".join(string)+'"')
self.assertEqual(self.conn.k('test'), string)
def testBlob(self):
dict = {'hello': 'world'}
self.conn.k('test:`$"'+cPickle.dumps(dict)+'"')
self.assertEqual(cPickle.loads(self.conn.k('test')), dict)
def testDict(self):
self.conn.k('test:(enlist `key)!(enlist `value)')
self.conn.k('test')
x = ['key',]
y = ['value',]
dict = kdb.Dict(x, y)
self.assertEqual(self.conn.k('test'), dict)
self.conn.k('{[x]test::x}', (dict,))
self.conn.k('test')
self.assertEqual(self.conn.k('test'), dict)
def testFlip(self):
self.conn.k('test:([]a: 1 2 3;b:`foo`bar`baz)')
self.conn.k('test')
x = ['a','b']
y = [[1, 2, 3],['foo','bar','baz']]
flip = kdb.Flip(kdb.Dict(x,y))
self.assertEqual(self.conn.k('test'), flip)
self.conn.k('{[x]test::x}',(flip,))
self.conn.k('test')
self.assertEqual(self.conn.k('test'), flip)
t = self.conn.k('select b, a from test')
x = ['b','a']
y = [['foo','bar','baz'],[1, 2, 3]]
flip = kdb.Flip(kdb.Dict(x,y))
self.assertEqual(t.x, flip.x)
def testKeyedTables(self):
self.conn.k('test:([a: `a`b`c]b:`foo`bar`baz)')
kdb.td(self.conn.k('test'))
self.conn.k('test:([a: `a`b`c;c: 1 2 3]b:`foo`bar`baz;d: `here`I`come)')
self.assertEqual(str(self.conn.k('test')), """[a,1]foo,here
[b,2]bar,I
[c,3]baz,come
""")
def tearDown(self):
self.conn.close()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -5,831,748,582,302,765,000 | 36.604317 | 97 | 0.548881 | false |
ratzeni/vcf-miner.client | setup.py | 1 | 1546 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'VERSION')) as f:
__version__ = f.read().strip()
with open(os.path.join(here, 'requirements.txt')) as f:
required = f.read().splitlines()
with open(os.path.join(here, 'README.rst')) as f:
long_description = f.read()
extra_files = [os.path.join(here, 'requirements.txt'),
os.path.join(here, 'README.rst'),
os.path.join(here, 'VERSION'),
]
AuthorInfo = (
("Atzeni Rossano", "[email protected]"),
)
setup(name = "vcfminerclient",
version=__version__,
description = "client package for VCFMiner",
long_description=long_description,
author=",".join(a[0] for a in AuthorInfo),
author_email=",".join("<%s>" % a[1] for a in AuthorInfo),
zip_safe=True,
url='https://github.com/ratzeni/vcf-miner.client',
packages=find_packages(exclude=('tests',)),
keywords='utilities',
install_requires=required,
package_data={'': extra_files},
license='MIT',
platforms="Posix; MacOS X; Windows",
classifiers=["Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Internet",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5"],
)
| mit | -9,032,565,296,916,977,000 | 31.893617 | 63 | 0.575679 | false |
marcuschia/ShaniXBMCWork | script.video.F4mProxy/lib/f4mDownloader.py | 1 | 49882 | # -*- coding: utf-8 -*-
import xml.etree.ElementTree as etree
import base64
from struct import unpack, pack
import sys
import io
import os
import time
import itertools
import xbmcaddon
import xbmc
import urllib2,urllib
import traceback
import urlparse
import posixpath
import re
import hmac
import hashlib
import binascii
import zlib
from hashlib import sha256
import cookielib
import akhds
#import youtube_dl
#from youtube_dl.utils import *
addon_id = 'script.video.F4mProxy'
selfAddon = xbmcaddon.Addon(id=addon_id)
__addonname__ = selfAddon.getAddonInfo('name')
__icon__ = selfAddon.getAddonInfo('icon')
downloadPath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))#selfAddon["profile"])
F4Mversion=''
#from Crypto.Cipher import AES
value_unsafe = '%+&;#'
VALUE_SAFE = ''.join(chr(c) for c in range(33, 127)
if chr(c) not in value_unsafe)
def urlencode_param(value):
"""Minimal URL encoding for query parameter"""
return urllib.quote_plus(value, safe=VALUE_SAFE)
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return unpack('!Q', self.read(8))[0]
def read_unsigned_int(self):
return unpack('!I', self.read(4))[0]
def read_unsigned_char(self):
return unpack('!B', self.read(1))[0]
def read_string(self):
res = b''
while True:
char = self.read(1)
if char == b'\x00':
break
res+=char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read(real_size-header_end)
def read_asrt(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
quality_entry_count = self.read_unsigned_char()
quality_modifiers = []
for i in range(quality_entry_count):
quality_modifier = self.read_string()
quality_modifiers.append(quality_modifier)
segment_run_count = self.read_unsigned_int()
segments = []
#print 'segment_run_count',segment_run_count
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
#print 'segments',segments
return {'version': version,
'quality_segment_modifiers': quality_modifiers,
'segment_run': segments,
}
def read_afrt(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
time_scale = self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
quality_entries = []
for i in range(quality_entry_count):
mod = self.read_string()
quality_entries.append(mod)
fragments_count = self.read_unsigned_int()
#print 'fragments_count',fragments_count
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
#print 'fragments',fragments
return {'version': version,
'time_scale': time_scale,
'fragments': fragments,
'quality_entries': quality_entries,
}
def read_abst(self, debug=False):
version = self.read_unsigned_char()
self.read(3) # flags
bootstrap_info_version = self.read_unsigned_int()
streamType=self.read_unsigned_char()#self.read(1) # Profile,Live,Update,Reserved
islive=False
if (streamType & 0x20) >> 5:
islive=True
print 'LIVE',streamType,islive
time_scale = self.read_unsigned_int()
current_media_time = self.read_unsigned_long_long()
smpteTimeCodeOffset = self.read_unsigned_long_long()
movie_identifier = self.read_string()
server_count = self.read_unsigned_char()
servers = []
for i in range(server_count):
server = self.read_string()
servers.append(server)
quality_count = self.read_unsigned_char()
qualities = []
for i in range(server_count):
quality = self.read_string()
qualities.append(server)
drm_data = self.read_string()
metadata = self.read_string()
segments_count = self.read_unsigned_char()
#print 'segments_count11',segments_count
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
#print 'fragments_run_count11',fragments_run_count
fragments = []
for i in range(fragments_run_count):
# This info is only useful for the player, it doesn't give more info
# for the download process
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {'segments': segments,
'movie_identifier': movie_identifier,
'drm_data': drm_data,
'fragments': fragments,
},islive
def read_bootstrap_info(self):
"""
Read the bootstrap information from the stream,
returns a dict with the following keys:
segments: A list of dicts with the following keys
segment_run: A list of (first_segment, fragments_per_segment) tuples
"""
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
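# Minimal usage sketch (variable names are illustrative): given the base64-decoded
# <bootstrapInfo> bytes of an f4m manifest,
#   boot_info, is_live = read_bootstrap_info(bootstrap_bytes)
#   fragments = build_fragments_list(boot_info, None, is_live)
# yields the (segment, fragment) pairs that downloadInternal() below iterates over.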
def build_fragments_list(boot_info, startFromFregment=None, live=True):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
print 'segment_run_table',segment_run_table
# I've only found videos with one segment
#if len(segment_run_table['segment_run'])>1:
# segment_run_table['segment_run']=segment_run_table['segment_run'][-2:] #pick latest
frag_start = boot_info['fragments'][0]['fragments']
#print boot_info['fragments']
# sum(j for i, j in segment_run_table['segment_run'])
first_frag_number=frag_start[0]['first']
last_frag_number=frag_start[-1]['first']
if last_frag_number==0:
last_frag_number=frag_start[-2]['first']
endfragment=0
segment_to_start=None
for current in range (len(segment_run_table['segment_run'])):
seg,fregCount=segment_run_table['segment_run'][current]
#print 'segmcount',seg,fregCount
if (not live):
frag_end=last_frag_number
else:
frag_end=first_frag_number+fregCount-1
if fregCount>10000:
frag_end=last_frag_number
#if frag_end
segment_run_table['segment_run'][current]=(seg,fregCount,first_frag_number,frag_end)
if (not startFromFregment==None) and startFromFregment>=first_frag_number and startFromFregment<=frag_end:
segment_to_start=current
first_frag_number+=fregCount
# print 'current status',segment_run_table['segment_run']
#if we have no index then take the last segment
if segment_to_start==None:
segment_to_start=len(segment_run_table['segment_run'])-1
#if len(segment_run_table['segment_run'])>2:
# segment_to_start=len(segment_run_table['segment_run'])-2;
if live:
startFromFregment=segment_run_table['segment_run'][-1][3]
# if len(boot_info['fragments'][0]['fragments'])>1: #go bit back
# startFromFregment= boot_info['fragments'][0]['fragments'][-1]['first']
else:
startFromFregment= boot_info['fragments'][0]['fragments'][0]['first'] #start from begining
#if len(boot_info['fragments'][0]['fragments'])>2: #go little bit back
# startFromFregment= boot_info['fragments'][0]['fragments'][-2]['first']
#print 'startFromFregment',startFromFregment,boot_info,len(boot_info['fragments'][0]['fragments'])
#print 'segment_to_start',segment_to_start
for currentIndex in range (segment_to_start,len(segment_run_table['segment_run'])):
currentSegment=segment_run_table['segment_run'][currentIndex]
#print 'currentSegment',currentSegment
(seg,fregCount,frag_start,frag_end)=currentSegment
#print 'startFromFregment',startFromFregment,
if (not startFromFregment==None) and startFromFregment>=frag_start and startFromFregment<=frag_end:
frag_start=startFromFregment
#print 'frag_start',frag_start,frag_end
for currentFreg in range(frag_start,frag_end+1):
res.append((seg,currentFreg ))
# print 'fragmentlist',res,boot_info
return res
#totalFrags=sum(j for i, j in segment_run_table['segment_run'])
#lastSegment=segment_run_table['segment_run'][-1]
#lastSegmentStart= lastSegment[0]
#lastSegmentFragCount = lastSegment[1]
#print 'totalFrags',totalFrags
#first_frag_number = frag_start[0]['first']
#startFragOfLastSegment= first_frag_number +totalFrags - lastSegmentFragCount
#for (i, frag_number) in zip(range(1, lastSegmentFragCount+1), itertools.count(startFragOfLastSegment)):
# res.append((lastSegmentStart,frag_number )) #this was i, i am using first segement start
#return res
#segment_run_entry = segment_run_table['segment_run'][0]
#print 'segment_run_entry',segment_run_entry,segment_run_table
#n_frags = segment_run_entry[1]
#startingPoint = segment_run_entry[0]
#fragment_run_entry_table = boot_info['fragments'][0]['fragments']
#frag_entry_index = 0
#first_frag_number = fragment_run_entry_table[0]['first']
#first_frag_number=(startingPoint*n_frags) -(n_frags)+1
#print 'THENUMBERS',startingPoint,n_frags,first_frag_number
#for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
# res.append((startingPoint,frag_number )) #this was i, i am using first segement start
#return res
def join(base,url):
join = urlparse.urljoin(base,url)
url = urlparse.urlparse(join)
path = posixpath.normpath(url[2])
return urlparse.urlunparse(
(url.scheme,url.netloc,path,url.params,url.query,url.fragment)
)
def _add_ns(prop):
#print 'F4Mversion',F4Mversion
return '{http://ns.adobe.com/f4m/%s}%s' %(F4Mversion, prop)
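# Example: with F4Mversion == '1.0', _add_ns('media') returns
# '{http://ns.adobe.com/f4m/1.0}media', the namespaced tag name that
# ElementTree's find()/findall() expect for f4m manifests.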
#class ReallyQuietDownloader(youtube_dl.FileDownloader):
# def to_screen(sef, *args, **kargs):
# pass
class F4MDownloader():
"""
A downloader for f4m manifests or AdobeHDS.
"""
outputfile =''
clientHeader=None
cookieJar=cookielib.LWPCookieJar()
def __init__(self):
self.init_done=False
def getUrl(self,url, ischunkDownloading=False):
try:
post=None
print 'url',url
#openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
cookie_handler = urllib2.HTTPCookieProcessor(self.cookieJar)
openner = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
if post:
req = urllib2.Request(url, post)
else:
req = urllib2.Request(url)
ua_header=False
if self.clientHeader:
for n,v in self.clientHeader:
req.add_header(n,v)
if n=='User-Agent':
ua_header=True
if not ua_header:
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko')
#response = urllib2.urlopen(req)
if self.proxy and ( (not ischunkDownloading) or self.use_proxy_for_chunks ):
req.set_proxy(self.proxy, 'http')
response = openner.open(req)
data=response.read()
return data
except:
print 'Error in getUrl'
traceback.print_exc()
return None
def _write_flv_header2(self, stream):
"""Writes the FLV header and the metadata to stream"""
# FLV header
stream.write(b'FLV\x01')
stream.write(b'\x01')
stream.write(b'\x00\x00\x00\x09')
# FLV File body
stream.write(b'\x00\x00\x00\x09')
def _write_flv_header(self, stream, metadata):
"""Writes the FLV header and the metadata to stream"""
# FLV header
stream.write(b'FLV\x01')
stream.write(b'\x05')
stream.write(b'\x00\x00\x00\x09')
# FLV File body
stream.write(b'\x00\x00\x00\x00')
# FLVTAG
if metadata:
stream.write(b'\x12') # Script data
stream.write(pack('!L',len(metadata))[1:]) # Size of the metadata with 3 bytes
stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
stream.write(metadata)
# All this magic numbers have been extracted from the output file
# produced by AdobeHDS.php (https://github.com/K-S-V/Scripts)
stream.write(b'\x00\x00\x01\x73')
def init(self, out_stream, url, proxy=None,use_proxy_for_chunks=True,g_stopEvent=None, maxbitrate=0, auth='',swf=None):
try:
self.init_done=False
self.total_frags=0
self.init_url=url
self.clientHeader=None
self.status='init'
self.proxy = proxy
self.auth=auth
#self.auth="pvtoken=exp%3D9999999999%7Eacl%3D%252f%252a%7Edata%3DZXhwPTE0MDYzMDMxMTV+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPWQxODA5MWVkYTQ4NDI3NjFjODhjOWQwY2QxNTk3YTI0MWQwOWYwNWI1N2ZmMDE0ZjcxN2QyMTVjZTJkNmJjMDQ%3D%2196e4sdLWrezE46RaCBzzP43/LEM5en2KujAosbeDimQ%3D%7Ehmac%3DACF8A1E4467676C9BCE2721CA5EFF840BD6ED1780046954039373A3B0D942ADC&hdntl=exp=1406303115~acl=%2f*~data=hdntl~hmac=4ab96fa533fd7c40204e487bfc7befaf31dd1f49c27eb1f610673fed9ff97a5f&als=0,2,0,0,0,NaN,0,0,0,37,f,52293145.57,52293155.9,t,s,GARWLHLMHNGA,2.11.3,37&hdcore=2.11.3"
if self.auth ==None or self.auth =='None' :
self.auth=''
if self.proxy and len(self.proxy)==0:
self.proxy=None
self.use_proxy_for_chunks=use_proxy_for_chunks
self.out_stream=out_stream
self.g_stopEvent=g_stopEvent
self.maxbitrate=maxbitrate
if '|' in url:
sp = url.split('|')
url = sp[0]
self.clientHeader = sp[1]
self.clientHeader= urlparse.parse_qsl(self.clientHeader)
print 'header recieved now url and headers are',url, self.clientHeader
self.status='init done'
self.url=url
self.swf=swf
#self.downloadInternal( url)
return self.preDownoload()
#os.remove(self.outputfile)
except:
traceback.print_exc()
self.status='finished'
return False
def preDownoload(self):
global F4Mversion
try:
self.seqNumber=0
self.live=False #todo find if its Live or not
man_url = self.url
url=self.url
print 'Downloading f4m manifest'
manifest = self.getUrl(man_url)#.read()
if not manifest:
return False
print len(manifest)
try:
print manifest
except: pass
self.status='manifest done'
#self.report_destination(filename)
#dl = ReallyQuietDownloader(self.ydl, {'continuedl': True, 'quiet': True, 'noprogress':True})
version_fine="xmlns=\".*?\/([0-9].*?)\""
F4Mversion =re.findall(version_fine, manifest)[0]
#print F4Mversion,_add_ns('media')
auth_patt='<pv-2.0>(.*?)<'
auth_obj =re.findall(auth_patt, manifest)
self.auth20=''
if auth_obj and len(auth_obj)>0:
self.auth20=auth_obj[0] #not doing anything for time being
print 'auth got from xml',self.auth,self.auth20
#quick for one example where the xml was wrong.
if '\"bootstrapInfoId' in manifest:
manifest=manifest.replace('\"bootstrapInfoId','\" bootstrapInfoId')
doc = etree.fromstring(manifest)
print doc
# Added the-one 05082014
# START
# Check if manifest defines a baseURL tag
baseURL_tag = doc.find(_add_ns('baseURL'))
if baseURL_tag != None:
man_url = baseURL_tag.text
url = man_url
self.url = url
print 'base url defined as: %s' % man_url
# END
try:
#formats = [(int(f.attrib.get('bitrate', -1)),f) for f in doc.findall(_add_ns('media'))]
formats=[]
for f in doc.findall(_add_ns('media')):
vtype=f.attrib.get('type', '')
if f.attrib.get('type', '')=='video' or vtype=='' :
formats.append([int(f.attrib.get('bitrate', -1)),f])
print 'format works',formats
except:
formats=[(int(0),f) for f in doc.findall(_add_ns('media'))]
#print 'formats',formats
formats = sorted(formats, key=lambda f: f[0])
if self.maxbitrate==0:
rate, media = formats[-1]
elif self.maxbitrate==-1:
rate, media = formats[0]
else: #find bitrate
brselected=None
rate, media=None,None
for r, m in formats:
if r<=self.maxbitrate:
rate, media=r,m
else:
break
if media==None:
rate, media = formats[-1]
dest_stream = self.out_stream
print 'rate selected',rate
self.metadata=None
try:
self.metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
print 'metadata stream read done'#,media.find(_add_ns('metadata')).text
#self._write_flv_header(dest_stream, metadata)
#dest_stream.flush()
except: pass
# Modified the-one 05082014
# START
# url and href can be used interchangeably
# so if url attribute is not present
# check for href attribute
try:
mediaUrl=media.attrib['url']
except:
mediaUrl=media.attrib['href']
# END
# Added the-one 05082014
# START
# if media url/href points to another f4m file
if '.f4m' in mediaUrl:
sub_f4m_url = join(man_url,mediaUrl)
print 'media points to another f4m file: %s' % sub_f4m_url
print 'Downloading f4m sub manifest'
sub_manifest = self.getUrl(sub_f4m_url)#.read()
if not sub_manifest:
return False
print len(sub_manifest)
try:
print sub_manifest
except: pass
self.status='sub manifest done'
F4Mversion =re.findall(version_fine, sub_manifest)[0]
doc = etree.fromstring(sub_manifest)
print doc
media = doc.find(_add_ns('media'))
if media == None:
return False
try:
self.metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
print 'metadata stream read done'
except: pass
try:
mediaUrl=media.attrib['url']
except:
mediaUrl=media.attrib['href']
# END
try:
bootStrapID = media.attrib['bootstrapInfoId']
except: bootStrapID='xx'
#print 'mediaUrl',mediaUrl
base_url = join(man_url,mediaUrl)#compat_urlparse.urljoin(man_url,media.attrib['url'])
keybase_url=join(man_url,'key_')
if mediaUrl.endswith('/') and not base_url.endswith('/'):
base_url += '/'
self.base_url=base_url
self.keybase_url=keybase_url
bsArray=doc.findall(_add_ns('bootstrapInfo'))
print 'bootStrapID',bootStrapID
#bootStrapID='bootstrap_450'
bootstrap=self.getBootStrapWithId(bsArray,bootStrapID)
if bootstrap==None: #if not available then find any!
print 'bootStrapID NOT Found'
bootstrap=doc.findall(_add_ns('bootstrapInfo'))[0]
else:
print 'found bootstrap with id',bootstrap
#print 'bootstrap',bootstrap
bootstrapURL1=''
try:
bootstrapURL1=bootstrap.attrib['url']
except: pass
bootstrapURL=''
bootstrapData=None
queryString=None
if bootstrapURL1=='':
bootstrapData=base64.b64decode(doc.findall(_add_ns('bootstrapInfo'))[0].text)
#
else:
from urlparse import urlparse
queryString = urlparse(url).query
print 'queryString11',queryString
if len(queryString)==0: queryString=None
if queryString==None or '?' in bootstrap.attrib['url']:
bootstrapURL = join(man_url,bootstrap.attrib['url'])# take out querystring for later
queryString = urlparse(bootstrapURL).query
print 'queryString override',queryString
if len(queryString)==0:
queryString=None
if len(self.auth)>0:
bootstrapURL+='?'+self.auth
queryString=self.auth#self._pv_params('',self.auth20)#not in use
elif len(self.auth20)>0:
queryString=self._pv_params(self.swf,self.auth20)
bootstrapURL+='?'+queryString
else:
print 'queryString!!',queryString
bootstrapURL = join(man_url,bootstrap.attrib['url'])+'?'+queryString
if len(self.auth)>0:
authval=self.auth#self._pv_params('',self.auth20)#not in use
bootstrapURL = join(man_url,bootstrap.attrib['url'])+'?'+authval
queryString=authval
elif len(self.auth20)>0:
authval=self._pv_params(self.swf,self.auth20)#requires swf param
bootstrapURL = join(man_url,bootstrap.attrib['url'])+'?'+authval
queryString=authval
print 'bootstrapURL',bootstrapURL
if queryString==None:
queryString=''
self.bootstrapURL=bootstrapURL
self.queryString=queryString
self.bootstrap, self.boot_info, self.fragments_list,self.total_frags=self.readBootStrapInfo(bootstrapURL,bootstrapData)
self.init_done=True
return True
except:
traceback.print_exc()
return False
def readAKKey(self, data):
messageKeyExists=False
key=""
firstByte=ord(data[0])
pos=1
returnIV=None
if firstByte==12: #version12
pos+=4+4+2+1;
# print 'indeedddd',firstByte
# print 'data',repr(data)
messageByte=ord(data[pos])
pos+=1
messageKeyExists=(messageByte & 4) > 0;
messageIV=(messageByte & 2) > 0;
if messageIV:
pos+=16
# print 'IV exists'
if messageKeyExists:
# print 'Message Key exists!!!'
returnIV=data[pos-16:pos]
d = str(data[pos]);
pos+=1
key = d;
while(d != '\x00'):
d = str(data[pos]);
pos+=1
if d != '\x00':
key+= d;
else:
print 'SOMETHING WRONG.... got other than 12'
            print 1/0  # shouldn't get here; force a crash so the problem is noticed
return messageKeyExists, key,pos,returnIV
def getFrames(self,box_data, remainingdata):
frames=[]
KeepProcessing = False;
currentStep= 0;
tagLen = 0;
if(box_data):
if remainingdata and len(remainingdata)>0:
box_data=remainingdata+box_data
remainingdata=None
lookForTagStart = 0;
KeepProcessing = True;
while(KeepProcessing and lookForTagStart<len(box_data)):
currentStep = ord(box_data[lookForTagStart]);
tagLen = ord(box_data[lookForTagStart + 1]) << 16 | ord(box_data[lookForTagStart + 2]) << 8 | ord(box_data[lookForTagStart + 3]) & 255;
nextTag = lookForTagStart + 11 + tagLen + 4
if (nextTag > len(box_data) and currentStep > 0):
remainingdata = [];
remainingdata=box_data[lookForTagStart:]
KeepProcessing = False;
elif (currentStep > 0):
chunk = []
chunk=box_data[lookForTagStart:lookForTagStart+tagLen + 11 + 4]
frames.append((1,chunk))
elif (currentStep == 0):
KeepProcessing = False;
#if nextTag==len(box_data):
# KeepProcessing=False
#print nextTag, len(box_data)
lookForTagStart = nextTag;
return frames,remainingdata
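    # getFrames() above walks the mdat payload as a sequence of FLV tags: each tag
    # is an 11-byte header, a body whose length is the 24-bit big-endian value in
    # header bytes 1-3, and a 4-byte previous-tag-size trailer, so the next tag
    # starts at offset + 11 + length + 4. A trailing partial tag is returned in
    # remainingdata and prepended to the next fragment's data.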
# #def AES(self,key):
# return Rijndael(key, keySize=16, blockSize=16, padding=padWithPadLen())
# def AES_CBC(self,key):
# return CBC(blockCipherInstance=AES(key))
def addBytesToOutput(self,prefix,data,post,segmentid,buffer):
dataLen=0
if data and len(data)>0:
dataLen=len(data)
#print 'Incomming',repr(prefix)
prefix=list(prefix)
prefix[3]=chr(dataLen & 255)
prefix[2]=chr(dataLen >> 8 & 255);
prefix[1]=chr(dataLen >> 16 & 255);
#print repr(prefix)
prefix=''.join(prefix)
#print repr(prefix)
#print len(prefix)
finalArray=prefix
if data and len(data)>0:
finalArray+=data
if post and len(post):
finalArray+=post
# with open("c:\\temp\\myfile.mp4", 'a+b') as output:
# output.write(finalArray)
lenReturned=len(finalArray)
buffer.write(finalArray)
buffer.flush()
return lenReturned
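    # Note: addBytesToOutput() above rewrites bytes 1-3 of the 11-byte FLV tag
    # header (the 24-bit big-endian DataSize field) so it matches the length of the
    # possibly-decrypted payload, then writes header + payload + previous-tag-size
    # trailer to the output buffer and returns the number of bytes written.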
def keep_sending_video(self,dest_stream, segmentToStart=None, totalSegmentToSend=0):
try:
self.status='download Starting'
self.downloadInternal(self.url,dest_stream,segmentToStart,totalSegmentToSend)
except:
traceback.print_exc()
self.status='finished'
def downloadInternal(self,url,dest_stream ,segmentToStart=None,totalSegmentToSend=0):
global F4Mversion
try:
#dest_stream = self.out_stream
queryString=self.queryString
print 'segmentToStart',segmentToStart
if self.live or segmentToStart==0 or segmentToStart==None:
print 'writing metadata'#,len(self.metadata)
self._write_flv_header(dest_stream, self.metadata)
dest_stream.flush()
#elif segmentToStart>0 and not self.live:
# self._write_flv_header2(dest_stream)
# dest_stream.flush()
url=self.url
bootstrap, boot_info, fragments_list,total_frags=(self.bootstrap, self.boot_info, self.fragments_list,self.total_frags)
print boot_info, fragments_list,total_frags
self.status='bootstrap done'
self.status='file created'
self.downloaded_bytes = 0
self.bytes_in_disk = 0
self.frag_counter = 0
start = time.time()
frags_filenames = []
self.seqNumber=0
if segmentToStart and not self.live :
self.seqNumber=segmentToStart
if self.seqNumber>=total_frags:
self.seqNumber=total_frags-1
#for (seg_i, frag_i) in fragments_list:
#for seqNumber in range(0,len(fragments_list)):
self.segmentAvailable=0
frameSent=0
keyValue=""
keyData=None
firstPacket=True
remainingFrameData=None
decrypter=None
errors=0
file=0
lastIV=None
AKSession=None
while True:
#if not self.live:
# _write_flv_header2
try:
if self.g_stopEvent.isSet():
return
except: pass
seg_i, frag_i=fragments_list[self.seqNumber]
self.seqNumber+=1
frameSent+=1
name = u'Seg%d-Frag%d' % (seg_i, frag_i)
#print 'base_url',base_url,name
if AKSession:
name+=AKSession
url = self.base_url + name
if queryString and '?' not in url:
url+='?'+queryString
elif '?' in self.base_url:
url = self.base_url.split('?')[0] + name+'?'+self.base_url.split('?')[1]
#print(url),base_url,name
#frag_filename = u'%s-%s' % (tmpfilename, name)
#success = dl._do_download(frag_filename, {'url': url})
print 'downloading....',url
success=False
urlTry=0
while not success and urlTry<5:
success = self.getUrl(url,True)
if not success: xbmc.sleep(300)
urlTry+=1
print 'downloaded',not success==None,url
if not success:
return False
#with open(frag_filename, 'rb') as down:
if 1==1:
down_data = success#down.read()
reader = FlvReader(down_data)
while True:
_, box_type, box_data = reader.read_box_info()
print 'box_type',box_type,len(box_data)
#if box_type == b'afra':
# dest_stream.write(box_data)
# dest_stream.flush()
# break
if box_type == b'mdat':
isDrm=True if ord(box_data[0])&1 else False
boxlength=len(box_data)
seglen=0
file+=1
# if file>6: print 1/0
skip=False
doDecrypt=False
# print 'first byte',repr(box_data[0]),'kk'
isAkamaiEncrypted=True if ord(box_data[0])==11 or ord(box_data[0])==10 else False
if isAkamaiEncrypted:
# print 'Total MDAT count',len(box_data), len(box_data)%16
_loc8_ = ord(box_data[1]) << 16 | ord(box_data[2]) << 8 | ord(box_data[3]) & 255;
_loc9_ = box_data[11:11+_loc8_]
# print 'this is encrypted',len(_loc9_),_loc8_,repr(box_data[1:70])
keyExists,Key,dataread,lastIV=self.readAKKey(_loc9_)
if keyExists:
# print 'key exists and its len is ',_loc8_,repr(Key)
doDecrypt=True
keyValueNew=Key.split('key_')[1]
# print 'previous key is'+keyValue,'new key is',keyValueNew
if keyValue=="":
keyValue="_"+keyValueNew
AKSession=keyValue
keyurl = self.keybase_url +keyValueNew
if queryString and '?' not in keyurl:
keyurl+='?'+queryString+'&guid=CHRLRCMRHGUD'
print 'the key url is ',keyurl,'thanks'
keyData=self.getUrl(keyurl,False)
skip=False
firstPacket=True
elif not keyValue=="_"+keyValueNew:
keyValue="_"+keyValueNew#take new key
AKSession=keyValue
keyurl = self.keybase_url +keyValueNew
if queryString and '?' not in keyurl:
keyurl+='?'+queryString+'&guid=CHRLRCMRHGUD'
keyData=self.getUrl(keyurl,False)
firstPacket=True
#todo decryptit! and put it in box_data
#print 'before skip'
if skip:
break;
if keyData:
print 'key data is',repr(keyData),len(keyData)
#do decrypt here. frame by frame
#now generate frames
#put remaining in remaining
#for each frame decrypt and write and flush
try:
frames=[]
# print 'before frames data', repr(box_data[0:70])
frames,remainingFrameData=self.getFrames(box_data,remainingFrameData)
# print 'after frames data first frame', repr(frames[0][0:70])
#print 'frames',frames
for frame in frames:
data=frame[1]
datalen=ord(data[1]) << 16 | ord(data[2]) << 8 | ord(data[3]) & 255;
preFrame=len(data)
#print 'samp>',len(data),datalen,ord(data[0]) ,'<samp'
if firstPacket:
firstPacket=False
# data=data[0:datalen]
#print 'first>',len(data),ord(data[0]),datalen,'<first'
# else:
if 1==1:
#if not not key frame then decrypt else
firstByte=ord(data[0])
frameHeader=data[:11]
framePad=data[11 + datalen:11 + datalen+4];
if firstByte==10 or firstByte==11:
if firstByte==10:
frameHeader = list(frameHeader)
frameHeader[0]=chr(8)
frameHeader=''.join(frameHeader)
if firstByte==11:
frameHeader = list(frameHeader)
frameHeader[0]=chr(9)
frameHeader=''.join(frameHeader)
data=data[11:11+datalen]
#print 'sub>',len(data),firstByte,datalen,datalen%16,len(data)%16 ,'<sub'
keyExistsNew,KeyNew,dataread,ignoreIV=self.readAKKey(data)
# print 'dataread',dataread,keyExistsNew,KeyNew,ignoreIV
try:
data=akhds.tagDecrypt(data,keyData)
except:
print 'decryption error'
errors+=1
traceback.print_exc()
if errors>10: print 1/0
# print 'pre return size %d, %d %d'%(len(frameHeader),len(data), len(framePad))
seglen1=self.addBytesToOutput(frameHeader,data,framePad,1,dest_stream)
seglen+=seglen1
# print 'pre frame %d, after %d'%(preFrame,seglen1)
else:
print 'hmm no 10 or 11?'
# print 'pre return size %d, %d %d'%(len(frameHeader),len(data), len(framePad))
seglen1=self.addBytesToOutput(frameHeader,None,None,1,dest_stream)
seglen+=seglen1
# print 'pre frame %d, after %d'%(preFrame,seglen1)
#est_stream.write(data)
#dest_stream.flush()
#dest_stream.write(self.decryptData(data,keyData))
#dest_stream.flush()
except:
print traceback.print_exc()
self.g_stopEvent.set()
else:
dest_stream.write(box_data)
dest_stream.flush()
print 'box length is %d and seg total is %d'%(boxlength,seglen)
break
# Using the following code may fix some videos, but
# only in mplayer, VLC won't play the sound.
# mdat_reader = FlvReader(box_data)
# media_type = mdat_reader.read_unsigned_char()
# while True:
# if mdat_reader.read_unsigned_char() == media_type:
# if mdat_reader.read_unsigned_char() == 0x00:
# break
# dest_stream.write(pack('!B', media_type))
# dest_stream.write(b'\x00')
# dest_stream.write(mdat_reader.read())
# break
self.status='play'
if self.seqNumber==len(fragments_list) or (totalSegmentToSend>0 and frameSent==totalSegmentToSend):
if not self.live:
break
self.seqNumber=0
#todo if the url not available then get manifest and get the data again
total_frags=None
try:
bootstrap, boot_info, fragments_list,total_frags=self.readBootStrapInfo(self.bootstrapURL,None,updateMode=True,lastSegment=seg_i, lastFragement=frag_i)
except:
traceback.print_exc()
pass
if total_frags==None:
break
del self.downloaded_bytes
del self.frag_counter
except:
traceback.print_exc()
return
def getBootStrapWithId (self,BSarray, id):
try:
for bs in BSarray:
print 'compare val is ',bs.attrib['id'], 'id', id
if bs.attrib['id']==id:
print 'gotcha'
return bs
except: pass
return None
def readBootStrapInfo(self,bootstrapUrl,bootStrapData, updateMode=False, lastFragement=None,lastSegment=None):
try:
retries=0
while retries<=10:
try:
if self.g_stopEvent.isSet():
print 'event is set. returning'
return
except: pass
if bootStrapData==None:
bootStrapData =self.getUrl(bootstrapUrl)
if bootStrapData==None:
retries+=1
continue
#print 'bootstrapData',len(bootStrapData)
bootstrap = bootStrapData#base64.b64decode(bootStrapData)#doc.findall(_add_ns('bootstrapInfo'))[0].text)
#print 'boot stream read done'
boot_info,self.live = read_bootstrap_info(bootstrap)
#print 'boot_info read done',boot_info
newFragement=None
if not lastFragement==None:
newFragement=lastFragement+1
fragments_list = build_fragments_list(boot_info,newFragement,self.live)
total_frags = len(fragments_list)
#print 'fragments_list',fragments_list, newFragement
#print lastSegment
if updateMode and (len(fragments_list)==0 or ( newFragement and newFragement>fragments_list[0][1])):
#todo check lastFragement to see if we got valid data
print 'retrying......'
bootStrapData=None
retries+=1
xbmc.sleep(2000)
continue
return bootstrap, boot_info, fragments_list,total_frags
except:
traceback.print_exc()
def _pv_params(self, pvswf, pv):
"""Returns any parameters needed for Akamai HD player verification.
Algorithm originally documented by KSV, source:
http://stream-recorder.com/forum/showpost.php?p=43761&postcount=13
"""
#return pv;
#pv="ZXhwPTE0NDAxNTUyODJ+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPWMyZjk4MmVjZjFjODQyM2IzZDkxMzExMjNmY2ExN2U4Y2UwMjU4NWFhODg3MWFjYzM5YmI0MmVlNTYxYzM5ODc="
# pv="ZXhwPTE0NDAzMjc3ODF+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPTYyYTE2MzU2MTNjZTI4ZWI2MTg0MmRjYjFlZTZlYTYwYTA5NWUzZDczNTQ5MTQ1ZDVkNTc0M2M2Njk5MDJjNjY="
# pv="ZXhwPTE0Mzk2MDgzMTl+YWNsPSUyZip+ZGF0YT1wdmMsc35obWFjPTExYTJiNzQ4NjQyYmY1M2VlNzk5MzhhNTMzNjc1MTAzZjk2NWViOGVhODY4MzUwODkwZGM1MjVmNjI3ODM4MzQ="
try:
data, hdntl = pv.split(";")
except ValueError:
data = pv
hdntl = ""
print 'DATA IS',data
print 'hdntl IS',hdntl
if data=="": return hdntl
first_stage_msg=binascii.unhexlify('056377146640142763057567157640125041016376130175171220177717044510157134116364123221072012122137150351003442036164015632157517073355151142067436113220106435137171174171127530157325044270025004')
first_stage_key=data
hash_data=""
if pvswf is None:
print 'swf required for pv2 decryption'
pvswf=""
if pvswf.startswith('http'):
swf = self.getUrl(pvswf,False)
hash = hashlib.sha256()
hash.update(self.swfdecompress(swf))
hash = base64.b64encode(hash.digest()).decode("ascii")
else:
hash=pvswf # the incoming is the hash!
print 'hash',hash
# shouldhash="AFe6zmDCNudrcFNyePaAzAn/KRT5ES99ql4SNqldM2I="
# if shouldhash==hash:
# print '**************HASH MATCH ********************'
# else:
# print '********* NOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTT**********'
second_stage_key = hmac.new(first_stage_key,first_stage_msg , sha256).digest()
# second_stage_data=hash_data #
second_stage_data=base64.b64decode( hash)
buffer="106,45,165,20,106,45,165,20,38,45,165,87,11,98,228,14,107,89,233,25,101,36,223,76,97,28,175,18,23,86,164,6,1,56,157,64,123,58,186,100,54,34,184,14,3,44,164,20,106,6,222,84,122,45,165,20,106,28,196,84,122,111,183,84,122,45,165,20,106,45,165,20,106,45,165,20,106,45,165,20,106,45,165,20,106,45,165,20,106,45,165,20,106,45,165,20"
buffer=buffer.split(',');
second_stage_data+=chr(int(buffer[len(second_stage_data)]))
# print len(second_stage_data),repr(second_stage_data)
third_stage_key= hmac.new(second_stage_key, second_stage_data, sha256).digest()
#hash=shouldhash
msg = "exp=9999999999~acl=%2f%2a~data={0}!{1}".format(data, hash)
auth = hmac.new(third_stage_key, msg.encode("ascii"), sha256)
pvtoken = "{0}~hmac={1}".format(msg, auth.hexdigest())
# The "hdntl" parameter can be accepted as a cookie or passed in the
# query string, but the "pvtoken" parameter can only be in the query
# string
print 'pvtoken',pvtoken
params=urllib.urlencode({'pvtoken':pvtoken})+'&'+hdntl+'&hdcore=2.11.3'
params=params.replace('%2B','+')
params=params.replace('%2F','/')
print params
return params
def swfdecompress(self,data):
if data[:3] == b"CWS":
data = b"F" + data[1:8] + zlib.decompress(data[8:])
return data
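    # swfdecompress() turns a zlib-compressed "CWS" SWF into an uncompressed "FWS"
    # blob, so the SHA-256 hash used by _pv_params() for Akamai player verification
    # is computed over the same bytes the server expects.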
| gpl-2.0 | -2,045,013,516,949,013,200 | 42.145133 | 548 | 0.494427 | false |
lluisball/godenerg | axpert/charger.py | 1 | 2546 | from time import sleep
from datetime import datetime, timedelta
from axpert.protocol import (
CMD_REL, parse_inverter_conf, empty_inverter_conf, CmdSpec
)
from axpert.settings import charger_conf
from axpert.datalogger import get_avg_last
FLOAT_VOL = charger_conf['float_voltage']
ABSORB_VOL = charger_conf['absorbtion_voltage']
ABSORB_AMPS_THRESHOLD = charger_conf['absorbtion_amps_threshold']
CHARGE_START_CHECK = charger_conf['charge_check_start']
CHARGE_END_CHECK = charger_conf['charge_check_end']
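# Charging strategy (summary inferred from the code below, not from the original
# comments): around 03:00-04:00 the inverter's float-voltage setting is raised to
# ABSORB_VOL to force an absorption charge; during the daytime check window it is
# dropped back to FLOAT_VOL once the battery has averaged close to ABSORB_VOL with
# charge current below ABSORB_AMPS_THRESHOLD.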
def get_inverter_conf(executor):
try:
response = executor(CMD_REL.get('settings'))
return parse_inverter_conf(response.data)
except:
return empty_inverter_conf()
def set_float_volts_to(log, executor, target):
try:
log.info('Changing float charge setting to %.1f' % target)
executor(CmdSpec(code='PBFT', size=11, val='%.1f'% target, json=None))
except Exception as e:
log.error('Could not set the float charge setting')
log.exception(e)
def manual_charger(log, executor):
def _stop_charge_check(now):
if now.hour in range(CHARGE_START_CHECK, CHARGE_END_CHECK + 1) \
and now.minute in [1, 10, 20, 30, 40, 50] \
and now.second in [1, 15, 30, 45]:
inverter_conf = get_inverter_conf(executor)
if not inverter_conf.float_volt \
or inverter_conf.float_volt == FLOAT_VOL:
return
avg_last_batt_volts, avg_last_batt_amps = get_avg_last(
log, minutes=30
)
if (ABSORB_VOL - 0.20) < avg_last_batt_volts < (ABSORB_VOL + 0.20)\
and avg_last_batt_amps < ABSORB_AMPS_THRESHOLD:
set_float_volts_to(log, executor, FLOAT_VOL)
def _start_charge_check(now):
if now.hour in [3, 4] \
and now.minute in [1, 3] \
and now.second in [1, 10, 20]:
inverter_conf = get_inverter_conf(executor)
if inverter_conf.float_volt \
and inverter_conf.float_volt == FLOAT_VOL:
set_float_volts_to(log, executor, ABSORB_VOL)
while True:
now = datetime.now()
try:
_start_charge_check(now)
_stop_charge_check(now)
except Exception as e:
log.error('Error in charger!')
log.error(e)
finally:
sleep(1)
| gpl-3.0 | 5,399,676,398,103,707,000 | 34.361111 | 79 | 0.569521 | false |
wooster/framework_depviz | depviz.py | 1 | 10369 | #!/usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2013 Andrew Wooster
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import subprocess
import sys
def escape_arg(argument):
"""Escapes an argument to a command line utility."""
argument = argument.replace('\\', "\\\\").replace("'", "\'").replace('"', '\\"').replace("!", "\\!").replace("`", "\\`")
return "\"%s\"" % argument
def run_command(command, verbose=False):
"""Runs the command and returns the status and the output."""
if verbose:
sys.stderr.write("Running: %s\n" % command)
p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdin, stdout = (p.stdin, p.stdout)
output = stdout.read()
output = output.strip("\n")
status = stdin.close()
stdout.close()
p.wait()
return (p.returncode, output)
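# Usage sketch (mirrors the calls below): returns the exit code and captured stdout,
# e.g. (status, output) = run_command("otool -L %s" % escape_arg(framework_binary))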
DEPENDENCY_PRIVATE = 'Private'
DEPENDENCY_PUBLIC = 'Public'
DEPENDENCY_UNKNOWN = 'Unknown'
def dependencies_for_resolved_framework_path(lib_base, framework_path, dependencies, dep_to_visibility, exclude_dylibs=True):
def visibility_from_path(path):
visibility = DEPENDENCY_UNKNOWN
if '/PrivateFrameworks/' in path:
visibility = DEPENDENCY_PRIVATE
elif '/Frameworks/' in path:
visibility = DEPENDENCY_PUBLIC
return visibility
real_framework_path = framework_path
if not framework_path.startswith(lib_base):
real_framework_path = lib_base + framework_path
if not os.path.exists(real_framework_path):
real_framework_path = framework_path
if not os.path.exists(real_framework_path):
print >> sys.stderr, "Unable to find framework:", real_framework_path
return
(path, filename) = os.path.split(real_framework_path)
(base, ext) = os.path.splitext(filename)
(status, output) = run_command("otool -L %s" % escape_arg(real_framework_path))
lines = output.splitlines()
dep_to_visibility[base] = visibility_from_path(real_framework_path)
for line in lines:
if not line.startswith("\t"):
continue
if not "(" in line:
continue
parts = line.split("(")
if not len(parts) > 1:
continue
f_path = parts[0].strip()
(_, depname) = os.path.split(f_path)
if depname == base:
# Exclude self-dependency.
continue
visibility = visibility_from_path(f_path)
if exclude_dylibs and f_path.endswith("dylib"):
continue
should_recurse = (dep_to_visibility.get(depname) is None)
dep_to_visibility[depname] = visibility
dependencies.setdefault(base, [])
if not depname in dependencies[base]:
dependencies[base].append(depname)
if should_recurse:
dependencies_for_resolved_framework_path(lib_base, f_path, dependencies, dep_to_visibility, exclude_dylibs=exclude_dylibs)
def dependencies_for_framework_path(framework_path, dependencies, dep_to_visibility, exclude_dylibs=True):
(path, filename) = os.path.split(framework_path)
(base, ext) = os.path.splitext(filename)
lib_path = os.path.join(framework_path, base)
lib_parts = lib_path.split(os.sep)
lib_base_parts = []
for part in lib_parts:
if part == "System":
break
lib_base_parts.append(part)
lib_base = os.sep.join(lib_base_parts)
return dependencies_for_resolved_framework_path(lib_base, lib_path, dependencies, dep_to_visibility, exclude_dylibs=exclude_dylibs)
def dependencies_for_system_library_path(library_path):
entries = os.listdir(library_path)
if "/System/Library" not in library_path or "Frameworks" not in entries or "PrivateFrameworks" not in entries:
print >> sys.stderr, "Path doesn't look like it points to the System/Library folder of an SDK."
sys.exit(1)
dependencies = {}
dep_to_visibility = {}
def update_dependencies(dependencies, dep_to_visibility, library_path, f_path):
framework_paths = os.listdir(os.path.join(library_path, f_path))
for framework_path in framework_paths:
if not framework_path.endswith(".framework"):
continue
full_path = os.path.join(library_path, f_path, framework_path)
dependencies_for_framework_path(full_path, dependencies, dep_to_visibility)
update_dependencies(dependencies, dep_to_visibility, library_path, "Frameworks")
update_dependencies(dependencies, dep_to_visibility, library_path, "PrivateFrameworks")
return (dependencies, dep_to_visibility)
def dot_for_dependencies(dependencies, dep_to_visibility, framework_depnames=None):
l = []
l.append("digraph G {")
l.append("\tnode [shape=box];")
def color_for_visibility(visibility):
if visibility == DEPENDENCY_PRIVATE:
return "#FFD1E0"
elif visibility == DEPENDENCY_PUBLIC:
return "#D1FFD2"
else:
return "#FFFFFF"
if framework_depnames is None:
nodes = {}
seen_deps = []
i = 0
for framework_name, visibility in dep_to_visibility.iteritems():
if framework_name in seen_deps:
continue
nodename = "Node%d" % i
i += 1
nodes[framework_name] = nodename
seen_deps.append(framework_name)
color = color_for_visibility(dep_to_visibility[framework_name])
l.append("\t%s [label=\"%s\", fillcolor=\"%s\"];" % (nodename, framework_name, color))
for framework_name, deps in dependencies.iteritems():
if nodes.get(framework_name) is None:
print >> sys.stderr, "Unknown node", framework_name
continue
from_nodename = nodes[framework_name]
if len(deps) == 0:
l.append("\t\"%s\" -> {};" % framework_name)
for lib_name in deps:
to_nodename = nodes[lib_name]
l.append("\t%s -> %s; // %s -> %s" % (from_nodename, to_nodename, framework_name, lib_name))
else:
def gather_dependents(dependencies, framework_name, seen=None):
"""Get a list of all the frameworks wich depend on framework_name, recursively."""
results = []
if seen is None:
seen = []
for framework, deps in dependencies.iteritems():
if framework_name in deps:
if framework in seen:
continue
seen.append(framework)
# framework depends on framework_name
results.append(framework_name)
for result in gather_dependents(dependencies, framework, seen=seen):
results.append(result)
return list(set(results))
frameworks_to_print = []
for framework_depname in framework_depnames:
for f in gather_dependents(dependencies, framework_depname):
frameworks_to_print.append(f)
frameworks_to_print = list(set(frameworks_to_print))
nodes = {}
seen_deps = []
i = 0
for framework_name, visibility in dep_to_visibility.iteritems():
if framework_name in seen_deps:
continue
if framework_name not in frameworks_to_print:
continue
nodename = "Node%d" % i
i += 1
nodes[framework_name] = nodename
seen_deps.append(framework_name)
color = color_for_visibility(dep_to_visibility[framework_name])
l.append("\t%s [label=\"%s\", style=filled, fillcolor=\"%s\"];" % (nodename, framework_name, color))
for framework_name, deps in dependencies.iteritems():
if framework_name in frameworks_to_print:
if nodes.get(framework_name) is None:
print >> sys.stderr, "Unknown node", framework_name
continue
from_nodename = nodes[framework_name]
if len(deps) == 0:
l.append("\t\"%s\" -> {};" % framework_name)
for lib_name in deps:
if lib_name in frameworks_to_print:
to_nodename = nodes[lib_name]
l.append("\t%s -> %s; // %s -> %s" % (from_nodename, to_nodename, framework_name, lib_name))
l.append("}\n")
return "\n".join(l)
def main(library_path, framework_depnames=None):
library_path = os.path.expanduser(library_path)
(dependencies, dep_to_visibility) = dependencies_for_system_library_path(library_path)
dot_output = dot_for_dependencies(dependencies, dep_to_visibility, framework_depnames=framework_depnames)
print >> sys.stdout, dot_output
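# The emitted DOT text can be piped to Graphviz to produce an image, e.g. (assumed
# invocation): python depviz.py /path/to/SDK/System/Library UIKit | dot -Tpng -o deps.png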
if __name__ == "__main__":
if len(sys.argv) < 2:
print >> sys.stderr, "Usage: %s [SDK System Library Path] [framework name ...]"
print >> sys.stderr, " Where the library path is like /System/Library"
print >> sys.stderr, " Where the framework name (optional) is one to determine what depends on it"
sys.exit(1)
framework_depnames = None
if len(sys.argv) > 2:
framework_depnames = sys.argv[2:]
main(sys.argv[1], framework_depnames=framework_depnames)
| mit | -6,358,658,463,639,979,000 | 43.311966 | 135 | 0.621564 | false |
jsubpy/jsub | jsub/exts/repo/file_system.py | 1 | 2428 | import os
import json
import logging
import fcntl
from jsub.util import safe_mkdir
from jsub.util import safe_rmdir
from jsub.error import RepoReadError
from jsub.error import TaskNotFoundError
ID_FILENAME = 'id'
class FileSystem(object):
def __init__(self, param):
self.__jsub_dir = os.path.expanduser(param.get('taskDir', '~/jsub/'))
# self.__id_file = os.path.join(self.__jsub_dir, ID_FILENAME)
self.__logger = logging.getLogger('JSUB')
# self.__create_repo_dir()
self.__json_format = param.get('format', 'compact')
def save_task(self, data):
if 'id' not in data:
safe_mkdir(self.__jsub_dir)
data['id'] = self.__new_task_id()
safe_mkdir(os.path.join(self.__jsub_dir,str(data['id']),'taskInfo'))
task_path = os.path.join(self.__jsub_dir, str(data['id']),'taskInfo','repo')
data_str = self.__json_str(data)
with open(task_path, 'a+') as f:
fcntl.flock(f, fcntl.LOCK_EX)
f.seek(0)
f.truncate()
f.write(data_str)
def delete_task(self, task_id):
safe_rmdir(os.path.join(self.__jsub_dir,str(task_id)))
def find_by_id(self, task_id):
return self.task_data(task_id)
def find_by_ids(self, task_ids):
all_data = []
for task_id in task_ids:
try:
td = self.task_data(task_id)
all_data.append(td)
except RepoReadError as e:
self.__logger.debug(e)
return all_data
def all_task_data(self, order='asc'):
task_ids =[d for d in os.listdir(self.__jsub_dir) if os.path.isdir(os.path.join(self.__jsub_dir,d))]
task_ids.sort(key=int, reverse=(order=='desc'))
return self.find_by_ids(task_ids)
def task_data(self, task_id):
safe_mkdir(os.path.join(self.__jsub_dir,str(task_id),'taskInfo'))
task_path = os.path.join(self.__jsub_dir,str(task_id),'taskInfo','repo')
with open(task_path, 'a+') as f:
fcntl.flock(f, fcntl.LOCK_EX)
f.seek(0)
data_str = f.read()
try:
return json.loads(data_str)
except ValueError as e:
raise RepoReadError('JSON decode error on task %s: %s' % (task_id, e))
# def __create_repo_dir(self):
# safe_mkdir(self.__jsub_dir)
def __new_task_id(self):
task_ids =[int(d) for d in os.listdir(self.__jsub_dir) if os.path.isdir(os.path.join(self.__jsub_dir,d))]
if not task_ids:
return 1
task_ids.sort(key=int, reverse=True)
return(task_ids[0]+1)
def __json_str(self, data):
if self.__json_format == 'pretty':
return json.dumps(data, indent=2)
return json.dumps(data, separators=(',', ':'))
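# Illustrative usage sketch: the param keys 'taskDir' and 'format' are the
# ones read in __init__ above, while the task payload is an arbitrary example.
#
#   repo = FileSystem({'taskDir': '~/jsub/', 'format': 'pretty'})
#   repo.save_task({'backend': 'local', 'status': 'Created'})
#   print(repo.all_task_data())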
| mit | 5,301,751,721,617,016,000 | 27.564706 | 107 | 0.656919 | false |
sumihai-tekindo/account_sicepat | sicepat_erp/invoice_line_exp_cost/invoice_line_exp_cost.py | 1 | 7446 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Pambudi Satria (<https://github.com/pambudisatria>).
# @author Pambudi Satria <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api
from openerp.osv import osv
import openerp.addons.decimal_precision as dp
class account_invoice_line(models.Model):
_inherit = "account.invoice.line"
@api.one
@api.depends('price_unit', 'discount', 'invoice_line_tax_id', 'quantity',
'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id')
def _compute_price(self):
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
taxes = self.invoice_line_tax_id.compute_all(price, self.quantity, product=self.product_id, partner=self.invoice_id.partner_id)
tax_extra_ship_cost = self.invoice_line_tax_id.compute_all(self.extra_shipping_cost, 1, product=self.product_id, partner=self.invoice_id.partner_id)
tax_insurance_fee = self.invoice_line_tax_id.compute_all(self.insurance_fee, 1, product=self.product_id, partner=self.invoice_id.partner_id)
tax_admcost_insurance = self.invoice_line_tax_id.compute_all(self.admcost_insurance, 1, product=self.product_id, partner=self.invoice_id.partner_id)
tax_packing_cost = self.invoice_line_tax_id.compute_all(self.packing_cost, 1, product=self.product_id, partner=self.invoice_id.partner_id)
self.price_subtotal = taxes['total'] + tax_extra_ship_cost['total'] + tax_insurance_fee['total'] + tax_admcost_insurance['total'] + tax_packing_cost['total']
if self.invoice_id:
self.price_subtotal = self.invoice_id.currency_id.round(self.price_subtotal)
def _compute_insurance_fee(self):
self.insurance_fee = self.insurance_value * (0.2 / 100)
extra_shipping_cost = fields.Float(string='Extra Shipping Cost', digits= dp.get_precision('Product Price'), default=0.0)
insurance_value = fields.Float(string='Insurance Value', digits= dp.get_precision('Product Price'), default=0.0)
insurance_fee = fields.Float(string='Insurance Fee', digits= dp.get_precision('Product Price'), default=0.0)
admcost_insurance = fields.Float(string='Cost Administration of Insurance', digits= dp.get_precision('Product Price'), default=0.0)
packing_cost = fields.Float(string='Packing Cost', digits= dp.get_precision('Product Price'), default=0.0)
class account_invoice_tax(models.Model):
_inherit = "account.invoice.tax"
@api.v8
def compute(self, invoice):
tax_grouped = {}
currency = invoice.currency_id.with_context(date=invoice.date_invoice or fields.Date.context_today(invoice))
company_currency = invoice.company_id.currency_id
for line in invoice.invoice_line:
taxes = line.invoice_line_tax_id.compute_all(
(line.price_unit * (1 - (line.discount or 0.0) / 100.0)),
line.quantity, line.product_id, invoice.partner_id)['taxes']
taxes += line.invoice_line_tax_id.compute_all(
line.extra_shipping_cost, 1, line.product_id, invoice.partner_id)['taxes']
taxes += line.invoice_line_tax_id.compute_all(
line.insurance_fee, 1, line.product_id, invoice.partner_id)['taxes']
taxes += line.invoice_line_tax_id.compute_all(
line.admcost_insurance, 1, line.product_id, invoice.partner_id)['taxes']
taxes += line.invoice_line_tax_id.compute_all(
line.packing_cost, 1, line.product_id, invoice.partner_id)['taxes']
for tax in taxes:
val = {
'invoice_id': invoice.id,
'name': tax['name'],
'amount': tax['amount'],
'manual': False,
'sequence': tax['sequence'],
'base': currency.round(tax['price_unit'] * line['quantity']),
}
if invoice.type in ('out_invoice','in_invoice'):
val['base_code_id'] = tax['base_code_id']
val['tax_code_id'] = tax['tax_code_id']
val['base_amount'] = currency.compute(val['base'] * tax['base_sign'], company_currency, round=False)
val['tax_amount'] = currency.compute(val['amount'] * tax['tax_sign'], company_currency, round=False)
val['account_id'] = tax['account_collected_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_collected_id']
else:
val['base_code_id'] = tax['ref_base_code_id']
val['tax_code_id'] = tax['ref_tax_code_id']
val['base_amount'] = currency.compute(val['base'] * tax['ref_base_sign'], company_currency, round=False)
val['tax_amount'] = currency.compute(val['amount'] * tax['ref_tax_sign'], company_currency, round=False)
val['account_id'] = tax['account_paid_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_paid_id']
# If the taxes generate moves on the same financial account as the invoice line
# and no default analytic account is defined at the tax level, propagate the
# analytic account from the invoice line to the tax line. This is necessary
# in situations were (part of) the taxes cannot be reclaimed,
# to ensure the tax move is allocated to the proper analytic account.
if not val.get('account_analytic_id') and line.account_analytic_id and val['account_id'] == line.account_id.id:
val['account_analytic_id'] = line.account_analytic_id.id
key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
if not key in tax_grouped:
tax_grouped[key] = val
else:
tax_grouped[key]['base'] += val['base']
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base_amount'] += val['base_amount']
tax_grouped[key]['tax_amount'] += val['tax_amount']
for t in tax_grouped.values():
t['base'] = currency.round(t['base'])
t['amount'] = currency.round(t['amount'])
t['base_amount'] = currency.round(t['base_amount'])
t['tax_amount'] = currency.round(t['tax_amount'])
return tax_grouped
| gpl-3.0 | -6,949,873,827,119,664,000 | 58.048387 | 165 | 0.592399 | false |
jzerbe/taskifier | taskifier/internal/__init__.py | 1 | 3830 | from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
import json
from taskifier import const
from taskifier.models import Task, TaskOwner
from taskifier.internal.TaskPayloadHelper import TaskPayloadHelper
def DELETE(task_owner, task_id):
task = _get_task_by_id(task_id)
if task and _is_owner(task_owner, task):
task.delete()
return {const.KEY_ID: task_id,
const.KEY_SOURCE: "",
const.KEY_DEST: "",
const.KEY_CONTENT: "",
const.KEY_READY_TIME: ""}
return {const.KEY_RESP_STATUS: "ERROR",
const.KEY_RESP_STATUS_TEXT: "task not found or bad auth"}
def GET(task_owner, task_id):
task = _get_task_by_id(task_id)
if task and _is_owner(task_owner, task):
return {const.KEY_ID: task_id,
const.KEY_SOURCE: task.source,
const.KEY_DEST: task.dest,
const.KEY_CONTENT: task.content,
const.KEY_READY_TIME: _get_json_from_datetime(task.ready_time)}
return {const.KEY_RESP_STATUS: "ERROR",
const.KEY_RESP_STATUS_TEXT: "task not found or bad auth"}
def POST(task_owner, task_id, request_payload):
if task_owner is None:
return {const.KEY_RESP_STATUS: "ERROR",
const.KEY_RESP_STATUS_TEXT: "specified TaskOwner object not found"}
taskPayloadHelper = TaskPayloadHelper(request_payload)
if not taskPayloadHelper.is_valid() or taskPayloadHelper.is_duplicate():
return {const.KEY_RESP_STATUS: "ERROR",
const.KEY_RESP_STATUS_TEXT: "payload is invalid or already exists"}
if task_id is None:
task = Task(owner=task_owner,
source=taskPayloadHelper[const.KEY_SOURCE],
dest=taskPayloadHelper[const.KEY_DEST],
content=taskPayloadHelper[const.KEY_CONTENT],
ready_time=taskPayloadHelper.get_ready_datetime())
task.save()
task_id = task.id
else:
task = _get_task_by_id(task_id)
task.source = taskPayloadHelper[const.KEY_SOURCE]
task.dest = taskPayloadHelper[const.KEY_DEST]
task.content = taskPayloadHelper[const.KEY_CONTENT]
task.ready_time = taskPayloadHelper.get_ready_datetime()
task.save()
return {const.KEY_ID: task_id,
const.KEY_SOURCE: taskPayloadHelper[const.KEY_SOURCE],
const.KEY_DEST: taskPayloadHelper[const.KEY_DEST],
const.KEY_CONTENT: taskPayloadHelper[const.KEY_CONTENT],
const.KEY_READY_TIME: taskPayloadHelper[const.KEY_READY_TIME]}
def get_owner(owner_key):
query_set = TaskOwner.objects.filter(key=owner_key)
if query_set and (len(query_set) == 1):
return query_set[0]
else:
return None
def _get_json_from_datetime(obj):
dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime) else None
json_str = json.dumps(obj, default=dthandler)
json_str = json_str.replace('"', '')
json_str = _rreplace(json_str, "000", "Z")
return json_str
def _get_task_by_id(task_id):
if task_id:
task = None
try:
task = Task.objects.get(id=task_id)
except ObjectDoesNotExist:
task = None
return task
else:
return None
def _is_owner(task_owner, task):
if task and task_owner and isinstance(task, Task) and isinstance(task_owner, TaskOwner):
return (task_owner.key == task.owner.key)
else:
return False
def _rreplace(s, old, new):
offset = 0 - len(old)
remainder = s[:offset]
replace_array = s.split(remainder)
replace_confirm = replace_array[(len(replace_array) - 1)]
if replace_confirm == old:
return s[:-len(old)] + new
return s
| bsd-3-clause | -939,080,448,045,002,200 | 35.132075 | 92 | 0.620627 | false |
nss350/magPy | core/windowSelector.py | 1 | 17249 | """
Created on Thu Mar 24 08:18:04 2016
@author: npop
The window selector calculates which global windows to use
Calculates overlapping windows between given sites
This removes the burden on the upcoming processor
Upcoming processor can then simply get the data for the windows
And process them
"""
import os
from datetime import date, time, datetime, timedelta
# my classes
from spectrumReader import SpectrumReader
from windowMasker import WindowMasker
# utils
from utilsIO import *
from utilsWindow import *
class WindowSelector(object):
###################
### CONSTRUCTOR
##################
def __init__(self, proj, fs, decParams, winParams):
self.proj = proj
self.fs = fs
self.decParams = decParams
self.winParams = winParams
self.sites = []
# shared indices
self.sharedIndices = {}
# the masks to use for each site
self.siteMasks = {}
# weights to use for each site
self.siteWeights = {}
# the spec files for each site at fs
self.siteSpecFolders = {}
self.siteSpecReaders = {}
# global index ranges for all of the spec files
self.siteSpecRanges = {}
# set of all global indices for each site
self.siteGlobalIndices = {}
self.prepend = "spectra"
# time constraints
# priority is datetimes
# then dates
# then times
self.datetimes = {}
self.dates = {}
self.times = {}
# final constraints saved in
self.datetimeConstraints = {}
# add a list for each decimation level
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.datetimes[iDec] = []
self.dates[iDec] = []
self.times[iDec] = []
self.datetimeConstraints[iDec] = []
###################
### GET FUNCTIONS
##################
def getSites(self):
return self.sites
def getSharedWindows(self):
return self.sharedIndices
def getSharedWindowsLevel(self, iDec):
return self.sharedIndices[iDec]
def getNumSharedWindows(self, iDec):
return len(self.sharedIndices[iDec])
def getWindowsForFreq(self, iDec, eIdx):
sharedIndices = self.getSharedWindowsLevel(iDec)
# now mask for the particular frequency - mask for each given site
for s in self.getSites():
for mask in self.getMasks()[s]:
# remove the masked windows from shared indices
sharedIndices = sharedIndices - mask.getMaskWindowsFreq(iDec, eIdx)
return sharedIndices
# another helper, which calculates the number of non-masked windows for the whole level
# this should significantly speed up calculation when constraints are applied
def getUnmaskedWindowsLevel(self, iDec):
indices = set()
evalFreq = self.getDecParams().getEvalFrequenciesForLevel(iDec)
for eIdx, eFreq in enumerate(evalFreq):
indices.update(self.getWindowsForFreq(iDec, eIdx))
return indices
def getSpecReaders(self):
return self.siteSpecReaders
def getSpecRanges(self):
return self.siteSpecRanges
def getGlobalIndices(self):
return self.siteGlobalIndices
def getSampleFreq(self):
return self.fs
def getPrepend(self):
return self.prepend
def getDecParams(self):
return self.decParams
def getWinParams(self):
return self.winParams
def getDatetimeConstraints(self):
self.calcDatetimeConstraints()
return self.datetimeConstraints
def getLevelDatetimeConstraints(self, iDec):
self.calcDatetimeConstraints()
return self.datetimeConstraints[iDec]
def getMasks(self):
return self.siteMasks
def getSpecReaderForWindow(self, site, iDec, iWin):
specRanges = self.getSpecRanges()[site][iDec]
specReaders = self.getSpecReaders()[site][iDec]
for sF in specRanges:
if iWin >= specRanges[sF][0] and iWin <= specRanges[sF][1]:
return sF, specReaders[sF]
# if here, no window found
self.printWarning("Shared window {}, decimation level {} does not appear in any files given the constraints applied".format(iWin, iDec))
return False, False
def getDataSize(self, iDec):
# return data size of first file
dataSize = -1
site = self.getSites()[0]
specReaders = self.getSpecReaders()[site][iDec]
for sF in specReaders:
return specReaders[sF].getDataSize()
###################
### SET FUNCTIONS
##################
def setSites(self, sites):
# first remove repeated sites
sitesSet = set(sites)
sites = list(sitesSet)
# now continue
self.sites = sites
for s in self.sites:
self.siteMasks[s] = []
self.siteSpecFolders[s] = []
self.siteSpecReaders[s] = {}
self.siteSpecRanges[s] = {}
# use sets to hold gIndices
# optimised to find intersections
self.siteGlobalIndices[s] = {}
# at the end, calculate global indices
self.calcGlobalIndices()
# this is the prepend for the spectra files
def setPrepend(self, prepend):
self.prepend = prepend
###################
### ADD CONSTRAINTS
##################
# for datetime constrains, dates take priority
def addDatetimeConstraint(self, start, stop):
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.addLevelDatetimeConstraint(start, stop, iDec)
def addLevelDatetimeConstraint(self, start, stop, iDec):
datetimeStart = datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
datetimeStop = datetime.strptime(stop, '%Y-%m-%d %H:%M:%S')
self.datetimes[iDec].append([datetimeStart, datetimeStop])
def addDateConstraint(self, dateC):
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.addLevelDateConstraint(dateC, iDec)
def addLevelDateConstraint(self, dateC, iDec):
datetimeC = datetime.strptime(dateC, '%Y-%m-%d').date()
self.dates[iDec].append(datetimeC)
def addTimeConstraint(self, start, stop):
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.addLevelTimeConstraint(start, stop, iDec)
def addLevelTimeConstraint(self, start, stop, iDec):
timeStart = datetime.strptime(start, '%H:%M:%S').time()
timeStop = datetime.strptime(stop, '%H:%M:%S').time()
self.times[iDec].append([timeStart, timeStop])
# this is a mask for with values for each evaluation frequency
def addWindowMask(self, site, maskName, **kwargs):
winMasker = WindowMasker(self.proj, site, self.getSampleFreq(), self.getDecParams(), self.getWinParams())
winMasker.readWindowFile(maskName)
self.siteMasks[site].append(winMasker)
###################
### GET SHARED GLOBAL WINDOWS
### THIS DOES NOT INCLUDE ANY MASKS WHICH MIGHT BE APPLIED
##################
def calcSharedWindows(self):
if len(self.getSites()) == 0:
self.printWarning("No sites given to Window Selector. At least one site needs to be given.")
return False
# calculate datetime constraints
self.calcDatetimeConstraints()
# initialise the sharedIndices with a set from one site
sites = self.getSites()
siteInit = sites[0]
numLevels = self.getDecParams().getNumLevels()
for iDec in xrange(0, numLevels):
self.sharedIndices[iDec] = self.getGlobalIndices()[siteInit][iDec]
# now for each decimation level
# calculate the shared ones
for iDec in xrange(0, numLevels):
for s in self.getSites():
self.sharedIndices[iDec] = self.sharedIndices[iDec].intersection(self.getGlobalIndices()[s][iDec])
# apply time constraints
# time constraints should be formulated as a set
# and then, find the intersection again
for iDec in xrange(0, numLevels):
constraints = self.getLevelDatetimeConstraints(iDec)
if len(constraints) != 0:
datetimeIndices = set()
for dC in constraints:
gIndexStart, firstWindowStart = datetime2gIndex(self.proj.getRefTime(), dC[0], self.decParams.getSampleFreqLevel(iDec), self.winParams.getWindowSize(iDec), self.winParams.getOverlap(iDec))
gIndexEnd, firstWindowEnd = datetime2gIndex(self.proj.getRefTime(), dC[1], self.decParams.getSampleFreqLevel(iDec), self.winParams.getWindowSize(iDec), self.winParams.getOverlap(iDec))
gIndexEnd = gIndexEnd - 1 # as the function returns the next window starting after time
if gIndexEnd < gIndexStart:
gIndexEnd = gIndexStart
datetimeIndices.update(range(gIndexStart, gIndexEnd))
self.printText("Decimation level = {}. Applying date constraint {} - {}, global index constraint {} - {}".format(iDec, dC[0], dC[1], gIndexStart, gIndexEnd))
self.sharedIndices[iDec] = self.sharedIndices[iDec].intersection(datetimeIndices)
###################
### GET WINDOW RANGES
##################
def calcGlobalIndices(self):
# get all the spectra files with the correct sampling frequency
for s in self.getSites():
timeFilesFs = self.proj.getSiteTimeFilesFs(s, self.getSampleFreq())
specFiles = self.proj.getSiteSpectraFiles(s)
specFilesFs = []
for sF in specFiles:
if sF in timeFilesFs:
specFilesFs.append(sF)
self.siteSpecFolders[s] = specFilesFs
# for each decimation level
# loop through each of the spectra folders
# and find the global indices ranges for each decimation level
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
# get the dictionaries ready
self.siteSpecReaders[s][iDec] = {}
self.siteSpecRanges[s][iDec] = {}
self.siteGlobalIndices[s][iDec] = set()
# loop through spectra folders and figure out global indices
for sF in self.siteSpecFolders[s]:
specReader = SpectrumReader(os.path.join(self.proj.getSpecDataPathSite(s), sF))
check = specReader.openBinaryForReading(self.getPrepend(), iDec)
# see if file exists
# if not, continue
if not check:
continue
self.siteSpecReaders[s][iDec][sF] = specReader
globalRange = specReader.getGlobalRange()
self.siteSpecRanges[s][iDec][sF] = globalRange
# and save set of global indices
self.siteGlobalIndices[s][iDec].update(range(globalRange[0], globalRange[1]+1))
# Datetime constraints: priority is datetime, then dates, then times
def calcDatetimeConstraints(self):
# calculate site dates if required
siteDates = self.calcSiteDates()
# datetime constraints are for each decimation level
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
# calculate date and time constraints for each level
# begin with the datetime constraints - these have highest priority
self.datetimeConstraints[iDec] = self.datetimes[iDec]
# check to see whether any date and time constraints
if len(self.dates[iDec]) == 0 and len(self.times[iDec]) == 0:
continue
dateConstraints = []
if len(self.dates[iDec]) != 0:
# apply time constraints only on specified days
dateConstraints = self.dates[iDec]
else:
dateConstraints = siteDates
# finally, add the time constraints to the dates
# otherwise add the whole day
dateAndTimeConstraints = []
if len(self.times[iDec]) == 0:
# add whole days
for dC in dateConstraints:
start = datetime.combine(dC, time(0,0,0))
stop = datetime.combine(dC, time(23,59,59))
dateAndTimeConstraints.append([start, stop])
else:
# add each time for each day
for tC in self.times[iDec]:
for dC in dateConstraints:
start = datetime.combine(dC, tC[0])
stop = datetime.combine(dC, tC[1])
# check if this goes over a day
if tC[1] < tC[0]:
# then need to increment the day
dCNext = dC + timedelta(days=1)
stop = datetime.combine(dCNext, tC[1])
# append to constraints
dateAndTimeConstraints.append([start, stop])
# finally, combine datetimes and dateAndTimeConstraints
self.datetimeConstraints[iDec] = self.datetimeConstraints[iDec] + dateAndTimeConstraints
self.datetimeConstraints[iDec] = sorted(self.datetimeConstraints[iDec])
def calcSiteDates(self):
starts = []
stops = []
for s in self.getSites():
starts.append(self.proj.getSiteStart(s))
stops.append(self.proj.getSiteStop(s))
# need all the dates between
d1 = max(starts).date()
d2 = min(stops).date()
if d1 > d2:
self.printWarning("A site passed to the window selector does not overlap with any other sites. There will be no shared windows")
return
# now with d2 > d1
siteDates = []
delta = d2 - d1
# + 1 because inclusive of stop and start days
for i in range(delta.days + 1):
siteDates.append(d1 + timedelta(days=i))
return siteDates
###################
### DEBUG
##################
def printInfo(self):
self.printText("####################")
self.printText("WINDOW SELECTOR INFO BEGIN")
self.printText("####################")
self.printText("Sampling frequency [Hz] = {:.6f}".format(self.getSampleFreq()))
self.printText("Sites = {}".format(", ".join(self.getSites())))
self.printText("####################")
self.printText("WINDOW SELECTOR INFO END")
self.printText("####################")
def printAllSiteInfo(self):
for s in self.getSites():
self.printSiteInfo(s)
def printSiteInfo(self, site):
self.printText("####################")
self.printText("WINDOW SELECTOR SITE INFO BEGIN")
self.printText("####################")
self.printText("Sampling frequency [Hz] = {:.6f}".format(self.getSampleFreq()))
self.printText("Site = {}".format(site))
self.printText("Site global index information")
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.printText("Decimation Level = {:d}".format(iDec))
ranges = self.getSpecRanges()
for sF in sorted(list(ranges[site][iDec].keys())):
startTime1, endTime1 = gIndex2datetime(ranges[site][iDec][sF][0], self.proj.getRefTime(), self.getSampleFreq()/self.decParams.getDecFactor(iDec), self.winParams.getWindowSize(iDec), self.winParams.getOverlap(iDec))
startTime2, endTime2 = gIndex2datetime(ranges[site][iDec][sF][1], self.proj.getRefTime(), self.getSampleFreq()/self.decParams.getDecFactor(iDec), self.winParams.getWindowSize(iDec), self.winParams.getOverlap(iDec))
self.printText(
"Measurement file = {}\ttime range = {} - {}\tGlobal Indices Range = {:d} - {:d}".format(
sF, startTime1, endTime2, ranges[site][iDec][sF][0], ranges[site][iDec][sF][1]
)
)
self.printText("####################")
self.printText("WINDOW SELECTOR SITE INFO END")
self.printText("####################")
def printSharedIndices(self):
self.printText("####################")
self.printText("WINDOW SELECTOR SHARED INDICES INFO BEGIN")
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.printText("####################")
self.printText("Decimation Level = {:d}".format(iDec))
self.printText("Number of shared windows = {:d}".format(self.getNumSharedWindows(iDec)))
self.printText("Shared Window Indices: {}".format(list2ranges(self.getSharedWindows()[iDec])))
self.printText("NOTE: These are the shared windows at each decimation level. Windows for each evaluation frequency might vary depending on masks")
self.printText("####################")
self.printText("WINDOW SELECTOR SHARED INDICES INFO END")
self.printText("####################")
def printDatetimeConstraints(self):
# calculate datetime constraints
self.calcDatetimeConstraints()
# print out the info
self.printText("####################")
self.printText("WINDOW SELECTOR CONSTRAINT INFO BEGIN")
self.printText("####################")
self.printText("Datetime constraints")
numLevels = self.decParams.getNumLevels()
for iDec in xrange(0, numLevels):
self.printText("Decimation Level = {:d}".format(iDec))
for d in self.getLevelDatetimeConstraints(iDec):
self.printText("Constraint {} - {}".format(d[0], d[1]))
self.printText("####################")
self.printText("WINDOW SELECTOR CONSTRAINT INFO END")
self.printText("####################")
def printWindowMasks(self):
self.printText("####################")
self.printText("WINDOW SELECTOR MASK INFO BEGIN")
self.printText("####################")
for s in self.getSites():
self.printText("Site = {}".format(s))
if len(self.getMasks()[s]) == 0:
self.printText("\tNo masks for this site")
else:
for mask in self.getMasks()[s]:
self.printText("\tMask = {}".format(mask.getMaskName()))
self.printText("####################")
self.printText("WINDOW SELECTOR MASK INFO END")
self.printText("####################")
def printWindowsForFrequency(self):
self.printText("####################")
self.printText("WINDOW SELECTOR FREQUENCY WINDOWS INFO BEGIN")
self.printText("####################")
for iDec in xrange(0, self.getDecParams().getNumLevels()):
evalFreq = self.getDecParams().getEvalFrequenciesForLevel(iDec)
unmaskedWindows = self.getNumSharedWindows(iDec)
for eIdx, eFreq in enumerate(evalFreq):
maskedWindows = self.getWindowsForFreq(iDec, eIdx)
self.printText("Evaluation frequency = {:.6f}, shared windows = {:d}, windows after masking = {:d}".format(eFreq, unmaskedWindows, len(maskedWindows)))
self.printText("{}".format(list2ranges(maskedWindows)))
self.printText("####################")
self.printText("WINDOW SELECTOR FREQUENCY WINDOWS INFO END")
self.printText("####################")
def printText(self, infoStr):
generalPrint("Window Selector Info", infoStr)
def printWarning(self, warnStr):
warningPrint("Window Selector Warning", warnStr)
| apache-2.0 | -2,665,271,720,281,101,300 | 35.856838 | 222 | 0.681663 | false |
amahabal/PySeqsee | farg/core/util.py | 1 | 3569 | # Copyright (C) 2011, 2012 Abhijit Mahabal
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this
# program. If not, see <http://www.gnu.org/licenses/>
"""General utilities."""
import colorsys
import random
def HSVToColorString(hue, saturation, value):
"""Convert from HSV to RGB color space."""
rgb = ('%02x' % (255.0 * x) for x in colorsys.hsv_to_rgb(hue, saturation, value))
return '#' + ''.join(rgb)
def Toss(x):
"""x is a number between 0 and 1. Returns true with probability x."""
return random.uniform(0, 1) <= x
def ChooseAboutN(n, choices):
"""Choose in a way that the expected number of choices is n.
Args:
n: The expected number of responses.
choices: an iterable of 2-tuples, where the second value is the weight.
An example to show how it works: let choices contain 5 things with weights 10, 20, 30
40, and 50 (thus summing to 150), and let n=3. Then we will keep the first item in the
output with probability 3 * 10/150 (i.e., 20%).
Returns:
A list of a roughly n-sized subset of choices.
"""
choices = list(choices) # Needed since we iterate twice over the iterable.
total = sum(w for _c, w in choices)
return [x[0] for x in choices if Toss(1.0 * n * x[1] / total)]
def WeightedChoice(choices):
"""Chooses an item, biased by weight.
Args:
choices: an iterable of 2-tuples, where the second value is the weight.
Returns:
An element of choices.
"""
choices = list(choices) # Needed since we iterate twice over the iterable.
total = sum(weight for item, weight in choices)
random_sum = random.uniform(0, total)
upto = 0
for item, weight in choices:
if upto + weight > random_sum:
return item
upto += weight
assert False, "Shouldn't get here"
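# Example (illustrative): with the weights below, the second item is returned
# roughly three times as often as the first over many calls.
#
#   WeightedChoice([('rarely', 1), ('often', 3)])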
def SelectWeightedByActivation(ltm, choices):
"""Given an ltm and nodes in ltm, chooses one biased by activation."""
choices = ((x, ltm.GetNode(content=x).GetActivation(current_time=0)) for x in choices)
return WeightedChoice(choices)
def UnweightedChoice(choices):
"""Chooses one item uniformly randomly from is an iterable."""
choices = list(choices) # Needed since we need to choose nth element and need length.
random_choice = random.uniform(0, len(choices))
return choices[int(random_choice)]
def WeightedShuffle(choices):
"""Shuffle items by weight.
Args:
choices: an iterable of 2-tuples, where the second value is the weight.
Yields:
Repeatedly yields first elements of the 2-tuple, resulting, when complete, in a shuffle.
"""
total = sum(weight for item, weight in choices)
while total > 0:
random_val = random.uniform(0, total)
upto = 0
for idx, choice in enumerate(choices):
item, weight = choice
if upto + weight > random_val:
total -= weight
choices = choices[0:idx] + choices[idx + 1:]
yield item
break
upto += weight
def Squash(val, cap):
"""Use a sigmoidal squashing function to squash to 100."""
if val < 1:
return val
return cap * val / (cap - 1.0 + val)
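# Worked examples (illustrative): values below 1 pass through unchanged and
# larger values approach the cap asymptotically.
#
#   Squash(0.5, 100)    # 0.5
#   Squash(100, 100)    # 100 * 100 / 199, about 50.3
#   Squash(10000, 100)  # about 99.0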
| gpl-3.0 | -742,354,667,693,318,000 | 32.046296 | 93 | 0.692351 | false |
lbjay/cds-invenio | modules/bibformat/lib/bibformat_bfx_engine_config.py | 1 | 5843 | ## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""BibFormat BFX engine configuration."""
__revision__ = "$Id$"
import os
from invenio.config import CFG_ETCDIR
CFG_BIBFORMAT_BFX_TEMPLATES_PATH = "%s%sbibformat%sformat_templates" % (CFG_ETCDIR, os.sep, os.sep)
CFG_BIBFORMAT_BFX_FORMAT_TEMPLATE_EXTENSION = "bfx"
CFG_BIBFORMAT_BFX_ELEMENT_NAMESPACE = "http://cdsware.cern.ch/invenio/"
CFG_BIBFORMAT_BFX_LABEL_DEFINITIONS = {
#record is a reserved keyword, don't use it
#define one or more addresses for each name or zero if you plan to define them later
'controlfield': [r'/???'],
'datafield': [r'/?????'],
'datafield.subfield': [r'datafield/?'],
'recid': [r'/001'],
'article_id': [],
'language': [r'/041__/a'],
'title': [r'/245__/a'],
'subtitle': [r'/245__/b'],
'secondary_title': [r'/773__/p'],
'first_author': [r'/100__/a'],
'author': [r'/100__/a',
r'/700__/a'],
'author.surname': [r'author#(?P<value>.*),[ ]*(.*)'],
'author.names': [r'author#(.*),[ ]*(?P<value>.*)'],
'abstract': [r'/520__/a'],
'publisher': [r'/260__/b'],
'publisher_location': [r'/260__/a'],
'issn': [r'/022__/a'],
'doi': [r'/773__/a'],
'journal_name_long': [r'/222__/a',
r'/210__/a',
r'/773__/p',
r'/909C4/p'],
'journal_name_short': [r'/210__/a',
r'/773__/p',
r'/909C4/p'],
'journal_name': [r'/773__/p',
r'/909C4/p'],
'journal_volume': [r'/773__/v',
r'/909C4/v'],
'journal_issue': [r'/773__/n'],
'pages': [r'/773__/c',
r'/909C4/c'],
'first_page': [r'/773__/c#(?P<value>\d*)-(\d*)',
r'/909C4/c#(?P<value>\d*)-(\d*)'],
'last_page': [r'/773__/c#(\d*)-(?P<value>\d*)',
r'/909C4/c#(\d*)-(?P<value>\d*)'],
'date': [r'/260__/c'],
'year': [r'/773__/y#(.*)(?P<value>\d\d\d\d).*',
r'/260__/c#(.*)(?P<value>\d\d\d\d).*',
r'/925__/a#(.*)(?P<value>\d\d\d\d).*',
r'/909C4/y'],
'doc_type': [r'/980__/a'],
'doc_status': [r'/980__/c'],
'uri': [r'/8564_/u',
r'/8564_/q'],
'subject': [r'/65017/a'],
'keyword': [r'/6531_/a'],
'day': [],
'month': [],
'creation_date': [],
'reference': []
}
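# Each label above maps to one or more MARC-style addresses; an optional
# '#regex' suffix extracts part of the field value through the named group
# (?P<value>...). A hypothetical extra label would follow the same pattern,
# for example:
# 'report_number': [r'/037__/a'],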
CFG_BIBFORMAT_BFX_ERROR_MESSAGES = \
{
'ERR_BFX_TEMPLATE_REF_NO_NAME' : 'Error: Missing attribute "name" in TEMPLATE_REF.',
'ERR_BFX_TEMPLATE_NOT_FOUND' : 'Error: Template %s not found.',
'ERR_BFX_ELEMENT_NO_NAME' : 'Error: Missing attribute "name" in ELEMENT.',
'ERR_BFX_FIELD_NO_NAME' : 'Error: Missing attribute "name" in FIELD.',
'ERR_BFX_LOOP_NO_OBJECT' : 'Error: Missing attribute "object" in LOOP.',
'ERR_BFX_NO_SUCH_FIELD' : 'Error: Field %s is not defined',
'ERR_BFX_IF_NO_NAME' : 'Error: Missing attribute "name" in IF.',
'ERR_BFX_TEXT_NO_VALUE' : 'Error: Missing attribute "value" in TEXT.',
'ERR_BFX_INVALID_RE' : 'Error: Invalid regular expression: %s',
'ERR_BFX_INVALID_OPERATOR_NAME' : 'Error: Name %s is not recognised as a valid operator name.',
'ERR_BFX_INVALID_DISPLAY_TYPE' : 'Error: Invalid display type. Must be one of: value, tag, ind1, ind2, code; received: %s',
'ERR_BFX_IF_WRONG_SYNTAX' : 'Error: Invalid syntax of IF statement.',
'ERR_BFX_DUPLICATE_NAME' : 'Error: Duplicate name: %s.',
'ERR_BFX_TEMPLATE_NO_NAME' : 'Error: No name defined for the template.',
'ERR_BFX_NO_TEMPLATES_FOUND' : 'Error: No templates found in the document.',
'ERR_BFX_TOO_MANY_TEMPLATES' : 'Error: More than one template found in the document. No format found.'
}
CFG_BIBFORMAT_BFX_WARNING_MESSAGES = \
{
'WRN_BFX_TEMPLATE_NO_DESCRIPTION' : 'Warning: No description entered for the template.',
'WRN_BFX_TEMPLATE_NO_CONTENT' : 'Warning: No content type specified for the template. Using default: text/xml.',
'WRN_BFX_NO_FORMAT_FOUND' : 'Warning: No format found. Will look for a default template.'
}
| gpl-2.0 | -8,086,134,669,331,546,000 | 49.808696 | 146 | 0.479719 | false |
jantman/cobbler | newtests/cli/imports/test_vmware/vmware_import_test.py | 1 | 1218 | import os
import sys
import unittest
from cobbler import utils
from newtests.cli.imports.import_base import CobblerImportTest
from newtests.cli.imports.import_base import create_import_func
class Test_VMWare_Imports(CobblerImportTest):
"""
Tests imports of various distros
"""
pass
distros = [
{"name":"vmware_esx_4.0_u1-x86_64", "desc":"VMware ESX 4.0 update1", "path":"/vagrant/distros/vmware_esx_4.0_u1_208167_x86_64"},
{"name":"vmware_esx_4.0_u2-x86_64", "desc":"VMware ESX 4.0 update2", "path":"/vagrant/distros/vmware_esx_4.0_u2_261974_x86_64"},
{"name":"vmware_esxi4.1-x86_64", "desc":"VMware ESXi 4.1", "path":"/vagrant/distros/vmware_esxi4.1_348481_x86_64"},
{"name":"vmware_esxi5.0-x86_64", "desc":"VMware ESXi 5.0", "path":"/vagrant/distros/vmware_esxi5.0_469512_x86_64"},
{"name":"vmware_esxi5.1-x86_64", "desc":"VMware ESXi 5.1", "path":"/vagrant/distros/vmware_esxi5.1_799733_x86_64"},
]
for i in range(0,len(distros)):
test_func = create_import_func(distros[i])
test_func.__name__ = 'test_vmware_%02d_import_%s' % (i,distros[i]["name"])
test_func.__doc__ = "Import of %s" % distros[i]["desc"]
setattr(Test_VMWare_Imports, test_func.__name__, test_func)
del test_func
| gpl-2.0 | 6,504,870,934,439,871,000 | 42.5 | 129 | 0.683087 | false |
smurfix/gevent-socketio | socketio/transports.py | 1 | 11396 | import gevent
import urllib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
from geventwebsocket import WebSocketError
from gevent.queue import Empty
class BaseTransport(object):
"""Base class for all transports. Mostly wraps handler class functions."""
def __init__(self, handler, config, **kwargs):
"""Base transport class.
:param config: dict Should contain the config keys, like
``heartbeat_interval``, ``heartbeat_timeout`` and
``close_timeout``.
"""
self.content_type = ("Content-Type", "text/plain; charset=UTF-8")
self.headers = [
("Access-Control-Allow-Origin", "*"),
("Access-Control-Allow-Credentials", "true"),
("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
("Access-Control-Max-Age", 3600),
]
self.handler = handler
self.config = config
def write(self, data=""):
# Gevent v 0.13
if hasattr(self.handler, 'response_headers_list'):
if 'Content-Length' not in self.handler.response_headers_list:
self.handler.response_headers.append(('Content-Length', len(data)))
self.handler.response_headers_list.append('Content-Length')
elif not hasattr(self.handler, 'provided_content_length') or self.handler.provided_content_length is None:
# Gevent 1.0bX
l = len(data)
self.handler.provided_content_length = l
self.handler.response_headers.append(('Content-Length', l))
self.handler.write_smart(data)
def start_response(self, status, headers, **kwargs):
if "Content-Type" not in [x[0] for x in headers]:
headers.append(self.content_type)
headers.extend(self.headers)
self.handler.start_response(status, headers, **kwargs)
class XHRPollingTransport(BaseTransport):
def __init__(self, *args, **kwargs):
super(XHRPollingTransport, self).__init__(*args, **kwargs)
def options(self):
self.start_response("200 OK", ())
self.write()
return []
def get(self, socket):
socket.heartbeat()
heartbeat_interval = self.config['heartbeat_interval']
payload = self.get_messages_payload(socket, timeout=heartbeat_interval)
if not payload:
payload = "8::" # NOOP
self.start_response("200 OK", [])
self.write(payload)
def _request_body(self):
return self.handler.wsgi_input.readline()
def post(self, socket):
for message in self.decode_payload(self._request_body()):
socket.put_server_msg(message)
self.start_response("200 OK", [
("Connection", "close"),
("Content-Type", "text/plain")
])
self.write("1")
def get_messages_payload(self, socket, timeout=None):
"""This will fetch the messages from the Socket's queue, and if
there are many messes, pack multiple messages in one payload and return
"""
try:
msgs = socket.get_multiple_client_msgs(timeout=timeout)
data = self.encode_payload(msgs)
except Empty:
data = ""
return data
def encode_payload(self, messages):
"""Encode list of messages. Expects messages to be unicode.
``messages`` - List of raw messages to encode, if necessary
"""
if not messages or messages[0] is None:
return ''
if len(messages) == 1:
return messages[0].encode('utf-8')
payload = u''.join([(u'\ufffd%d\ufffd%s' % (len(p), p))
for p in messages if p is not None])
# FIXME: why is it so that we must filter None from here ? How
# is it even possible that a None gets in there ?
return payload.encode('utf-8')
def decode_payload(self, payload):
"""This function can extract multiple messages from one HTTP payload.
Sometimes, the XHR/JSONP/.. transports can pack more than one message
in a single packet. They are encoded following the WebSocket
semantics, which need to be reproduced here to unwrap the messages.
The semantics are:
\ufffd + [length as a string] + \ufffd + [payload as a unicode string]
This function returns a list of messages, even though there is only
one.
Inspired by socket.io/lib/transports/http.js
"""
payload = payload.decode('utf-8')
if payload[0] == u"\ufffd":
ret = []
while len(payload) != 0:
len_end = payload.find(u"\ufffd", 1)
length = int(payload[1:len_end])
msg_start = len_end + 1
msg_end = length + msg_start
message = payload[msg_start:msg_end]
ret.append(message)
payload = payload[msg_end:]
return ret
return [payload]
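# Framing example (illustrative): the two messages u'hello' and u'abc' are
# packed by encode_payload() as u'\ufffd5\ufffdhello\ufffd3\ufffdabc' (then
# utf-8 encoded), and decode_payload() unwraps that payload back into
# [u'hello', u'abc'].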
def do_exchange(self, socket, request_method):
if not socket.connection_established:
# Runs only the first time we get a Socket opening
self.start_response("200 OK", [
("Connection", "close"),
])
self.write("1::") # 'connect' packet
return
elif request_method in ("GET", "POST", "OPTIONS"):
return getattr(self, request_method.lower())(socket)
else:
raise Exception("No support for the method: " + request_method)
class JSONPolling(XHRPollingTransport):
def __init__(self, handler, config):
super(JSONPolling, self).__init__(handler, config)
self.content_type = ("Content-Type", "text/javascript; charset=UTF-8")
def _request_body(self):
data = super(JSONPolling, self)._request_body()
# resolve %20%3F's, take out wrapping d="...", etc..
data = urllib.unquote_plus(data)[3:-1] \
.replace(r'\"', '"') \
.replace(r"\\", "\\")
# For some reason, in case of multiple messages passed in one
# query, IE7 sends it escaped, not utf-8 encoded. This dirty
# hack handled it
if data[0] == "\\":
data = data.decode("unicode_escape").encode("utf-8")
return data
def write(self, data):
"""Just quote out stuff before sending it out"""
args = urlparse.parse_qs(self.handler.environ.get("QUERY_STRING"))
if "i" in args:
i = args["i"]
else:
i = "0"
# TODO: don't we need to quote this data in here ?
super(JSONPolling, self).write("io.j[%s]('%s');" % (i, data))
class XHRMultipartTransport(XHRPollingTransport):
def __init__(self, handler):
super(XHRMultipartTransport, self).__init__(handler)
self.content_type = (
"Content-Type",
"multipart/x-mixed-replace;boundary=\"socketio\""
)
def do_exchange(self, socket, request_method):
if request_method == "GET":
return self.get(socket)
elif request_method == "POST":
return self.post(socket)
else:
raise Exception("No support for such method: " + request_method)
def get(self, socket):
header = "Content-Type: text/plain; charset=UTF-8\r\n\r\n"
self.start_response("200 OK", [("Connection", "keep-alive")])
self.write_multipart("--socketio\r\n")
self.write_multipart(header)
self.write_multipart(str(socket.sessid) + "\r\n")
self.write_multipart("--socketio\r\n")
def chunk():
while True:
payload = self.get_messages_payload(socket)
if not payload:
# That would mean the call to Queue.get() returned Empty,
# so it was in fact killed, since we pass no timeout=..
return
# See below
else:
try:
self.write_multipart(header)
self.write_multipart(payload)
self.write_multipart("--socketio\r\n")
except socket.error:
# The client might try to reconnect, even with a socket
# error, so let's just let it go, and not kill the
# socket completely. Other processes will ensure
# we kill everything if the user expires the timeouts.
#
# WARN: this means that this payload is LOST, unless we
# decide to re-inject it into the queue.
return
socket.spawn(chunk)
class WebsocketTransport(BaseTransport):
def do_exchange(self, socket, request_method):
websocket = self.handler.environ['wsgi.websocket']
websocket.send("1::") # 'connect' packet
def send_into_ws():
while True:
message = socket.get_client_msg()
if message is None:
break
try:
websocket.send(message)
except (WebSocketError, TypeError):
# We can't send a message on the socket
# it is dead, let the other sockets know
socket.disconnect()
def read_from_ws():
while True:
message = websocket.receive()
if message is None:
break
else:
if message is not None:
socket.put_server_msg(message)
socket.spawn(send_into_ws)
socket.spawn(read_from_ws)
class FlashSocketTransport(WebsocketTransport):
pass
class HTMLFileTransport(XHRPollingTransport):
"""Not tested at all!"""
def __init__(self, handler, config):
super(HTMLFileTransport, self).__init__(handler, config)
self.content_type = ("Content-Type", "text/html")
def write_packed(self, data):
self.write("<script>_('%s');</script>" % data)
def write(self, data):
l = 1024 * 5
super(HTMLFileTransport, self).write("%d\r\n%s%s\r\n" % (l, data, " " * (l - len(data))))
def do_exchange(self, socket, request_method):
return super(HTMLFileTransport, self).do_exchange(socket, request_method)
def get(self, socket):
self.start_response("200 OK", [
("Connection", "keep-alive"),
("Content-Type", "text/html"),
("Transfer-Encoding", "chunked"),
])
self.write("<html><body><script>var _ = function (msg) { parent.s._(msg, document); };</script>")
self.write_packed("1::") # 'connect' packet
def chunk():
while True:
payload = self.get_messages_payload(socket)
if not payload:
# That would mean the call to Queue.get() returned Empty,
# so it was in fact killed, since we pass no timeout=..
return
else:
try:
self.write_packed(payload)
except socket.error:
# See comments for XHRMultipart
return
socket.spawn(chunk)
| bsd-3-clause | 116,012,888,092,994,820 | 34.6125 | 114 | 0.550807 | false |
bjornwallner/proq2-server | apps/modeller9v8/examples/commands/saxs.py | 1 | 1049 | # script to calculate SAXS spectrum of a model
from modeller import *
from modeller.scripts import complete_pdb
env=environ()
env.libs.topology.read(file='$(LIB)/top_heav.lib')
env.libs.parameters.read(file='$(LIB)/par.lib')
log.verbose()
### Calculate SAXS spectrum of correct structure
mdl = complete_pdb(env, '1cuk')
atmsel = selection(mdl)
saxs = saxsdata(env)
saxs.ini_saxs(atmsel=atmsel, s_min= 0.0, s_max=0.5, maxs=100, nmesh=100,
natomtyp=15, represtyp='heav',
filename='$(LIB)/formfactors-int_tab_solvation.lib',
wswitch = 'uniform', s_low=0.0, s_hi=0.5, s_hybrid=0.0,
spaceflag= 'real', use_lookup=False)
### append SAXS data to energy structure
mdl.env.edat.saxsdata.append(saxs)
mdl.saxs_intens(saxsd=saxs, filename='i_s.txt', fitflag=False)
saxs.saxs_read(filename='i_s.txt')
# create some restraints
mdl.restraints.make(atmsel, restraint_type='stereo', spline_on_site=False)
edat = atmsel.energy()
atmsel.randomize_xyz(deviation=2.)
edat = atmsel.energy()
mdl.saxs_intens(saxsd=saxs, filename='i_s_fit.txt',fitflag=True)
| gpl-3.0 | -4,310,941,405,752,410,000 | 36.464286 | 74 | 0.731173 | false |
TurboTurtle/sos | sos/collector/__init__.py | 1 | 54982 | # Copyright Red Hat 2020, Jake Hunsaker <[email protected]>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
import fnmatch
import inspect
import json
import os
import random
import re
import string
import socket
import shutil
import subprocess
import sys
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from getpass import getpass
from pathlib import Path
from pipes import quote
from textwrap import fill
from sos.cleaner import SoSCleaner
from sos.collector.sosnode import SosNode
from sos.collector.exceptions import ControlPersistUnsupportedException
from sos.options import ClusterOption
from sos.component import SoSComponent
from sos import __version__
COLLECTOR_CONFIG_DIR = '/etc/sos/groups.d'
class SoSCollector(SoSComponent):
"""Collector is the formerly standalone sos-collector project, brought into
sos natively in 4.0
It is meant to collect reports from an arbitrary number of remote nodes,
as well as the localhost, at the same time. These nodes may be either user
defined, defined by some clustering software, or both.
"""
desc = 'Collect an sos report from multiple nodes simultaneously'
arg_defaults = {
'all_logs': False,
'alloptions': False,
'allow_system_changes': False,
'become_root': False,
'case_id': False,
'chroot': 'auto',
'clean': False,
'cluster_options': [],
'cluster_type': None,
'domains': [],
'enable_plugins': [],
'encrypt_key': '',
'encrypt_pass': '',
'group': None,
'image': '',
'jobs': 4,
'keywords': [],
'label': '',
'list_options': False,
'log_size': 0,
'map_file': '/etc/sos/cleaner/default_mapping',
'master': '',
'nodes': [],
'no_env_vars': False,
'no_local': False,
'nopasswd_sudo': False,
'no_pkg_check': False,
'no_update': False,
'only_plugins': [],
'password': False,
'password_per_node': False,
'plugin_options': [],
'plugin_timeout': None,
'preset': '',
'save_group': '',
'since': '',
'skip_commands': [],
'skip_files': [],
'skip_plugins': [],
'sos_opt_line': '',
'ssh_key': '',
'ssh_port': 22,
'ssh_user': 'root',
'timeout': 600,
'verify': False,
'usernames': [],
'upload': False,
'upload_url': None,
'upload_directory': None,
'upload_user': None,
'upload_pass': None,
}
def __init__(self, parser, parsed_args, cmdline_args):
super(SoSCollector, self).__init__(parser, parsed_args, cmdline_args)
os.umask(0o77)
self.client_list = []
self.node_list = []
self.master = False
self.retrieved = 0
self.cluster = None
self.cluster_type = None
# add manifest section for collect
self.manifest.components.add_section('collect')
# shorthand reference
self.collect_md = self.manifest.components.collect
# placeholders in manifest organization
self.collect_md.add_field('cluster_type', 'none')
self.collect_md.add_list('node_list')
# add a place to set/get the sudo password, but do not expose it via
# the CLI, because security is a thing
setattr(self.opts, 'sudo_pw', '')
# get the local hostname and addresses to filter from results later
self.hostname = socket.gethostname()
try:
self.ip_addrs = list(set([
i[4][0] for i in socket.getaddrinfo(socket.gethostname(), None)
]))
except Exception:
# this is almost always a DNS issue with reverse resolution
# set a safe fallback and log the issue
self.log_error(
"Could not get a list of IP addresses from this hostnamne. "
"This may indicate a DNS issue in your environment"
)
self.ip_addrs = ['127.0.0.1']
self._parse_options()
self.clusters = self.load_clusters()
if not self.opts.list_options:
try:
self.parse_node_strings()
self.parse_cluster_options()
self._check_for_control_persist()
self.log_debug('Executing %s' % ' '.join(s for s in sys.argv))
self.log_debug("Found cluster profiles: %s"
% self.clusters.keys())
self.verify_cluster_options()
except KeyboardInterrupt:
self.exit('Exiting on user cancel', 130)
except Exception:
raise
def load_clusters(self):
"""Loads all cluster types supported by the local installation for
future comparison and/or use
"""
import sos.collector.clusters
package = sos.collector.clusters
supported_clusters = {}
clusters = self._load_modules(package, 'clusters')
for cluster in clusters:
supported_clusters[cluster[0]] = cluster[1](self.commons)
return supported_clusters
def _load_modules(self, package, submod):
"""Helper to import cluster and host types"""
modules = []
for path in package.__path__:
if os.path.isdir(path):
modules.extend(self._find_modules_in_path(path, submod))
return modules
def _find_modules_in_path(self, path, modulename):
"""Given a path and a module name, find everything that can be imported
and then import it
path - the filesystem path of the package
modulename - the name of the module in the package
E.G. a path of 'clusters', and a modulename of 'ovirt' equates to
importing sos.collector.clusters.ovirt
"""
modules = []
if os.path.exists(path):
for pyfile in sorted(os.listdir(path)):
if not pyfile.endswith('.py'):
continue
if '__' in pyfile:
continue
fname, ext = os.path.splitext(pyfile)
modname = 'sos.collector.%s.%s' % (modulename, fname)
modules.extend(self._import_modules(modname))
return modules
def _import_modules(self, modname):
"""Import and return all found classes in a module"""
mod_short_name = modname.split('.')[2]
module = __import__(modname, globals(), locals(), [mod_short_name])
modules = inspect.getmembers(module, inspect.isclass)
for mod in modules:
if mod[0] in ('SosHost', 'Cluster'):
modules.remove(mod)
return modules
def parse_node_strings(self):
"""Parses the given --nodes option(s) to properly format the regex
list that we use. We cannot blindly split on ',' chars since it is a
valid regex character, so we need to scan along the given strings and
check at each comma if we should use the preceeding string by itself
or not, based on if there is a valid regex at that index.
"""
if not self.opts.nodes:
return
nodes = []
if not isinstance(self.opts.nodes, list):
self.opts.nodes = [self.opts.nodes]
for node in self.opts.nodes:
idxs = [i for i, m in enumerate(node) if m == ',']
idxs.append(len(node))
start = 0
pos = 0
for idx in idxs:
try:
pos = idx
reg = node[start:idx]
re.compile(re.escape(reg))
# make sure we aren't splitting a regex value
if '[' in reg and ']' not in reg:
continue
nodes.append(reg.lstrip(','))
start = idx
except re.error:
continue
if pos != len(node):
nodes.append(node[pos+1:])
self.opts.nodes = nodes
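# Example (illustrative) of the parsing above:
#
#   self.opts.nodes = ['node1,node[2-4]']
#   self.parse_node_strings()
#   self.opts.nodes  # -> ['node1', 'node[2-4]']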
@classmethod
def add_parser_options(cls, parser):
# Add the supported report passthru options to a group for logical
# grouping in --help display
sos_grp = parser.add_argument_group(
'Report Passthru Options',
'These options control how report is run on nodes'
)
sos_grp.add_argument('-a', '--alloptions', action='store_true',
help='Enable all sos report options')
sos_grp.add_argument('--all-logs', action='store_true',
help='Collect logs regardless of size')
sos_grp.add_argument('--allow-system-changes', action='store_true',
default=False,
help=('Allow sosreport to run commands that may '
'alter system state'))
sos_grp.add_argument('--chroot', default='',
choices=['auto', 'always', 'never'],
help="chroot executed commands to SYSROOT")
sos_grp.add_argument('-e', '--enable-plugins', action="extend",
help='Enable specific plugins for sosreport')
sos_grp.add_argument('-k', '--plugin-options', action="extend",
help='Plugin option as plugname.option=value')
sos_grp.add_argument('--log-size', default=0, type=int,
help='Limit the size of individual logs (in MiB)')
sos_grp.add_argument('-n', '--skip-plugins', action="extend",
help='Skip these plugins')
sos_grp.add_argument('-o', '--only-plugins', action="extend",
default=[],
help='Run these plugins only')
sos_grp.add_argument('--no-env-vars', action='store_true',
default=False,
help='Do not collect env vars in sosreports')
sos_grp.add_argument('--plugin-timeout', type=int, default=None,
help='Set the global plugin timeout value')
sos_grp.add_argument('--since', default=None,
help=('Escapes archived files older than date. '
'This will also affect --all-logs. '
'Format: YYYYMMDD[HHMMSS]'))
sos_grp.add_argument('--skip-commands', default=[], action='extend',
dest='skip_commands',
help="do not execute these commands")
sos_grp.add_argument('--skip-files', default=[], action='extend',
dest='skip_files',
help="do not collect these files")
sos_grp.add_argument('--verify', action="store_true",
help='perform pkg verification during collection')
# Add the collector specific options to a separate group to keep
# everything organized
collect_grp = parser.add_argument_group(
'Collector Options',
'These options control how collect runs locally'
)
collect_grp.add_argument('-b', '--become', action='store_true',
dest='become_root',
help='Become root on the remote nodes')
collect_grp.add_argument('--case-id', help='Specify case number')
collect_grp.add_argument('--cluster-type',
help='Specify a type of cluster profile')
collect_grp.add_argument('-c', '--cluster-option',
dest='cluster_options', action='append',
help=('Specify a cluster options used by a '
'profile and takes the form of '
'cluster.option=value'))
collect_grp.add_argument('--group', default=None,
help='Use a predefined group JSON file')
collect_grp.add_argument('--save-group', default='',
help='Save a resulting node list to a group')
collect_grp.add_argument('--image',
help=('Specify the container image to use for'
' containerized hosts.'))
collect_grp.add_argument('-i', '--ssh-key', help='Specify an ssh key')
collect_grp.add_argument('-j', '--jobs', default=4, type=int,
help='Number of concurrent nodes to collect')
collect_grp.add_argument('-l', '--list-options', action="store_true",
help='List options available for profiles')
collect_grp.add_argument('--label',
help='Assign a label to the archives')
collect_grp.add_argument('--master', help='Specify a master node')
collect_grp.add_argument('--nopasswd-sudo', action='store_true',
help='Use passwordless sudo on nodes')
collect_grp.add_argument('--nodes', action="append",
help=('Provide a comma delimited list of '
'nodes, or a regex to match against'))
collect_grp.add_argument('--no-pkg-check', action='store_true',
help=('Do not run package checks. Use this '
'with --cluster-type if there are rpm '
'or apt issues on node'))
collect_grp.add_argument('--no-local', action='store_true',
help='Do not collect a report from localhost')
collect_grp.add_argument('-p', '--ssh-port', type=int,
help='Specify SSH port for all nodes')
collect_grp.add_argument('--password', action='store_true',
default=False,
help='Prompt for user password for nodes')
collect_grp.add_argument('--password-per-node', action='store_true',
default=False,
help='Prompt for password for each node')
collect_grp.add_argument('--preset', default='', required=False,
help='Specify a sos preset to use')
collect_grp.add_argument('--sos-cmd', dest='sos_opt_line',
help=('Manually specify the commandline '
'for sos report on nodes'))
collect_grp.add_argument('--ssh-user',
help='Specify an SSH user. Default root')
collect_grp.add_argument('--timeout', type=int, required=False,
help='Timeout for sosreport on each node.')
collect_grp.add_argument("--upload", action="store_true",
default=False,
help="Upload archive to a policy-default "
"location")
collect_grp.add_argument("--upload-url", default=None,
help="Upload the archive to specified server")
collect_grp.add_argument("--upload-directory", default=None,
help="Specify upload directory for archive")
collect_grp.add_argument("--upload-user", default=None,
help="Username to authenticate with")
collect_grp.add_argument("--upload-pass", default=None,
help="Password to authenticate with")
# Group the cleaner options together
cleaner_grp = parser.add_argument_group(
'Cleaner/Masking Options',
'These options control how data obfuscation is performed'
)
cleaner_grp.add_argument('--clean', '--cleaner', '--mask',
dest='clean',
default=False, action='store_true',
                                 help='Obfuscate sensitive information')
cleaner_grp.add_argument('--domains', dest='domains', default=[],
action='extend',
help='Additional domain names to obfuscate')
cleaner_grp.add_argument('--keywords', action='extend', default=[],
dest='keywords',
help='List of keywords to obfuscate')
cleaner_grp.add_argument('--no-update', action='store_true',
default=False, dest='no_update',
help='Do not update the default cleaner map')
cleaner_grp.add_argument('--map', dest='map_file',
default='/etc/sos/cleaner/default_mapping',
help=('Provide a previously generated mapping'
' file for obfuscation'))
cleaner_grp.add_argument('--usernames', dest='usernames', default=[],
action='extend',
help='List of usernames to obfuscate')
def _check_for_control_persist(self):
"""Checks to see if the local system supported SSH ControlPersist.
ControlPersist allows OpenSSH to keep a single open connection to a
remote host rather than building a new session each time. This is the
        same feature that Ansible uses in place of paramiko, which is a
        dependency we want to drop in sos-collector.
This check relies on feedback from the ssh binary. The command being
run should always generate stderr output, but depending on what that
output reads we can determine if ControlPersist is supported or not.
For our purposes, a host that does not support ControlPersist is not
able to run sos-collector.
Returns
True if ControlPersist is supported, else raise Exception.
"""
ssh_cmd = ['ssh', '-o', 'ControlPersist']
cmd = subprocess.Popen(ssh_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = cmd.communicate()
err = err.decode('utf-8')
if 'Bad configuration option' in err or 'Usage:' in err:
raise ControlPersistUnsupportedException
return True
def exit(self, msg, error=1):
"""Used to safely terminate if sos-collector encounters an error"""
self.log_error(msg)
try:
self.close_all_connections()
except Exception:
pass
self.cleanup()
sys.exit(error)
def _parse_options(self):
"""From commandline options, defaults, etc... build a set of commons
to hand to other collector mechanisms
"""
self.commons = {
'cmdlineopts': self.opts,
            'need_sudo': self.opts.ssh_user != 'root',
'tmpdir': self.tmpdir,
            'hostlen': len(self.opts.master or self.hostname),
'policy': self.policy
}
def parse_cluster_options(self):
opts = []
        if (self.opts.cluster_options and
                not isinstance(self.opts.cluster_options, list)):
            self.opts.cluster_options = [self.opts.cluster_options]
if self.opts.cluster_options:
for option in self.opts.cluster_options:
cluster = option.split('.')[0]
name = option.split('.')[1].split('=')[0]
try:
# there are no instances currently where any cluster option
# should contain a legitimate space.
value = option.split('=')[1].split()[0]
except IndexError:
# conversion to boolean is handled during validation
value = 'True'
opts.append(
ClusterOption(name, value, value.__class__, cluster)
)
self.opts.cluster_options = opts
def verify_cluster_options(self):
"""Verify that requested cluster options exist"""
if self.opts.cluster_options:
for opt in self.opts.cluster_options:
match = False
for clust in self.clusters:
for option in self.clusters[clust].options:
if opt.name == option.name and opt.cluster == clust:
match = True
opt.value = self._validate_option(option, opt)
break
if not match:
self.exit('Unknown cluster option provided: %s.%s'
% (opt.cluster, opt.name))
def _validate_option(self, default, cli):
"""Checks to make sure that the option given on the CLI is valid.
Valid in this sense means that the type of value given matches what a
cluster profile expects (str for str, bool for bool, etc).
For bool options, this will also convert the string equivalent to an
actual boolean value
"""
if not default.opt_type == bool:
if not default.opt_type == cli.opt_type:
msg = "Invalid option type for %s. Expected %s got %s"
self.exit(msg % (cli.name, default.opt_type, cli.opt_type))
return cli.value
else:
val = cli.value.lower()
if val not in ['true', 'on', 'yes', 'false', 'off', 'no']:
msg = ("Invalid value for %s. Accepted values are: 'true', "
"'false', 'on', 'off', 'yes', 'no'.")
self.exit(msg % cli.name)
else:
if val in ['true', 'on', 'yes']:
return True
else:
return False
def log_info(self, msg):
"""Log info messages to both console and log file"""
self.soslog.info(msg)
def log_warn(self, msg):
"""Log warn messages to both console and log file"""
self.soslog.warn(msg)
def log_error(self, msg):
"""Log error messages to both console and log file"""
self.soslog.error(msg)
def log_debug(self, msg):
"""Log debug message to both console and log file"""
caller = inspect.stack()[1][3]
msg = '[sos_collector:%s] %s' % (caller, msg)
self.soslog.debug(msg)
def list_options(self):
"""Display options for available clusters"""
sys.stdout.write('\nThe following clusters are supported by this '
'installation\n')
sys.stdout.write('Use the short name with --cluster-type or cluster '
'options (-c)\n\n')
for cluster in sorted(self.clusters):
sys.stdout.write(" {:<15} {:30}\n".format(
cluster,
self.clusters[cluster].cluster_name))
_opts = {}
for _cluster in self.clusters:
for opt in self.clusters[_cluster].options:
if opt.name not in _opts.keys():
_opts[opt.name] = opt
else:
for clust in opt.cluster:
if clust not in _opts[opt.name].cluster:
_opts[opt.name].cluster.append(clust)
sys.stdout.write('\nThe following cluster options are available:\n\n')
sys.stdout.write(' {:25} {:15} {:<10} {:10} {:<}\n'.format(
'Cluster',
'Option Name',
'Type',
'Default',
'Description'
))
for _opt in sorted(_opts, key=lambda x: _opts[x].cluster):
opt = _opts[_opt]
optln = ' {:25} {:15} {:<10} {:<10} {:<10}\n'.format(
', '.join(c for c in sorted(opt.cluster)),
opt.name,
opt.opt_type.__name__,
str(opt.value),
opt.description)
sys.stdout.write(optln)
sys.stdout.write('\nOptions take the form of cluster.name=value'
'\nE.G. "ovirt.no-database=True" or '
'"pacemaker.offline=False"\n')
def delete_tmp_dir(self):
"""Removes the temp directory and all collected sosreports"""
shutil.rmtree(self.tmpdir)
def _get_archive_name(self):
"""Generates a name for the tarball archive"""
nstr = 'sos-collector'
if self.opts.label:
nstr += '-%s' % self.opts.label
if self.opts.case_id:
nstr += '-%s' % self.opts.case_id
dt = datetime.strftime(datetime.now(), '%Y-%m-%d')
        # string.ascii_lowercase exists on both py2 and py3, so no shim needed
        rand = ''.join(random.choice(string.ascii_lowercase) for x in range(5))
return '%s-%s-%s' % (nstr, dt, rand)
def _get_archive_path(self):
"""Returns the path, including filename, of the tarball we build
that contains the collected sosreports
"""
self.arc_name = self._get_archive_name()
compr = 'gz'
return self.tmpdir + '/' + self.arc_name + '.tar.' + compr
def _fmt_msg(self, msg):
width = 80
_fmt = ''
for line in msg.splitlines():
_fmt = _fmt + fill(line, width, replace_whitespace=False) + '\n'
return _fmt
def _load_group_config(self):
"""
Attempts to load the host group specified on the command line.
Host groups are defined via JSON files, typically saved under
/etc/sos/groups.d/, although users can specify a full filepath
on the commandline to point to one existing anywhere on the system
Host groups define a list of nodes and/or regexes and optionally the
master and cluster-type options.
"""
grp = self.opts.group
paths = [
grp,
os.path.join(Path.home(), '.config/sos/groups.d/%s' % grp),
os.path.join(COLLECTOR_CONFIG_DIR, grp)
]
fname = None
for path in paths:
if os.path.exists(path):
fname = path
break
if fname is None:
raise OSError("no group definition for %s" % grp)
self.log_debug("Loading host group %s" % fname)
with open(fname, 'r') as hf:
_group = json.load(hf)
for key in ['master', 'cluster_type']:
if _group[key]:
self.log_debug("Setting option '%s' to '%s' per host group"
% (key, _group[key]))
setattr(self.opts, key, _group[key])
if _group['nodes']:
self.log_debug("Adding %s to node list" % _group['nodes'])
self.opts.nodes.extend(_group['nodes'])
def write_host_group(self):
"""
Saves the results of this run of sos-collector to a host group file
on the system so it can be used later on.
The host group will save the options master, cluster_type, and nodes
as determined by sos-collector prior to execution of sosreports.
"""
cfg = {
'name': self.opts.save_group,
'master': self.opts.master,
'cluster_type': self.cluster.cluster_type[0],
'nodes': [n for n in self.node_list]
}
if os.getuid() != 0:
group_path = os.path.join(Path.home(), '.config/sos/groups.d')
# create the subdir within the user's home directory
os.makedirs(group_path, exist_ok=True)
else:
group_path = COLLECTOR_CONFIG_DIR
fname = os.path.join(group_path, cfg['name'])
with open(fname, 'w') as hf:
json.dump(cfg, hf)
os.chmod(fname, 0o644)
return fname
def prep(self):
self.policy.set_commons(self.commons)
if (not self.opts.password and not
self.opts.password_per_node):
self.log_debug('password not specified, assuming SSH keys')
msg = ('sos-collector ASSUMES that SSH keys are installed on all '
'nodes unless the --password option is provided.\n')
self.ui_log.info(self._fmt_msg(msg))
if ((self.opts.password or (self.opts.password_per_node and
self.opts.master))
and not self.opts.batch):
self.log_debug('password specified, not using SSH keys')
msg = ('Provide the SSH password for user %s: '
% self.opts.ssh_user)
self.opts.password = getpass(prompt=msg)
if ((self.commons['need_sudo'] and not self.opts.nopasswd_sudo)
and not self.opts.batch):
if not self.opts.password and not self.opts.password_per_node:
self.log_debug('non-root user specified, will request '
'sudo password')
msg = ('A non-root user has been provided. Provide sudo '
'password for %s on remote nodes: '
% self.opts.ssh_user)
self.opts.sudo_pw = getpass(prompt=msg)
else:
if not self.opts.nopasswd_sudo:
self.opts.sudo_pw = self.opts.password
if self.opts.become_root:
if not self.opts.ssh_user == 'root':
if self.opts.batch:
msg = ("Cannot become root without obtaining root "
"password. Do not use --batch if you need "
"to become root remotely.")
self.exit(msg, 1)
self.log_debug('non-root user asking to become root remotely')
msg = ('User %s will attempt to become root. '
'Provide root password: ' % self.opts.ssh_user)
self.opts.root_password = getpass(prompt=msg)
self.commons['need_sudo'] = False
else:
self.log_info('Option to become root but ssh user is root.'
' Ignoring request to change user on node')
self.opts.become_root = False
if self.opts.group:
try:
self._load_group_config()
except Exception as err:
self.log_error("Could not load specified group %s: %s"
% (self.opts.group, err))
self._exit(1)
self.policy.pre_work()
if self.opts.master:
self.connect_to_master()
self.opts.no_local = True
else:
try:
can_run_local = True
local_sudo = None
skip_local_msg = (
"Local sos report generation forcibly skipped due "
"to lack of root privileges.\nEither use --nopasswd-sudo, "
"run as root, or do not use --batch so that you will be "
"prompted for a password\n"
)
if (not self.opts.no_local and (os.getuid() != 0 and not
self.opts.nopasswd_sudo)):
if not self.opts.batch:
msg = ("Enter local sudo password to generate local "
"sos report: ")
local_sudo = getpass(msg)
if local_sudo == '':
self.ui_log.info(skip_local_msg)
can_run_local = False
self.opts.no_local = True
local_sudo = None
else:
self.ui_log.info(skip_local_msg)
can_run_local = False
self.opts.no_local = True
self.master = SosNode('localhost', self.commons,
local_sudo=local_sudo,
load_facts=can_run_local)
except Exception as err:
self.log_debug("Unable to determine local installation: %s" %
err)
self.exit('Unable to determine local installation. Use the '
'--no-local option if localhost should not be '
'included.\nAborting...\n', 1)
self.collect_md.add_field('master', self.master.address)
self.collect_md.add_section('nodes')
self.collect_md.nodes.add_section(self.master.address)
self.master.set_node_manifest(getattr(self.collect_md.nodes,
self.master.address))
if self.opts.cluster_type:
if self.opts.cluster_type == 'none':
self.cluster = self.clusters['jbon']
else:
self.cluster = self.clusters[self.opts.cluster_type]
self.cluster_type = self.opts.cluster_type
self.cluster.master = self.master
else:
self.determine_cluster()
if self.cluster is None and not self.opts.nodes:
msg = ('Cluster type could not be determined and no nodes provided'
'\nAborting...')
self.exit(msg, 1)
elif self.cluster is None and self.opts.nodes:
self.log_info("Cluster type could not be determined, but --nodes "
"is provided. Attempting to continue using JBON "
"cluster type and the node list")
self.cluster = self.clusters['jbon']
self.cluster_type = 'none'
self.collect_md.add_field('cluster_type', self.cluster_type)
if self.cluster:
self.master.cluster = self.cluster
self.cluster.setup()
if self.cluster.cluster_ssh_key:
if not self.opts.ssh_key:
self.log_debug("Updating SSH key to %s per cluster"
% self.cluster.cluster_ssh_key)
self.opts.ssh_key = self.cluster.cluster_ssh_key
self.get_nodes()
if self.opts.save_group:
gname = self.opts.save_group
try:
fname = self.write_host_group()
self.log_info("Wrote group '%s' to %s" % (gname, fname))
except Exception as err:
self.log_error("Could not save group %s: %s" % (gname, err))
def display_nodes(self):
"""Prints a list of nodes to collect from, if available. If no nodes
are discovered or provided, abort.
"""
self.ui_log.info('')
if not self.node_list and not self.master.connected:
self.exit('No nodes were detected, or nodes do not have sos '
'installed.\nAborting...')
self.ui_log.info('The following is a list of nodes to collect from:')
if self.master.connected and self.master.hostname is not None:
if not (self.master.local and self.opts.no_local):
self.ui_log.info('\t%-*s' % (self.commons['hostlen'],
self.master.hostname))
for node in sorted(self.node_list):
self.ui_log.info("\t%-*s" % (self.commons['hostlen'], node))
self.ui_log.info('')
if not self.opts.batch:
try:
input("\nPress ENTER to continue with these nodes, or press "
"CTRL-C to quit\n")
self.ui_log.info("")
except KeyboardInterrupt:
self.exit("Exiting on user cancel", 130)
def configure_sos_cmd(self):
"""Configures the sosreport command that is run on the nodes"""
self.sos_cmd = 'sosreport --batch '
if self.opts.sos_opt_line:
filt = ['&', '|', '>', '<', ';']
if any(f in self.opts.sos_opt_line for f in filt):
self.log_warn('Possible shell script found in provided sos '
'command. Ignoring --sos-opt-line entirely.')
self.opts.sos_opt_line = None
else:
self.sos_cmd = '%s %s' % (
self.sos_cmd, quote(self.opts.sos_opt_line))
self.log_debug("User specified manual sosreport command. "
"Command set to %s" % self.sos_cmd)
return True
sos_opts = []
if self.opts.case_id:
sos_opts.append('--case-id=%s' % (quote(self.opts.case_id)))
if self.opts.alloptions:
sos_opts.append('--alloptions')
if self.opts.all_logs:
sos_opts.append('--all-logs')
if self.opts.verify:
sos_opts.append('--verify')
if self.opts.log_size:
sos_opts.append(('--log-size=%s' % quote(str(self.opts.log_size))))
if self.opts.sysroot:
sos_opts.append('-s %s' % quote(self.opts.sysroot))
if self.opts.chroot:
sos_opts.append('-c %s' % quote(self.opts.chroot))
if self.opts.compression_type != 'auto':
sos_opts.append('-z %s' % (quote(self.opts.compression_type)))
self.sos_cmd = self.sos_cmd + ' '.join(sos_opts)
self.log_debug("Initial sos cmd set to %s" % self.sos_cmd)
self.commons['sos_cmd'] = self.sos_cmd
self.collect_md.add_field('initial_sos_cmd', self.sos_cmd)
def connect_to_master(self):
"""If run with --master, we will run cluster checks again that
instead of the localhost.
"""
try:
self.master = SosNode(self.opts.master, self.commons)
self.ui_log.info('Connected to %s, determining cluster type...'
% self.opts.master)
except Exception as e:
self.log_debug('Failed to connect to master: %s' % e)
self.exit('Could not connect to master node. Aborting...', 1)
def determine_cluster(self):
"""This sets the cluster type and loads that cluster's cluster.
If no cluster type is matched and no list of nodes is provided by
the user, then we abort.
        If a list of nodes is given this check is not run. However, a cluster
        profile can still be used if the user sets --cluster-type manually.
"""
checks = list(self.clusters.values())
for cluster in self.clusters.values():
checks.remove(cluster)
cluster.master = self.master
if cluster.check_enabled():
cname = cluster.__class__.__name__
self.log_debug("Installation matches %s, checking for layered "
"profiles" % cname)
for remaining in checks:
if issubclass(remaining.__class__, cluster.__class__):
rname = remaining.__class__.__name__
self.log_debug("Layered profile %s found. "
"Checking installation"
% rname)
remaining.master = self.master
if remaining.check_enabled():
self.log_debug("Installation matches both layered "
"profile %s and base profile %s, "
"setting cluster type to layered "
"profile" % (rname, cname))
cluster = remaining
break
self.cluster = cluster
self.cluster_type = cluster.name()
self.commons['cluster'] = self.cluster
self.ui_log.info(
'Cluster type set to %s' % self.cluster_type)
break
def get_nodes_from_cluster(self):
"""Collects the list of nodes from the determined cluster cluster"""
if self.cluster_type:
nodes = self.cluster._get_nodes()
self.log_debug('Node list: %s' % nodes)
return nodes
return []
def reduce_node_list(self):
"""Reduce duplicate entries of the localhost and/or master node
if applicable"""
if (self.hostname in self.node_list and self.opts.no_local):
self.node_list.remove(self.hostname)
for i in self.ip_addrs:
if i in self.node_list:
self.node_list.remove(i)
# remove the master node from the list, since we already have
# an open session to it.
        if self.master:
            # iterate over a copy so removals do not skip entries
            for n in list(self.node_list):
                if n == self.master.hostname or n == self.opts.master:
                    self.node_list.remove(n)
self.node_list = list(set(n for n in self.node_list if n))
self.log_debug('Node list reduced to %s' % self.node_list)
self.collect_md.add_list('node_list', self.node_list)
def compare_node_to_regex(self, node):
"""Compares a discovered node name to a provided list of nodes from
the user. If there is not a match, the node is removed from the list"""
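        # Illustrative behaviour (hostnames are made up): a --nodes entry of
        # 'node*' is translated by fnmatch into a regex that matches
        # 'node1.example.com', so that discovered node would be kept.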
for regex in self.opts.nodes:
try:
regex = fnmatch.translate(regex)
if re.match(regex, node):
return True
except re.error as err:
msg = 'Error comparing %s to provided node regex %s: %s'
self.log_debug(msg % (node, regex, err))
return False
def get_nodes(self):
""" Sets the list of nodes to collect sosreports from """
if not self.master and not self.cluster:
msg = ('Could not determine a cluster type and no list of '
'nodes or master node was provided.\nAborting...'
)
self.exit(msg)
try:
nodes = self.get_nodes_from_cluster()
if self.opts.nodes:
for node in nodes:
if self.compare_node_to_regex(node):
self.node_list.append(node)
else:
self.node_list = nodes
except Exception as e:
self.log_debug("Error parsing node list: %s" % e)
self.log_debug('Setting node list to --nodes option')
self.node_list = self.opts.nodes
        # iterate over a copy so removals do not skip entries
        for node in list(self.node_list):
            if any(i in node for i in ('*', '\\', '?', '(', ')', '/')):
                self.node_list.remove(node)
# force add any non-regex node strings from nodes option
if self.opts.nodes:
for node in self.opts.nodes:
if any(i in node for i in '*\\?()/[]'):
continue
if node not in self.node_list:
self.log_debug("Force adding %s to node list" % node)
self.node_list.append(node)
if not self.master:
host = self.hostname.split('.')[0]
# trust the local hostname before the node report from cluster
            for node in list(self.node_list):
                if host == node.split('.')[0]:
                    self.node_list.remove(node)
                    self.node_list.append(self.hostname)
self.reduce_node_list()
try:
self.commons['hostlen'] = len(max(self.node_list, key=len))
except (TypeError, ValueError):
self.commons['hostlen'] = len(self.opts.master)
def _connect_to_node(self, node):
"""Try to connect to the node, and if we can add to the client list to
run sosreport on
Positional arguments
node - a tuple specifying (address, password). If no password, set
to None
"""
try:
client = SosNode(node[0], self.commons, password=node[1])
client.set_cluster(self.cluster)
if client.connected:
self.client_list.append(client)
self.collect_md.nodes.add_section(node[0])
client.set_node_manifest(getattr(self.collect_md.nodes,
node[0]))
else:
client.close_ssh_session()
except Exception:
pass
def intro(self):
"""Print the intro message and prompts for a case ID if one is not
provided on the command line
"""
disclaimer = ("""\
This utility is used to collect sosreports from multiple \
nodes simultaneously. It uses OpenSSH's ControlPersist feature \
to connect to nodes and run commands remotely. If your system \
installation of OpenSSH is older than 5.6, please upgrade.
An archive of sosreport tarballs collected from the nodes will be \
generated in %s and may be provided to an appropriate support representative.
The generated archive may contain data considered sensitive \
and its content should be reviewed by the originating \
organization before being passed to any third party.
No configuration changes will be made to the system running \
this utility or remote systems that it connects to.
""")
self.ui_log.info("\nsos-collector (version %s)\n" % __version__)
intro_msg = self._fmt_msg(disclaimer % self.tmpdir)
self.ui_log.info(intro_msg)
prompt = "\nPress ENTER to continue, or CTRL-C to quit\n"
if not self.opts.batch:
try:
input(prompt)
self.ui_log.info("")
except KeyboardInterrupt:
self.exit("Exiting on user cancel", 130)
if not self.opts.case_id and not self.opts.batch:
msg = 'Please enter the case id you are collecting reports for: '
self.opts.case_id = input(msg)
def execute(self):
if self.opts.list_options:
self.list_options()
self.cleanup()
raise SystemExit
self.intro()
self.configure_sos_cmd()
self.prep()
self.display_nodes()
self.archive_name = self._get_archive_name()
self.setup_archive(name=self.archive_name)
self.archive_path = self.archive.get_archive_path()
self.archive.makedirs('sos_logs', 0o755)
self.collect()
self.cleanup()
def collect(self):
""" For each node, start a collection thread and then tar all
collected sosreports """
if self.master.connected:
self.client_list.append(self.master)
self.ui_log.info("\nConnecting to nodes...")
filters = [self.master.address, self.master.hostname]
nodes = [(n, None) for n in self.node_list if n not in filters]
if self.opts.password_per_node:
_nodes = []
for node in nodes:
msg = ("Please enter the password for %s@%s: "
% (self.opts.ssh_user, node[0]))
node_pwd = getpass(msg)
_nodes.append((node[0], node_pwd))
nodes = _nodes
try:
pool = ThreadPoolExecutor(self.opts.jobs)
pool.map(self._connect_to_node, nodes, chunksize=1)
pool.shutdown(wait=True)
if (self.opts.no_local and
self.client_list[0].address == 'localhost'):
self.client_list.pop(0)
self.report_num = len(self.client_list)
if self.report_num == 0:
self.exit("No nodes connected. Aborting...")
elif self.report_num == 1:
if self.client_list[0].address == 'localhost':
self.exit(
"Collection would only gather from localhost due to "
"failure to either enumerate or connect to cluster "
"nodes. Assuming single collection from localhost is "
"not desired.\n"
"Aborting..."
)
self.ui_log.info("\nBeginning collection of sosreports from %s "
"nodes, collecting a maximum of %s "
"concurrently\n"
% (self.report_num, self.opts.jobs))
pool = ThreadPoolExecutor(self.opts.jobs)
pool.map(self._collect, self.client_list, chunksize=1)
pool.shutdown(wait=True)
except KeyboardInterrupt:
self.log_error('Exiting on user cancel\n')
os._exit(130)
except Exception as err:
self.log_error('Could not connect to nodes: %s' % err)
os._exit(1)
if hasattr(self.cluster, 'run_extra_cmd'):
self.ui_log.info('Collecting additional data from master node...')
files = self.cluster._run_extra_cmd()
if files:
self.master.collect_extra_cmd(files)
msg = '\nSuccessfully captured %s of %s sosreports'
self.log_info(msg % (self.retrieved, self.report_num))
self.close_all_connections()
if self.retrieved > 0:
arc_name = self.create_cluster_archive()
else:
msg = 'No sosreports were collected, nothing to archive...'
self.exit(msg, 1)
if self.opts.upload and self.get_upload_url():
try:
self.policy.upload_archive(arc_name)
self.ui_log.info("Uploaded archive successfully")
except Exception as err:
self.ui_log.error("Upload attempt failed: %s" % err)
def _collect(self, client):
"""Runs sosreport on each node"""
try:
if not client.local:
client.sosreport()
else:
if not self.opts.no_local:
client.sosreport()
if client.retrieved:
self.retrieved += 1
except Exception as err:
self.log_error("Error running sosreport: %s" % err)
def close_all_connections(self):
"""Close all ssh sessions for nodes"""
for client in self.client_list:
self.log_debug('Closing SSH connection to %s' % client.address)
client.close_ssh_session()
def create_cluster_archive(self):
"""Calls for creation of tar archive then cleans up the temporary
files created by sos-collector"""
map_file = None
arc_paths = []
for host in self.client_list:
for fname in host.file_list:
arc_paths.append(fname)
do_clean = False
if self.opts.clean:
hook_commons = {
'policy': self.policy,
'tmpdir': self.tmpdir,
'sys_tmp': self.sys_tmp,
'options': self.opts,
'manifest': self.manifest
}
try:
self.ui_log.info('')
cleaner = SoSCleaner(in_place=True,
hook_commons=hook_commons)
cleaner.set_target_path(self.tmpdir)
map_file, arc_paths = cleaner.execute()
do_clean = True
except Exception as err:
self.ui_log.error("ERROR: unable to obfuscate reports: %s"
% err)
try:
self.log_info('Creating archive of sosreports...')
for fname in arc_paths:
dest = fname.split('/')[-1]
if do_clean:
dest = cleaner.obfuscate_string(dest)
name = os.path.join(self.tmpdir, fname)
self.archive.add_file(name, dest=dest)
if map_file:
# regenerate the checksum for the obfuscated archive
checksum = cleaner.get_new_checksum(fname)
if checksum:
name = os.path.join('checksums', fname.split('/')[-1])
name += '.sha256'
self.archive.add_string(checksum, name)
self.archive.add_file(self.sos_log_file,
dest=os.path.join('sos_logs', 'sos.log'))
self.archive.add_file(self.sos_ui_log_file,
dest=os.path.join('sos_logs', 'ui.log'))
if self.manifest is not None:
self.archive.add_final_manifest_data(
self.opts.compression_type
)
if do_clean:
_dir = os.path.join(self.tmpdir, self.archive._name)
cleaner.obfuscate_file(
os.path.join(_dir, 'sos_logs', 'sos.log'),
short_name='sos.log'
)
cleaner.obfuscate_file(
os.path.join(_dir, 'sos_logs', 'ui.log'),
short_name='ui.log'
)
cleaner.obfuscate_file(
os.path.join(_dir, 'sos_reports', 'manifest.json'),
short_name='manifest.json'
)
arc_name = self.archive.finalize(self.opts.compression_type)
final_name = os.path.join(self.sys_tmp, os.path.basename(arc_name))
if do_clean:
final_name = cleaner.obfuscate_string(
final_name.replace('.tar', '-obfuscated.tar')
)
os.rename(arc_name, final_name)
if map_file:
# rename the map file to match the collector archive name, not
# the temp dir it was constructed in
map_name = cleaner.obfuscate_string(
os.path.join(self.sys_tmp,
"%s_private_map" % self.archive_name)
)
os.rename(map_file, map_name)
self.ui_log.info("A mapping of obfuscated elements is "
"available at\n\t%s" % map_name)
self.soslog.info('Archive created as %s' % final_name)
self.ui_log.info('\nThe following archive has been created. '
'Please provide it to your support team.')
self.ui_log.info('\t%s\n' % final_name)
return final_name
except Exception as err:
msg = ("Could not finalize archive: %s\n\nData may still be "
"available uncompressed at %s" % (err, self.archive_path))
self.exit(msg, 2)
| gpl-2.0 | -8,600,018,873,667,082,000 | 42.258851 | 79 | 0.524299 | false |
0x1001/BabyMonitor | app/add_finger_print.py | 1 | 1251 | def add_finger_print(file_path):
import wave
import analyzer
import storage
import recording
import config
import os
a = analyzer.Analyzer()
s = storage.Storage(config.Config("../config.json"))
waveFile = wave.open(file_path)
waveData = waveFile.readframes(waveFile.getnframes())
rec = recording.Recording(waveData, waveFile.getframerate(), waveFile.getsampwidth(), waveFile.getnchannels())
finger_print = a.finger_print(rec)
finger_print.set_name(os.path.basename(file_path))
s.add_finger_print(finger_print)
if __name__ == "__main__":
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="Path to wave file", dest="file")
parser.add_argument("-d", "--dir", type=str, help="Path to folder with wave files", dest="dir")
args = parser.parse_args()
if args.dir is not None:
waves = [os.path.join(args.dir, file_name) for file_name in os.listdir(args.dir) if file_name.endswith(".wav")]
elif args.file is not None:
waves = [args.file]
else:
parser.print_help()
waves = []
for wave in waves:
print "Processing: " + wave
add_finger_print(wave) | gpl-2.0 | -7,902,132,046,651,514,000 | 28.809524 | 119 | 0.643485 | false |
benschneider/sideprojects1 | density_matrix/fft_filter_test.py | 1 | 1683 | from scipy.signal.signaltools import _next_regular
from matplotlib import pyplot as plt
from numpy.fft import fft, rfftn, irfftn, fftshift # for real data can take advantage of symmetries
import numpy as np
import codecs, json
# from scipy.signal import remez, freqz, lfilter
# lpf = remez(21, [0, 0.2, 0.3, 0.5], [1.0, 0.0])
# w, h = freqz(lpf)
#
# t = np.arange(0, 1.0, 1.00/1000)
# # s = np.sin(2*np.pi*100*t) + np.sin(2*np.pi*200*t)
# noise_amp = 5.0
# s = np.sin(2*np.pi*100*t) + np.sin(2*np.pi*200*t) # +noise_amp * np.random.randn(len(t))
#
# # sout = lfilter(lpf, 1, s)
# # plt.figure(1)
# # plt.plot(s[:100])
# # plt.plot(sout[:100])
#
# ft = fftshift(fft(s)/len(s))
# # ft2 = np.fft.fft(sout[40:])/len(sout)
# # plt.plot(20.0*np.log10(np.abs(ft2)))
# # # plt.plot((np.abs(ft)))
# # plt.show()
#
# shap0 = np.array(s.shape) - 1
# fshape = [_next_regular(int(d)) for d in shap0] # padding to optimal size for FFTPACK
# ft11 = fftshift(rfftn(s, fshape)/fshape)
#
# plt.figure(3)
# # plt.plot(w/(2*np.pi), abs(h))
# # plt.plot(20.0*np.log10(np.abs(ft11)))
# plt.plot(np.abs(ft11))
#
# plt.figure(4)
# #plt.plot(20.0*np.log10(np.abs(ft)))
# plt.plot(np.abs(ft))
# plt.show()
# a = np.random.rand(5)
# b = np.random.rand(5) + 0.1*a
# a = np.arange(10).reshape(2,5) # a 2 by 5 array
# b = a.tolist() # nested lists with same data, indices
# file_path = "blub.json" ## your path variable
# def save_json(file_path, stuff):
# json.dump(stuff, codecs.open(file_path, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4)
#
# def load_json(file_path):
# obj_text = codecs.open(file_path, 'r', encoding='utf-8').read()
# return json.loads(obj_text)
| gpl-2.0 | -3,275,720,611,350,555,000 | 32 | 118 | 0.623886 | false |
KhronosGroup/COLLADA-CTS | Core/Gui/Dialog/FSettingsScrolledSizer.py | 1 | 3448 | # Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
import wx
import wx.lib.scrolledpanel
from Core.Gui.Dialog.FSettingSizer import *
from Core.Common.FConstants import *
class FSettingsScrolledSizer(wx.BoxSizer):
def __init__(self, parent, testProcedure, applicationMap, settings = None,
editable = True):
wx.BoxSizer.__init__(self, wx.VERTICAL)
self.__settingSizers = []
title = wx.StaticText(parent, wx.ID_ANY, "Test Settings")
scrolledPanel = wx.lib.scrolledpanel.ScrolledPanel(parent, wx.ID_ANY,
style=wx.SUNKEN_BORDER)
self.Add(title, 0, wx.ALIGN_CENTER | wx.ALL, 5)
self.Add(scrolledPanel, 1, wx.EXPAND | wx.TOP, 5)
topSizer = wx.BoxSizer(wx.VERTICAL)
for step, app, op, setting in testProcedure.GetStepGenerator():
sizer = FSettingSizer(scrolledPanel, applicationMap, editable,
self.__OnUpdateList)
            if (settings is None):
default = testProcedure.GetGlobalSetting(step)
else:
default = settings[step]
if (op == VALIDATE and op not in OPS_NEEDING_APP):
sizer.SetOperation(">>", op, ">>" + op)
sizer.Enable(False)
else:
sizer.SetOperation(app, op, "[" + app + "]" + op,
testProcedure.GetSettingManager(), default)
topSizer.Add(sizer, 0, wx.EXPAND | wx.ALL, 5)
self.__settingSizers.append(sizer)
padSizer = wx.BoxSizer(wx.VERTICAL)
padSizer.Add(topSizer, 1, wx.EXPAND | wx.ALL, 5)
scrolledPanel.SetSizer(padSizer)
scrolledPanel.SetAutoLayout(True)
scrolledPanel.SetupScrolling(scroll_x = False)
def IsSettingOk(self):
for settingSizer in self.__settingSizers:
if (settingSizer.GetOperation() == VALIDATE): continue
            if (settingSizer.GetSettingName() is None):
return False
return True
def GetSettings(self):
settings = []
for settingSizer in self.__settingSizers:
settings.append(settingSizer.GetSetting())
return settings
def __OnUpdateList(self):
for sizer in self.__settingSizers:
sizer.UpdateList()
| mit | 8,386,046,570,576,897,000 | 48.271429 | 466 | 0.639211 | false |
YtvwlD/metecli | metecli/cmdline.py | 1 | 1638 | from . import setup, account, audits, drinks, config, show_version
from .utils import test_terminal_utf8
import argparse
import logging
log = logging.getLogger(__name__)
def setup_logging(log_level: str) -> None:
numeric_log_level = getattr(logging, log_level.upper(), None)
if not numeric_log_level:
raise Exception("Invalid log level: {}".format(log_level))
logging.basicConfig(level=numeric_log_level)
def do() -> None:
parser = argparse.ArgumentParser(
description="A command line interface to mete."
)
subparsers = parser.add_subparsers(help="commands")
setup.setup_cmdline(subparsers)
account.setup_cmdline(subparsers)
audits.setup_cmdline(subparsers)
drinks.setup_cmdline(subparsers)
config.setup_cmdline(subparsers)
show_version.setup_cmdline(subparsers)
parser.add_argument(
"--log_level", type=str, help="{debug, info, warning, error, critical}"
)
parser.add_argument(
"--config_path", type=str,
help="the path where to place the config file(s)",
)
parser.add_argument(
"--config_name", type=str, help="the name of the config to use",
)
args = parser.parse_args()
if args.log_level:
setup_logging(args.log_level)
log.debug("Parsed args: %s", args)
if(not hasattr(args, "func")):
print("You must provide a topic. Please see --help.")
return
conf = config.Config(path=args.config_path, name=args.config_name)
if not args.log_level:
setup_logging(conf["display"]["log_level"])
test_terminal_utf8()
args.func(args, conf)
| gpl-3.0 | -4,544,778,988,285,004,300 | 29.333333 | 79 | 0.653846 | false |
dpgaspar/AutoAnt | autoant/console.py | 1 | 2092 | #!/usr/bin/env python
import argparse
import logging
import datetime
from autoant import AutoAnt
from .providers import providers
from .version import VERSION_STRING
log_level = {'INFO':logging.INFO,
'DEBUG':logging.DEBUG,
'CRITICAL':logging.CRITICAL,
'ERROR':logging.ERROR}
def get_log_level(str_level):
return log_level.get(str_level,logging.INFO)
parser = argparse.ArgumentParser(description='AutoAnt')
parser.add_argument('-c', '--config', type=str, default='config.json',
help='Your config JSON file')
parser.add_argument('-l', '--loglevel', type=str, default='INFO',
help='Adjust log level accepts {0}'.format(log_level.keys()))
parser.add_argument('-p', '--providers', action='store_true', help='Shows available providers')
parser.add_argument('-s', '--state', action='store_true', help='List the state of producers')
parser.add_argument('-d', '--describe', action='store_true', help='Shows a summary of the config')
parser.add_argument('-v', '--version', action='store_true', help='Shows AutoAnt version')
parser.add_argument('-m', '--measure', action='store_true', help='Will measure run time')
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
logging.getLogger().setLevel(get_log_level(args.loglevel))
logging.getLogger('paramiko').setLevel(logging.ERROR)
log = logging.getLogger(__name__)
def main():
"""Entry-point function."""
if vars(args).get('providers'):
print(providers)
elif vars(args).get('state'):
aa = AutoAnt(args.config)
aa.list()
elif vars(args).get('describe'):
aa = AutoAnt(args.config)
aa.describe()
elif vars(args).get('version'):
print("AutoAnt {0}".format(VERSION_STRING))
else:
aa = AutoAnt(args.config)
t1 = datetime.datetime.now()
aa.run()
t2 = datetime.datetime.now()
if vars(args).get('measure'):
log.info("Time to Process {0}".format(t2 - t1))
if __name__ == '__main__':
main()
| bsd-3-clause | 133,878,767,866,146,460 | 33.866667 | 98 | 0.641969 | false |
3upperm2n/DIGITS | tools/create_db.py | 2 | 20860 | #!/usr/bin/env python
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import time
import argparse
import logging
import re
import shutil
import math
import random
from collections import Counter
import threading
import Queue
try:
import digits
except ImportError:
# Add path for DIGITS package
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import digits.config
digits.config.load_config()
from digits import utils, log
import numpy as np
import PIL.Image
import lmdb
import h5py
from cStringIO import StringIO
# must call digits.config.load_config() before caffe to set the path
import caffe.io
try:
import caffe_pb2
except ImportError:
# See issue #32
from caffe.proto import caffe_pb2
logger = logging.getLogger('digits.tools.create_db')
class Error(Exception):
pass
class BadInputFileError(Error):
"""Input file is empty"""
pass
class ParseLineError(Error):
"""Failed to parse a line in the input file"""
pass
class LoadError(Error):
"""Failed to load image[s]"""
pass
class WriteError(Error):
"""Failed to write image[s]"""
pass
def create_db(input_file, output_file,
image_width, image_height, image_channels,
backend,
resize_mode = None,
image_folder = None,
shuffle = True,
mean_files = None,
**kwargs):
"""
Create a database of images from a list of image paths
Raises exceptions on errors
Arguments:
input_file -- a textfile containing labelled image paths
output_file -- the location to store the created database
image_width -- image resize width
image_height -- image resize height
image_channels -- image channels
backend -- the DB format (lmdb/hdf5)
Keyword arguments:
resize_mode -- passed to utils.image.resize_image()
shuffle -- if True, shuffle the images in the list before creating
mean_files -- a list of mean files to save
"""
### Validate arguments
if not os.path.exists(input_file):
raise ValueError('input_file does not exist')
if os.path.exists(output_file):
logger.warning('removing existing database')
if os.path.isdir(output_file):
shutil.rmtree(output_file, ignore_errors=True)
else:
os.remove(output_file)
if image_width <= 0:
raise ValueError('invalid image width')
if image_height <= 0:
raise ValueError('invalid image height')
if image_channels not in [1,3]:
raise ValueError('invalid number of channels')
if resize_mode not in [None, 'crop', 'squash', 'fill', 'half_crop']:
raise ValueError('invalid resize_mode')
if image_folder is not None and not os.path.exists(image_folder):
raise ValueError('image_folder does not exist')
if mean_files:
for mean_file in mean_files:
if os.path.exists(mean_file):
logger.warning('overwriting existing mean file "%s"!' % mean_file)
else:
dirname = os.path.dirname(mean_file)
if not dirname:
dirname = '.'
if not os.path.exists(dirname):
raise ValueError('Cannot save mean file at "%s"' % mean_file)
compute_mean = bool(mean_files)
### Load lines from input_file into a load_queue
load_queue = Queue.Queue()
image_count = _fill_load_queue(input_file, load_queue, shuffle)
# Start some load threads
batch_size = _calculate_batch_size(image_count)
num_threads = _calculate_num_threads(batch_size, shuffle)
write_queue = Queue.Queue(2*batch_size)
summary_queue = Queue.Queue()
for i in xrange(num_threads):
p = threading.Thread(target=_load_thread,
args=(load_queue, write_queue, summary_queue,
image_width, image_height, image_channels,
resize_mode, image_folder, compute_mean)
)
p.daemon = True
p.start()
start = time.time()
if backend == 'lmdb':
_create_lmdb(image_count, write_queue, batch_size, output_file,
summary_queue, num_threads,
mean_files, **kwargs)
elif backend == 'hdf5':
_create_hdf5(image_count, write_queue, batch_size, output_file,
image_width, image_height, image_channels,
summary_queue, num_threads,
mean_files, **kwargs)
else:
raise ValueError('invalid backend')
logger.info('Database created after %d seconds.' % (time.time() - start))
def _create_lmdb(image_count, write_queue, batch_size, output_file,
summary_queue, num_threads,
mean_files = None,
encoding = None,
lmdb_map_size = None,
**kwargs):
"""
Create an LMDB
Keyword arguments:
encoding -- image encoding format
lmdb_map_size -- the initial LMDB map size
"""
wait_time = time.time()
threads_done = 0
images_loaded = 0
images_written = 0
image_sum = None
batch = []
compute_mean = bool(mean_files)
db = lmdb.open(output_file,
map_size=lmdb_map_size,
map_async=True,
max_dbs=0)
while (threads_done < num_threads) or not write_queue.empty():
# Send update every 2 seconds
if time.time() - wait_time > 2:
logger.debug('Processed %d/%d' % (images_written, image_count))
wait_time = time.time()
processed_something = False
if not summary_queue.empty():
result_count, result_sum = summary_queue.get()
images_loaded += result_count
# Update total_image_sum
if compute_mean and result_count > 0 and result_sum is not None:
if image_sum is None:
image_sum = result_sum
else:
image_sum += result_sum
threads_done += 1
processed_something = True
if not write_queue.empty():
image, label = write_queue.get()
datum = _array_to_datum(image, label, encoding)
batch.append(datum)
if len(batch) == batch_size:
_write_batch_lmdb(db, batch, images_written)
images_written += len(batch)
batch = []
processed_something = True
if not processed_something:
time.sleep(0.2)
if len(batch) > 0:
_write_batch_lmdb(db, batch, images_written)
images_written += len(batch)
if images_loaded == 0:
raise LoadError('no images loaded from input file')
logger.debug('%s images loaded' % images_loaded)
if images_written == 0:
raise WriteError('no images written to database')
logger.info('%s images written to database' % images_written)
if compute_mean:
_save_means(image_sum, images_written, mean_files)
db.close()
def _create_hdf5(image_count, write_queue, batch_size, output_file,
image_width, image_height, image_channels,
summary_queue, num_threads,
mean_files = None,
compression = None,
**kwargs):
"""
Create an HDF5 file
Keyword arguments:
compression -- dataset compression format
"""
wait_time = time.time()
threads_done = 0
images_loaded = 0
images_written = 0
image_sum = None
batch = []
compute_mean = bool(mean_files)
db = h5py.File(output_file, 'w')
data_dset = db.create_dataset('data', (0,image_channels,image_height,image_width), maxshape=(None,image_channels,image_height,image_width),
chunks=True, compression=compression, dtype='float32')
label_dset = db.create_dataset('label', (0,), maxshape=(None,),
chunks=True, compression=compression, dtype='float32')
while (threads_done < num_threads) or not write_queue.empty():
# Send update every 2 seconds
if time.time() - wait_time > 2:
logger.debug('Processed %d/%d' % (images_written, image_count))
wait_time = time.time()
processed_something = False
if not summary_queue.empty():
result_count, result_sum = summary_queue.get()
images_loaded += result_count
# Update total_image_sum
if compute_mean and result_count > 0 and result_sum is not None:
if image_sum is None:
image_sum = result_sum
else:
image_sum += result_sum
threads_done += 1
processed_something = True
if not write_queue.empty():
batch.append(write_queue.get())
if len(batch) == batch_size:
_write_batch_hdf5(batch, data_dset, label_dset)
images_written += len(batch)
batch = []
processed_something = True
if not processed_something:
time.sleep(0.2)
if len(batch) > 0:
_write_batch_hdf5(batch, data_dset, label_dset)
images_written += len(batch)
if images_loaded == 0:
raise LoadError('no images loaded from input file')
logger.debug('%s images loaded' % images_loaded)
if images_written == 0:
raise WriteError('no images written to database')
logger.info('%s images written to database' % images_written)
if compute_mean:
_save_means(image_sum, images_written, mean_files)
db.close()
def _fill_load_queue(filename, queue, shuffle):
"""
Fill the queue with data from the input file
Print the category distribution
Returns the number of lines added to the queue
NOTE: This can be slow on a large input file, but we need the total image
count in order to report the progress, so we might as well read it all
"""
total_lines = 0
valid_lines = 0
distribution = Counter()
with open(filename) as infile:
if shuffle:
lines = infile.readlines() # less memory efficient
random.shuffle(lines)
for line in lines:
total_lines += 1
try:
result = _parse_line(line, distribution)
valid_lines += 1
queue.put(result)
except ParseLineError:
pass
else:
for line in infile: # more memory efficient
total_lines += 1
try:
result = _parse_line(line, distribution)
valid_lines += 1
queue.put(result)
except ParseLineError:
pass
logger.debug('%s total lines in file' % total_lines)
if valid_lines == 0:
raise BadInputFileError('No valid lines in input file')
logger.info('%s valid lines in file' % valid_lines)
for key in sorted(distribution):
logger.debug('Category %s has %d images.' % (key, distribution[key]))
return valid_lines
def _parse_line(line, distribution):
"""
Parse a line in the input file into (path, label)
"""
line = line.strip()
if not line:
raise ParseLineError
# Expect format - [/]path/to/file.jpg 123
match = re.match(r'(.+)\s+(\d+)\s*$', line)
if match is None:
raise ParseLineError
path = match.group(1)
label = int(match.group(2))
distribution[label] += 1
return path, label
def _calculate_batch_size(image_count):
"""
Calculates an appropriate batch size for creating this database
"""
return min(100, image_count)
def _calculate_num_threads(batch_size, shuffle):
"""
Calculates an appropriate number of threads for creating this database
"""
if shuffle:
return min(10, int(round(math.sqrt(batch_size))))
else:
#XXX This is the only way to preserve order for now
# This obviously hurts performance considerably
return 1
def _load_thread(load_queue, write_queue, summary_queue,
image_width, image_height, image_channels,
resize_mode, image_folder, compute_mean):
"""
Consumes items in load_queue
Produces items to write_queue
Stores cumulative results in summary_queue
"""
images_added = 0
if compute_mean:
image_sum = _initial_image_sum(image_width, image_height, image_channels)
else:
image_sum = None
while not load_queue.empty():
try:
path, label = load_queue.get(True, 0.05)
except Queue.Empty:
continue
# prepend path with image_folder, if appropriate
if not utils.is_url(path) and image_folder and not os.path.isabs(path):
path = os.path.join(image_folder, path)
try:
image = utils.image.load_image(path)
        except utils.errors.LoadImageError as e:
logger.warning('[%s] %s: %s' % (path, type(e).__name__, e) )
continue
image = utils.image.resize_image(image,
image_height, image_width,
channels = image_channels,
resize_mode = resize_mode,
)
if compute_mean:
image_sum += image
write_queue.put((image, label))
images_added += 1
summary_queue.put((images_added, image_sum))
def _initial_image_sum(width, height, channels):
"""
Returns an array of zeros that will be used to store the accumulated sum of images
"""
if channels == 1:
return np.zeros((height, width), np.float64)
else:
return np.zeros((height, width, channels), np.float64)
def _array_to_datum(image, label, encoding):
"""
Create a caffe Datum from a numpy.ndarray
"""
if not encoding:
# Transform to caffe's format requirements
if image.ndim == 3:
# Transpose to (channels, height, width)
image = image.transpose((2,0,1))
if image.shape[0] == 3:
# channel swap
# XXX see issue #59
image = image[[2,1,0],...]
elif image.ndim == 2:
# Add a channels axis
image = image[np.newaxis,:,:]
else:
raise Exception('Image has unrecognized shape: "%s"' % image.shape)
datum = caffe.io.array_to_datum(image, label)
else:
datum = caffe_pb2.Datum()
if image.ndim == 3:
datum.channels = image.shape[2]
else:
datum.channels = 1
datum.height = image.shape[0]
datum.width = image.shape[1]
datum.label = label
s = StringIO()
if encoding == 'png':
PIL.Image.fromarray(image).save(s, format='PNG')
elif encoding == 'jpg':
PIL.Image.fromarray(image).save(s, format='JPEG', quality=90)
else:
raise ValueError('Invalid encoding type')
datum.data = s.getvalue()
datum.encoded = True
return datum
def _write_batch_lmdb(db, batch, image_count):
"""
Write a batch to an LMDB database
"""
try:
with db.begin(write=True) as lmdb_txn:
for i, datum in enumerate(batch):
key = '%08d_%d' % (image_count + i, datum.label)
lmdb_txn.put(key, datum.SerializeToString())
except lmdb.MapFullError:
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit*2
logger.debug('Doubling LMDB map size to %sMB ...' % (new_limit>>20,))
try:
db.set_mapsize(new_limit) # double it
except AttributeError as e:
version = tuple(int(x) for x in lmdb.__version__.split('.'))
if version < (0,87):
raise Error('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)
else:
raise e
# try again
_write_batch_lmdb(db, batch, image_count)
def _write_batch_hdf5(batch, data_dset, label_dset):
"""
Write a batch to an HDF5 database
"""
if batch[0][0].ndim == 2:
data_batch = np.array([i[0][...,np.newaxis] for i in batch])
else:
data_batch = np.array([i[0] for i in batch])
# Transpose to (channels, height, width)
data_batch = data_batch.transpose((0,3,1,2))
label_batch = np.array([i[1] for i in batch])
# resize dataset
if data_dset.len() == 0:
data_dset.resize(data_batch.shape)
label_dset.resize(label_batch.shape)
else:
data_dset.resize(data_dset.len()+len(batch),axis=0)
label_dset.resize(label_dset.len()+len(batch),axis=0)
data_dset[-len(batch):] = data_batch
label_dset[-len(batch):] = label_batch
def _save_means(image_sum, image_count, mean_files):
"""
Save mean[s] to file
"""
mean = np.around(image_sum / image_count).astype(np.uint8)
for mean_file in mean_files:
if mean_file.lower().endswith('.npy'):
np.save(mean_file, mean)
elif mean_file.lower().endswith('.binaryproto'):
data = mean
# Transform to caffe's format requirements
if data.ndim == 3:
# Transpose to (channels, height, width)
data = data.transpose((2,0,1))
if data.shape[0] == 3:
# channel swap
# XXX see issue #59
data = data[[2,1,0],...]
elif mean.ndim == 2:
# Add a channels axis
data = data[np.newaxis,:,:]
blob = caffe_pb2.BlobProto()
blob.num = 1
blob.channels, blob.height, blob.width = data.shape
blob.data.extend(data.astype(float).flat)
with open(mean_file, 'wb') as outfile:
outfile.write(blob.SerializeToString())
elif mean_file.lower().endswith(('.jpg', '.jpeg', '.png')):
image = PIL.Image.fromarray(mean)
image.save(mean_file)
else:
logger.warning('Unrecognized file extension for mean file: "%s"' % mean_file)
continue
logger.info('Mean saved at "%s"' % mean_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create-Db tool - DIGITS')
### Positional arguments
parser.add_argument('input_file',
help='An input file of labeled images')
parser.add_argument('output_file',
help='Path to the output database')
parser.add_argument('width',
type=int,
help='width of resized images'
)
parser.add_argument('height',
type=int,
help='height of resized images'
)
### Optional arguments
parser.add_argument('-c', '--channels',
type=int,
default=3,
help='channels of resized images (1 for grayscale, 3 for color [default])'
)
parser.add_argument('-r', '--resize_mode',
help='resize mode for images (must be "crop", "squash" [default], "fill" or "half_crop")'
)
parser.add_argument('-m', '--mean_file', action='append',
help="location to output the image mean (doesn't save mean if not specified)")
parser.add_argument('-f', '--image_folder',
help='folder containing the images (if the paths in input_file are not absolute)')
parser.add_argument('-s', '--shuffle',
action='store_true',
help='Shuffle images before saving'
)
parser.add_argument('-e', '--encoding',
help = 'Image encoding format (jpg/png)'
)
parser.add_argument('-C', '--compression',
help = 'Database compression format (gzip)'
)
parser.add_argument('-b', '--backend',
default='lmdb',
help = 'The database backend - lmdb[default] or hdf5')
parser.add_argument('--lmdb_map_size',
type=int,
help = 'The initial map size for LMDB (in MB)')
args = vars(parser.parse_args())
if args['lmdb_map_size']:
# convert from MB to B
args['lmdb_map_size'] <<= 20
try:
create_db(args['input_file'], args['output_file'],
args['width'], args['height'], args['channels'],
args['backend'],
resize_mode = args['resize_mode'],
image_folder = args['image_folder'],
shuffle = args['shuffle'],
mean_files = args['mean_file'],
encoding = args['encoding'],
compression = args['compression'],
lmdb_map_size = args['lmdb_map_size']
)
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e.message))
raise
| bsd-3-clause | -4,733,648,372,469,106,000 | 31.492212 | 143 | 0.573058 | false |
bgaultier/laboitepro | boites/migrations/0009_pushbutton.py | 1 | 1086 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-06 07:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('boites', '0008_auto_20170801_1406'),
]
operations = [
migrations.CreateModel(
name='PushButton',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event_name', models.SlugField(help_text="Veuillez saisir ici le nom de l'\xe9v\xe9nement IFTTT", verbose_name='IFTTT event name')),
('api_key', models.SlugField(help_text='Veuillez saisir ici votre cl\xe9 IFTTT', verbose_name="IFTTT cl\xe9 d'API")),
('last_triggered', models.DateTimeField(null=True, verbose_name='Derni\xe8re activit\xe9')),
('boite', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='boites.Boite', verbose_name='Bo\xeete')),
],
),
]
| agpl-3.0 | 1,413,793,480,327,889,000 | 40.769231 | 149 | 0.627993 | false |
jalabort/ijcv-2014-aam | aam/image/test/image_features_test.py | 1 | 12108 | import numpy as np
from numpy.testing import assert_allclose
import random
import math
from menpo.image import MaskedImage
import menpo.io as mio
def test_imagewindowiterator_hog_padding():
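    # With padding, the dense HOG sampler places a window at every step starting
    # from 0, so the output grid is len(range(0, size, step)) in each dimension.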
n_cases = 5
image_width = np.random.randint(50, 250, [n_cases, 1])
image_height = np.random.randint(50, 250, [n_cases, 1])
window_step_horizontal = np.random.randint(1, 10, [n_cases, 1])
window_step_vertical = np.random.randint(1, 10, [n_cases, 1])
for i in range(n_cases):
image = MaskedImage(np.random.randn(image_height[i, 0],
image_width[i, 0], 1))
hog = image.features.hog(
mode='dense', window_step_vertical=window_step_vertical[i, 0],
window_step_horizontal=window_step_horizontal[i, 0],
window_step_unit='pixels', padding=True)
n_windows_horizontal = len(range(0, image_width[i, 0],
window_step_horizontal[i, 0]))
n_windows_vertical = len(range(0, image_height[i, 0],
window_step_vertical[i, 0]))
assert_allclose(hog.shape, (n_windows_vertical, n_windows_horizontal))
def test_imagewindowiterator_hog_no_padding():
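    # Without padding, each window must fit entirely inside the image, so
    # sampling starts at window_size - 1 in each dimension.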
n_cases = 5
image_width = np.random.randint(50, 250, [n_cases, 1])
image_height = np.random.randint(50, 250, [n_cases, 1])
window_step_horizontal = np.random.randint(1, 10, [n_cases, 1])
window_step_vertical = np.random.randint(1, 10, [n_cases, 1])
window_width = np.random.randint(3, 20, [n_cases, 1])
window_height = np.random.randint(3, 20, [n_cases, 1])
for i in range(n_cases):
image = MaskedImage(np.random.randn(image_height[i, 0],
image_width[i, 0], 1))
hog = image.features.hog(
mode='dense', cell_size=3, block_size=1,
window_height=window_height[i, 0], window_width=window_width[i, 0],
window_unit='pixels',
window_step_vertical=window_step_vertical[i, 0],
window_step_horizontal=window_step_horizontal[i, 0],
window_step_unit='pixels', padding=False)
n_windows_horizontal = len(range(window_width[i, 0] - 1,
image_width[i, 0],
window_step_horizontal[i, 0]))
n_windows_vertical = len(range(window_height[i, 0] - 1,
image_height[i, 0],
window_step_vertical[i, 0]))
assert_allclose(hog.shape, (n_windows_vertical, n_windows_horizontal))
def test_imagewindowiterator_lbp_padding():
n_cases = 5
image_width = np.random.randint(50, 250, [n_cases, 1])
image_height = np.random.randint(50, 250, [n_cases, 1])
window_step_horizontal = np.random.randint(1, 10, [n_cases, 1])
window_step_vertical = np.random.randint(1, 10, [n_cases, 1])
for i in range(n_cases):
image = MaskedImage(np.random.randn(image_height[i, 0],
image_width[i, 0], 1))
lbp = image.features.lbp(
window_step_vertical=window_step_vertical[i, 0],
window_step_horizontal=window_step_horizontal[i, 0],
window_step_unit='pixels', padding=True)
n_windows_horizontal = len(range(0, image_width[i, 0],
window_step_horizontal[i, 0]))
n_windows_vertical = len(range(0, image_height[i, 0],
window_step_vertical[i, 0]))
assert_allclose(lbp.shape, (n_windows_vertical, n_windows_horizontal))
def test_imagewindowiterator_lbp_no_padding():
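    # The LBP window spans 2 * radius + 1 pixels, so without padding the first
    # window is anchored at index 2 * radius.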
n_cases = 5
image_width = np.random.randint(50, 250, [n_cases, 1])
image_height = np.random.randint(50, 250, [n_cases, 1])
window_step_horizontal = np.random.randint(1, 10, [n_cases, 1])
window_step_vertical = np.random.randint(1, 10, [n_cases, 1])
radius = np.random.randint(3, 5, [n_cases, 1])
for i in range(n_cases):
image = MaskedImage(np.random.randn(image_height[i, 0],
image_width[i, 0], 1))
lbp = image.features.lbp(
radius=radius[i, 0], samples=8,
window_step_vertical=window_step_vertical[i, 0],
window_step_horizontal=window_step_horizontal[i, 0],
window_step_unit='pixels', padding=False)
window_size = 2 * radius[i, 0] + 1
n_windows_horizontal = len(range(window_size - 1, image_width[i, 0],
window_step_horizontal[i, 0]))
n_windows_vertical = len(range(window_size - 1, image_height[i, 0],
window_step_vertical[i, 0]))
assert_allclose(lbp.shape, (n_windows_vertical, n_windows_horizontal))
def test_hog_channels_dalaltriggs():
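    # Dalal-Triggs HOG: channels per window = n_blocks_vertical *
    # n_blocks_horizontal * block_size^2 * num_bins.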
n_cases = 3
cell_size = np.random.randint(1, 10, [n_cases, 1])
block_size = np.random.randint(1, 3, [n_cases, 1])
num_bins = np.random.randint(7, 9, [n_cases, 1])
channels = np.random.randint(1, 4, [n_cases, 1])
for i in range(n_cases):
image = MaskedImage(np.random.randn(40, 40, channels[i, 0]))
block_size_pixels = cell_size[i, 0] * block_size[i, 0]
window_width = np.random.randint(block_size_pixels, 40, 1)
window_height = np.random.randint(block_size_pixels, 40, 1)
hog = image.features.hog(mode='dense', algorithm='dalaltriggs',
cell_size=cell_size[i, 0],
block_size=block_size[i, 0],
num_bins=num_bins[i, 0],
window_height=window_height[0],
window_width=window_width[0],
window_unit='pixels', window_step_vertical=3,
window_step_horizontal=3,
window_step_unit='pixels', padding=True)
length_per_block = block_size[i, 0] * block_size[i, 0] * num_bins[i, 0]
n_blocks_horizontal = len(range(block_size_pixels - 1, window_width[0],
cell_size[i, 0]))
n_blocks_vertical = len(range(block_size_pixels - 1, window_height[0],
cell_size[i, 0]))
n_channels = n_blocks_horizontal * n_blocks_vertical * length_per_block
assert_allclose(hog.n_channels, n_channels)
def test_hog_channels_zhuramanan():
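    # Zhu-Ramanan HOG uses a fixed 31-dimensional descriptor per block.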
n_cases = 3
cell_size = np.random.randint(2, 10, [n_cases, 1])
channels = np.random.randint(1, 4, [n_cases, 1])
for i in range(n_cases):
image = MaskedImage(np.random.randn(40, 40, channels[i, 0]))
window_width = np.random.randint(3 * cell_size[i, 0], 40, 1)
window_height = np.random.randint(3 * cell_size[i, 0], 40, 1)
hog = image.features.hog(mode='dense', algorithm='zhuramanan',
cell_size=cell_size[i, 0],
window_height=window_height[0],
window_width=window_width[0],
window_unit='pixels', window_step_vertical=3,
window_step_horizontal=3,
window_step_unit='pixels', padding=True)
length_per_block = 31
n_blocks_horizontal = round(np.float(window_width[0])
/ np.float(cell_size[i, 0])) - 2
n_blocks_vertical = round(np.float(window_height[0])
/ np.float(cell_size[i, 0])) - 2
n_channels = n_blocks_horizontal * n_blocks_vertical * length_per_block
assert_allclose(hog.n_channels, n_channels)
def test_lbp_channels():
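    # LBP yields one output channel per (radius, samples) combination for each
    # input channel.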
n_cases = 3
n_combs = np.random.randint(1, 6, [n_cases, 1])
channels = np.random.randint(1, 4, [n_cases, 1])
for i in range(n_cases):
radius = random.sample(xrange(1, 10), n_combs[i, 0])
samples = random.sample(xrange(4, 12), n_combs[i, 0])
image = MaskedImage(np.random.randn(40, 40, channels[i, 0]))
lbp = image.features.lbp(radius=radius, samples=samples,
window_step_vertical=3,
window_step_horizontal=3,
window_step_unit='pixels', padding=True)
assert_allclose(lbp.n_channels, n_combs[i, 0] * channels[i, 0])
def test_igo_channels():
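    # IGO yields 2 channels per input channel (cos and sin of the gradient
    # orientation), or 4 when double angles are requested.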
n_cases = 3
channels = np.random.randint(1, 10, [n_cases, 1])
for i in range(n_cases):
image = MaskedImage(np.random.randn(40, 40, channels[i, 0]))
igo = image.features.igo()
igo2 = image.features.igo(double_angles=True)
assert_allclose(igo.shape, image.shape)
assert_allclose(igo2.shape, image.shape)
assert_allclose(igo.n_channels, 2 * channels[i, 0])
assert_allclose(igo2.n_channels, 4 * channels[i, 0])
def test_es_channels():
n_cases = 3
channels = np.random.randint(1, 10, [n_cases, 1])
for i in range(n_cases):
image = MaskedImage(np.random.randn(40, 40, channels[i, 0]))
es = image.features.es()
assert_allclose(es.shape, image.shape)
assert_allclose(es.n_channels, 2 * channels[i, 0])
def test_igo_values():
image = MaskedImage([[1, 2], [2, 1]])
igo = image.features.igo()
res = np.array([
[[math.cos(math.radians(45)), math.sin(math.radians(45))],
[math.cos(math.radians(90+45)), math.sin(math.radians(90+45))]],
[[math.cos(math.radians(-45)), math.sin(math.radians(-45))],
[math.cos(math.radians(180+45)), math.sin(math.radians(180+45))]]])
assert_allclose(igo.pixels, res)
image = MaskedImage([[0, 0], [0, 0]])
igo = image.features.igo()
res = np.array([[[1., 0.], [1., 0.]], [[1., 0.], [1., 0.]]])
assert_allclose(igo.pixels, res)
def test_es_values():
image = MaskedImage([[1, 2], [2, 1]])
es = image.features.es()
k = 1 / (2 * (2**0.5))
res = np.array([[[k, k], [-k, k]], [[k, -k], [-k, -k]]])
assert_allclose(es.pixels, res)
image = MaskedImage([[0, 0], [0, 0]])
es = image.features.es()
res = np.array([[[np.nan, np.nan], [np.nan, np.nan]],
[[np.nan, np.nan], [np.nan, np.nan]]])
assert_allclose(es.pixels, res)
def test_lbp_values():
image = MaskedImage([[0., 6., 0.], [5., 18., 13.], [0., 20., 0.]])
lbp = image.features.lbp(radius=1, samples=4, mapping_type='none',
padding=False)
assert_allclose(lbp.pixels, 8.)
image = MaskedImage([[0., 6., 0.], [5., 25., 13.], [0., 20., 0.]])
lbp = image.features.lbp(radius=1, samples=4, mapping_type='riu2',
padding=False)
assert_allclose(lbp.pixels, 0.)
image = MaskedImage([[0., 6., 0.], [5., 13., 13.], [0., 20., 0.]])
lbp = image.features.lbp(radius=1, samples=4, mapping_type='u2',
padding=False)
assert_allclose(lbp.pixels, 8.)
image = MaskedImage([[0., 6., 0.], [5., 6., 13.], [0., 20., 0.]])
lbp = image.features.lbp(radius=1, samples=4, mapping_type='ri',
padding=False)
assert_allclose(lbp.pixels, 4.)
def test_constrain_landmarks():
breaking_bad = mio.import_builtin_asset('breakingbad.jpg')
breaking_bad.crop_to_landmarks_inplace(boundary=20)
breaking_bad.constrain_mask_to_landmarks()
breaking_bad = breaking_bad.resize([50, 50])
hog = breaking_bad.features.hog(mode='sparse', constrain_landmarks=False)
x = np.where(hog.landmarks['PTS'].lms.points[:, 0] > hog.shape[1] - 1)
y = np.where(hog.landmarks['PTS'].lms.points[:, 0] > hog.shape[0] - 1)
assert_allclose(len(x[0]) + len(y[0]), 12)
hog = breaking_bad.features.hog(mode='sparse', constrain_landmarks=True)
x = np.where(hog.landmarks['PTS'].lms.points[:, 0] > hog.shape[1] - 1)
y = np.where(hog.landmarks['PTS'].lms.points[:, 0] > hog.shape[0] - 1)
assert_allclose(len(x[0]) + len(y[0]), 0)
| bsd-2-clause | -4,259,554,534,044,738,600 | 47.432 | 79 | 0.553105 | false |
krother/maze_run | leftovers/part2_before_pylint.py | 1 | 11490 |
# TODO: fix and check all command-line arguments
from util import debug_print
from pygame import image, Rect, Surface
from pygame.locals import KEYDOWN, KEYUP, USEREVENT
import pygame
import sys
import random
import json
import os
from collections import namedtuple
from functools import partial
import argparse
import logging
#logging.basicConfig(filename='random_levels.log', level=logging.INFO)
log = logging.getLogger('moves')
log.addHandler(logging.FileHandler('moves.log', mode='w'))
log.setLevel(logging.INFO)
eventlog = logging.getLogger('events')
eventlog.addHandler(logging.StreamHandler(sys.stderr))
#fmt='%(asctime)s %(message)s'
#eventlog.addFormatter(logging.Formatter(fmt), datefmt='%m/%d/%Y %I:%M:%S %p')
eventlog.setLevel(logging.WARNING)
Position = namedtuple("Position", ["x", "y"])
# ------------ CONSTANTS ----------------
CONFIG_PATH = os.path.split(__file__)[0]
TILE_POSITION_FILE = os.path.join(CONFIG_PATH, 'tiles.json')
TILE_IMAGE_FILE = os.path.join(CONFIG_PATH, '..', 'images', 'tiles.xpm')
SIZE = 32
SPEED = 4
LEFT = Position(-1, 0)
RIGHT = Position(1, 0)
UP = Position(0, -1)
DOWN = Position(0, 1)
DIRECTIONS = {
276: LEFT, 275: RIGHT,
273: UP, 274: DOWN
}
KEY_REPEAT_TIME = 250
DRAW_REPEAT_TIME = 100
UPDATE_REPEAT_TIME = 20
MOVE_GHOST_TIME = 250
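# Custom pygame event ids, allocated just above the built-in USEREVENT marker.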
KEY_REPEATED, DRAW, UPDATE, MOVE_GHOST, EXIT = range(USEREVENT + 1, USEREVENT + 6)
# ------------- LOADING TILES -----------
def get_tile_rect(pos):
"""Converts tile indices to a pygame.Rect"""
return Rect(pos.x*SIZE, pos.y*SIZE, SIZE, SIZE)
def load_tiles(json_fn):
"""Loads tile positions from a JSON file name"""
tiles = {}
jd = json.loads(open(json_fn).read())
for tile in jd.values():
abbrev = tile["abbrev"]
pos = Position(tile["x"], tile["y"])
rect = get_tile_rect(pos)
tiles[abbrev] = rect
return tiles
# ------------- GENERATING MAZES ------------
class MazeGenerator:
"""Generates two-dimensional mazes consisting of walls and dots."""
@staticmethod
def create_grid_string(dots, xsize, ysize):
grid = ""
for y in range(ysize):
for x in range(xsize):
grid += "." if Position(x, y) in dots else "#"
grid += "\n"
return grid
@staticmethod
def get_all_dot_positions(xsize, ysize):
return [Position(x, y) for x in range(1, xsize-1) for y in range(1, ysize-1)]
@staticmethod
def get_neighbors(pos):
return [
Position(pos.x , pos.y-1), Position(pos.x , pos.y+1),
Position(pos.x-1, pos.y ), Position(pos.x+1, pos.y ),
Position(pos.x-1, pos.y-1), Position(pos.x+1, pos.y-1),
Position(pos.x-1, pos.y+1), Position(pos.x+1, pos.y+1)
]
@staticmethod
def generate_dot_positions(xsize, ysize):
positions = MazeGenerator.get_all_dot_positions(xsize, ysize)
dots = set()
while positions != []:
pos = random.choice(positions)
neighbors = MazeGenerator.get_neighbors(pos)
free = [nb in dots for nb in neighbors]
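            # A cell becomes a dot (walkable) only if fewer than 5 of its
            # 8 neighbours are already dots.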
if free.count(True) < 5:
dots.add(pos)
positions.remove(pos)
return dots
@staticmethod
def create_maze(size):
"""Returns a size.x * size.y maze as a string"""
dots = MazeGenerator.generate_dot_positions(size.x, size.y)
maze = MazeGenerator.create_grid_string(dots, size.x, size.y)
return maze
# ------------- DRAWING GRIDS --------------
class TileGrid:
def __init__(self, data):
self._grid = self.parse_grid(data)
def __repr__(self):
return "\n".join(["".join(row) for row in self._grid])
def parse_grid(self, data):
"""Parses the string representation into a nested list"""
return [list(row) for row in data.strip().split("\n")]
@property
def rows(self):
return self._grid
@property
def xsize(self):
return len(self.rows[0])
@property
def ysize(self):
return len(self.rows)
def __getitem__(self, pos):
return self._grid[pos.y][pos.x]
def __setitem__(self, pos, value):
self._grid[pos.y][pos.x] = value
def __iter__(self):
"""Iterate over all grid tiles"""
for y, row in enumerate(self.rows):
for x, char in enumerate(row):
pos = Position(x, y)
yield pos, char
def find_tile(self, query='*'):
"""Returns a Position tuple for the given char on the level"""
for pos, char in self:
if char == query:
return pos
def draw_grid(self, tile_img, tiles):
"""Returns an image of a tile-based grid"""
#debug_print("drawing level", data)
img = Surface((self.xsize * SIZE, self.ysize * SIZE))
for pos, char in self:
rect = get_tile_rect(pos)
img.blit(tile_img, rect, tiles[char])
return img
# ------------- SPRITES --------------
class Sprite:
def __init__(self, maze, tile, startpos):
self.maze = maze
self.tile = tile
self.pos = startpos
self.anim_direction = None
self.anim_offset = Position(0, 0)
def move(self, direction):
"""Handles moves on a level"""
if not self.is_moving():
old = self.pos
new = Position(old.x + direction.x, old.y + direction.y)
if self.maze[new] in [" ", ".", "x"]:
self.pos = new
self.anim_direction = direction
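                # Start the slide animation one full tile behind the target
                # square; animate() moves the offset back towards zero.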
self.anim_offset = Position(-direction.x * SIZE, -direction.y * SIZE)
def is_moving(self):
return self.anim_direction
def arrives_on_new_tile(self):
pass
def draw(self, img, tile_img, tiles):
"""Returns an image of a tile-based grid"""
rect = get_tile_rect(self.pos)
rect = Rect([rect.x + self.anim_offset.x, rect.y + self.anim_offset.y, rect.w, rect.h])
img.blit(tile_img, rect, tiles[self.tile])
def animate(self):
if self.anim_direction:
ofs_x = self.anim_offset.x + self.anim_direction.x * SPEED
ofs_y = self.anim_offset.y + self.anim_direction.y * SPEED
self.anim_offset = Position(ofs_x, ofs_y)
if ofs_x == 0 and ofs_y == 0:
self.arrives_on_new_tile()
self.anim_direction = None
class Ghost(Sprite):
def random_move(self, event):
direction = random.choice([LEFT, RIGHT, UP, DOWN])
self.move(direction)
class Player(Sprite):
def arrives_on_new_tile(self):
tile = self.maze[self.pos]
if tile == '.':
self.maze[self.pos] = ' ' # eats dot
elif tile == 'x':
exit_game()
def handle_key(self, key):
"""Handles key events in the game"""
direction = DIRECTIONS.get(key)
if direction:
self.move(direction)
# ------------- EVENT LOOP --------------
def event_loop(callbacks, delay=10, repeat=KEY_REPEAT_TIME):
"""Processes events and updates callbacks."""
repeat_key = None
running = True
while running:
pygame.event.pump()
event = pygame.event.poll()
action = callbacks.get(event.type)
if action:
action(event)
elif event.type == EXIT:
running = False
eventlog.critical('exit event received: ' + str(event))
else:
eventlog.info('unhandled event: ' + str(event))
pygame.time.delay(delay)
# ------------- GAME MECHANICS --------------
def exit_game():
eve = pygame.event.Event(EXIT)
pygame.event.post(eve)
# ------------- MAIN GAME --------------
class MazeRun:
def create_display(self):
pygame.init()
pygame.display.set_mode((800, 600))
self.display = pygame.display.get_surface()
def create_tiles(self):
self.tile_img = image.load(TILE_IMAGE_FILE)
self.tiles = load_tiles(TILE_POSITION_FILE)
def load_level(self, fn):
data = open(fn).read()
self.maze = TileGrid(data)
def create_random_maze(self, size):
maze_data = MazeGenerator.create_maze(size)
self.maze = TileGrid(maze_data)
self.maze[Position(size.x-2, size.y-2)] = 'x'
log.info("random level created\n" + str(self.maze))
def create_sprites(self, size):
self.player = Player(self.maze, '*', Position(1, 1))
self.ghost = Ghost(self.maze, 'g', Position(size.x-2, 1))
def draw(self, event):
img = self.maze.draw_grid(self.tile_img, self.tiles)
self.player.draw(img, self.tile_img, self.tiles)
self.ghost.draw(img, self.tile_img, self.tiles)
rect = Rect((0, 0, self.maze.xsize*SIZE, self.maze.ysize*SIZE))
self.display.blit(img, rect, rect)
pygame.display.update()
def handle_key(self, event):
"""Handles key events in the game"""
direction = DIRECTIONS.get(event.key)
if direction:
self.player.move(direction)
self.check_collision()
def check_collision(self):
if self.player.pos == self.ghost.pos:
exit_game()
def update(self, event):
"""Manages recurring checks in the game"""
self.check_collision()
self.player.animate()
self.ghost.animate()
def start_game(self):
callbacks = {
KEYDOWN: self.handle_key,
DRAW: self.draw,
UPDATE: self.update,
MOVE_GHOST: self.ghost.random_move
}
pygame.time.set_timer(DRAW, DRAW_REPEAT_TIME)
pygame.time.set_timer(UPDATE, UPDATE_REPEAT_TIME)
pygame.time.set_timer(MOVE_GHOST, MOVE_GHOST_TIME)
event_loop(callbacks)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Start the MazeRun game.')
parser.add_argument('--x', type=int, default=12,
help='x size of random maze')
parser.add_argument('--y', type=int, default=7,
help='y size of random maze')
parser.add_argument('--ghost',
#dest="MOVE_GHOST_TIME", action="store_const",
type=int, default=500,
help='ghost speed (moves every .. milliseconds)')
parser.add_argument('--load', type=str, default=None,
help='load maze from text file')
parser.add_argument('--replay', type=str, default=None,
help='log file to replay from')
parser.add_argument('-v', '--verbose', action="store_true",
help='print debugging information')
#parser.add_argument('words', type=str, nargs='+',
# help='the word for which characters are counted')
#parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2],
# positional arguments: without dashes
# optional: with --
# g = parser.add_mutually_exclusive_group()
# g.add_argument(...)
# g.add_argument(...)
# -d delay=50 game speed
# -g ghost speed
# -x, -y size of the grid
# -r replay from logfile
# -l load level from file
# optional arguments
# --verbose
# --help info
args = parser.parse_args()
size = Position(args.x, args.y)
mr = MazeRun()
mr.create_display()
mr.create_tiles()
mr.create_random_maze(size)
mr.create_sprites(size)
mr.start_game()
#mr.load_level(LEVEL_FILE)
| mit | -7,573,846,476,509,047,000 | 29.15748 | 95 | 0.573716 | false |
neozerosv/ciberues | agente-servidor.py | 1 | 2142 | #!/usr/bin/env python
# ------------------------------
# imports
# ------------------------------
import socket, sys, time, ConfigParser
def poner_mensaje( tipo , mensaje ):
# -----------------------------
    # Print a message with a consistent
    # format and a timestamp
# -----------------------------
print time.strftime('%Y-%m-%d-%X') + " " + tipo + ": " + mensaje
def activar_configuracion():
# ------------------------------
    # Server settings read from
    # a configuration file
# ------------------------------
configuracion = "./configuracion/agente-servidor.cfg"
global direccion
global puerto
global clave
try:
cfg = ConfigParser.ConfigParser()
cfg.read([configuracion])
puerto = int(cfg.get('servidor','puerto'))
clave = cfg.get('servidor','clave')
clientes = cfg.get('servidor','clientes')
except:
poner_mensaje( 'ERROR' , "No se pudo leer el archivo de configuracion " + configuracion )
poner_mensaje( 'AVISO' , "Se tomaran los valores por omision: 6470 root" )
puerto = 6470
clave = 'root'
if __name__ == "__main__":
activar_configuracion()
# ------------------------------
    # parameters to use
# ------------------------------
if( len(sys.argv) == 3 ):
continuar = True
direccion = sys.argv[1]
comando = sys.argv[2]
agente = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
try:
agente.connect( ( direccion, puerto ) )
except:
poner_mensaje ( 'ERROR' , "No se pudo establecer la conexion en la direccion: "+ direccion +" con el puerto: " + str(puerto) )
continuar = False
if ( continuar == True ):
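        # Simple request/response exchange: read the banner, send the password,
        # read the prompt, send the command, then read the result.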
data, server = agente.recvfrom( 100 )
poner_mensaje ( 'MENSAJE' , data )
agente.send( clave )
data, server = agente.recvfrom( 100 )
poner_mensaje ( 'MENSAJE' , data )
agente.send( comando )
data, server = agente.recvfrom( 100 )
poner_mensaje ( 'MENSAJE' , data )
agente.close()
else:
print "--------------------------------------------------------------"
print " Tiene que mandar cuatro parametros"
print " agente-servidor.py <direccion> <comando>"
print "--------------------------------------------------------------"
| gpl-2.0 | 6,846,871,219,183,194,000 | 31.953846 | 129 | 0.546218 | false |