| repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
joshuahoman/vivisect | vivisect/base.py | 2 | 23578 | import Queue
import traceback
import threading
import collections
import envi
import envi.memory as e_mem
import envi.pagelookup as e_page
import envi.codeflow as e_codeflow
import vstruct.cparse as vs_cparse
import vstruct.builder as vs_builder
import vstruct.constants as vs_const
import vivisect.const as viv_const
import vivisect.impapi as viv_impapi
import vivisect.analysis as viv_analysis
import vivisect.codegraph as viv_codegraph
from envi.threads import firethread
from vivisect.exc import *
from vivisect.const import *
"""
Mostly this is a place to scuttle away some of the inner workings
of a workspace, so the outer facing API is a little cleaner.
"""
class VivEventCore(object):
'''
A class to facilitate event monitoring in the viv workspace.
'''
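# Example (hypothetical) subclass: handlers are picked up automatically by
# __init__ below simply because the method name matches a VWE_*/VTE_* constant:
#   class MyMonitor(VivEventCore):
#       def VWE_ADDFUNCTION(self, vw, event, einfo):
#           va, meta = einfo
#           vw.vprint('new function at 0x%x' % va)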
def __init__(self, vw):
self._ve_vw = vw
self._ve_ehand = [None for x in xrange(VWE_MAX)]
self._ve_thand = [None for x in xrange(VTE_MAX)]
self._ve_lock = threading.Lock()
# Find and put handler functions into the list
for name in dir(self):
if name.startswith('VWE_'):
idx = getattr(viv_const, name, None)
self._ve_ehand[idx] = getattr(self, name)
if name.startswith('VTE_'):
idx = getattr(viv_const, name, None)
self._ve_thand[idx] = getattr(self, name)
def _ve_fireEvent(self, event, edata):
hlist = self._ve_ehand
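# Transient (VTE_*) events carry the VTE_MASK bit; strip it and dispatch from
# the transient handler table instead of the workspace event handlers.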
if event & VTE_MASK:
event ^= VTE_MASK
hlist = self._ve_thand
h = hlist[event]
if h != None:
try:
h(self._ve_vw, event, edata)
except Exception, e:
traceback.print_exc()
@firethread
def _ve_fireListener(self):
chanid = self._ve_vw.createEventChannel()
try:
etup = self._ve_vw.waitForEvent(chanid)
while etup != None:
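# The acquire/release pair acts as a gate: while _ve_freezeEvents() holds the
# lock, delivery blocks here until _ve_thawEvents() releases it.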
self._ve_lock.acquire()
self._ve_lock.release()
self._ve_fireEvent(*etup)
etup = self._ve_vw.waitForEvent(chanid)
finally:
self._ve_vw.deleteEventChannel(chanid)
def _ve_freezeEvents(self):
self._ve_lock.acquire()
def _ve_thawEvents(self):
self._ve_lock.release()
vaset_xlate = {
int:VASET_ADDRESS,
str:VASET_STRING,
}
class VivEventDist(VivEventCore):
'''
Similar to an event core, but does optimized distribution
to a set of sub eventcore objects (think GUI windows...)
'''
def __init__(self, vw):
VivEventCore.__init__(self, vw)
self._ve_subs = [ [] for x in xrange(VWE_MAX) ]
self._ve_tsubs = [ [] for x in xrange(VTE_MAX) ]
self.addEventCore(self)
# event distributors pretty much always need a thread
self._ve_fireListener()
def addEventCore(self, core):
for i in xrange(VWE_MAX):
h = core._ve_ehand[i]
if h != None:
self._ve_subs[i].append(h)
for i in xrange(VTE_MAX):
h = core._ve_thand[i]
if h != None:
self._ve_tsubs[i].append(h)
def delEventCore(self, core):
for i in xrange(VWE_MAX):
h = core._ve_ehand[i]
if h != None:
self._ve_subs[i].remove(h)
for i in xrange(VTE_MAX):
h = core._ve_thand[i]
if h != None:
self._ve_tsubs[i].remove(h)
def _ve_fireEvent(self, event, edata):
'''
We don't have events of our own, we just hand them down.
'''
subs = self._ve_subs
if event & VTE_MASK:
event ^= VTE_MASK
subs = self._ve_tsubs
hlist = subs[event]
for h in hlist:
try:
h(self._ve_vw, event, edata)
except Exception, e:
traceback.print_exc()
VivEventCore._ve_fireEvent(self, event, edata)
def ddict():
return collections.defaultdict(dict)
class VivWorkspaceCore(object,viv_impapi.ImportApi):
def __init__(self):
viv_impapi.ImportApi.__init__(self)
self.loclist = []
self.bigend = False
self.locmap = e_page.MapLookup()
self.blockmap = e_page.MapLookup()
self._mods_loaded = False
# Storage for function local symbols
self.localsyms = ddict()
self._call_graph = viv_codegraph.CallGraph()
# Just in case of the GUI... :)
self._call_graph.setMeta('bgcolor', '#000')
self._call_graph.setMeta('nodecolor', '#00ff00')
self._call_graph.setMeta('edgecolor', '#00802b')
self._event_list = []
self._event_saved = 0 # The index of the last "save" event...
# Give ourself a structure namespace!
self.vsbuilder = vs_builder.VStructBuilder()
self.vsconsts = vs_const.VSConstResolver()
def _snapInAnalysisModules(self):
'''
Snap in the analysis modules which are appropriate for the
format/architecture/platform of this workspace by calling
viv_analysis.addAnalysisModules().
'''
if self._mods_loaded:
return
viv_analysis.addAnalysisModules(self)
self._mods_loaded = True
def _createSaveMark(self):
'''
Update the index of the most recent saved event to the current
length of the event list (called after a successful save).
'''
self._event_saved = len(self._event_list)
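# The _handle* methods below are the per-event callbacks: each applies one
# workspace event to the in-memory indexes (locmap, loclist, xrefs, funcmeta,
# vasets, ...) and is wired to its VWE_* constant in _initEventHandlers().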
def _handleADDLOCATION(self, loc):
lva, lsize, ltype, linfo = loc
self.locmap.setMapLookup(lva, lsize, loc)
self.loclist.append(loc)
# A few special handling cases...
if ltype == LOC_IMPORT:
# Check if the import is registered in NoReturnApis
if self.getMeta('NoReturnApis', {}).get(linfo.lower()):
self.cfctx.addNoReturnAddr( lva )
def _handleDELLOCATION(self, loc):
# FIXME delete xrefs
lva, lsize, ltype, linfo = loc
self.locmap.setMapLookup(lva, lsize, None)
self.loclist.remove(loc)
def _handleADDSEGMENT(self, einfo):
self.segments.append(einfo)
def _handleADDRELOC(self, einfo):
self.reloc_by_va[einfo[0]] = einfo[1]
self.relocations.append(einfo)
def _handleADDMODULE(self, einfo):
print('DEPRECATED (ADDMODULE) ignored: %s' % einfo)
def _handleDELMODULE(self, einfo):
print('DEPRECATED (DELMODULE) ignored: %s' % einfo)
def _handleADDFMODULE(self, einfo):
print('DEPRECATED (ADDFMODULE) ignored: %s' % einfo)
def _handleDELFMODULE(self, einfo):
print('DEPRECATED (DELFMODULE) ignored: %s' % einfo)
def _handleADDFUNCTION(self, einfo):
va, meta = einfo
self._initFunction(va)
#node = self._call_graph.addNode( nid=va, repr=self.getName( va ) ) #, color='#00ff00' )
#node = self._call_graph.getFunctionNode(va, repr=self.getName( va ) )
node = self._call_graph.getFunctionNode(va)
self._call_graph.setNodeProp(node,'repr',self.getName(va))
# Tell the codeflow subsystem about this one!
calls_from = meta.get('CallsFrom')
self.cfctx.addFunctionDef(va, calls_from)
self.funcmeta[va] = meta
for name,value in meta.items():
mcbname = "_fmcb_%s" % name.split(':')[0]
mcb = getattr(self, mcbname, None)
if mcb != None:
mcb(va, name, value)
def _handleDELFUNCTION(self, einfo):
self.funcmeta.pop(einfo)
self.func_args.pop(einfo)
self.codeblocks_by_funcva.pop(einfo)
node = self._call_graph.getNode(einfo)
self._call_graph.delNode(node)
def _handleSETFUNCMETA(self, einfo):
funcva, name, value = einfo
m = self.funcmeta.get(funcva)
if m != None:
m[name] = value
mcbname = "_fmcb_%s" % name.split(':')[0]
mcb = getattr(self, mcbname, None)
if mcb != None:
mcb(funcva, name, value)
def _handleADDCODEBLOCK(self, einfo):
va,size,funcva = einfo
self.blockmap.setMapLookup(va, size, einfo)
self.codeblocks_by_funcva.get(funcva).append(einfo)
self.codeblocks.append(einfo)
def _handleDELCODEBLOCK(self, cb):
va,size,funcva = cb
self.codeblocks.remove(cb)
self.codeblocks_by_funcva.get(cb[CB_FUNCVA]).remove(cb)
self.blockmap.setMapLookup(va, size, None)
def _handleADDXREF(self, einfo):
fromva, tova, reftype, rflags = einfo
xr_to = self.xrefs_by_to.get(tova, None)
xr_from = self.xrefs_by_from.get(fromva, None)
if xr_to == None:
xr_to = []
self.xrefs_by_to[tova] = xr_to
if xr_from == None:
xr_from = []
self.xrefs_by_from[fromva] = xr_from
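# xrefs_by_to and xrefs_by_from are always updated together, so checking
# membership in xr_to alone is enough to avoid duplicates in either list.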
if einfo not in xr_to: # Just check one for now
xr_to.append(einfo)
xr_from.append(einfo)
self.xrefs.append(einfo)
def _handleDELXREF(self, einfo):
fromva, tova, reftype, refflags = einfo
self.xrefs_by_to[tova].remove(einfo)
self.xrefs_by_from[fromva].remove(einfo)
def _handleSETNAME(self, einfo):
va,name = einfo
if name == None:
oldname = self.name_by_va.pop(va, None)
self.va_by_name.pop(oldname, None)
else:
curname = self.name_by_va.get(va)
if curname != None:
self.va_by_name.pop(curname)
self.va_by_name[name] = va
self.name_by_va[va] = name
if self.isFunction( va ):
fnode = self._call_graph.getFunctionNode(va)
self._call_graph.setNodeProp(fnode,'repr',name)
def _handleADDMMAP(self, einfo):
va, perms, fname, mbytes = einfo
e_mem.MemoryObject.addMemoryMap(self, va, perms, fname, mbytes)
blen = len(mbytes)
self.locmap.initMapLookup(va, blen)
self.blockmap.initMapLookup(va, blen)
# On loading a new memory map, we need to crush a few
# transmeta items...
self.transmeta.pop('findPointers',None)
def _handleADDEXPORT(self, einfo):
va, etype, name, filename = einfo
self.exports.append(einfo)
self.exports_by_va[va] = einfo
fullname = "%s.%s" % (filename,name)
self.makeName(va, fullname)
def _handleSETMETA(self, einfo):
name,value = einfo
# See if there's a callback handler for this meta set.
# For "meta namespaces" use the first part to find the
# callback name....
mcbname = "_mcb_%s" % name.split(':')[0]
mcb = getattr(self, mcbname, None)
if mcb != None:
mcb(name, value)
self.metadata[name] = value
def _handleCOMMENT(self, einfo):
va,comment = einfo
if comment == None:
self.comments.pop(va)
else:
self.comments[va] = comment
def _handleADDFILE(self, einfo):
normname, imagebase, md5sum = einfo
self.filemeta[normname] = {"md5sum":md5sum,"imagebase":imagebase}
def _handleSETFILEMETA(self, einfo):
fname, key, value = einfo
self.filemeta.get(fname)[key] = value
def _handleADDCOLOR(self, coltup):
mapname, colmap = coltup
self.colormaps[mapname] = colmap
def _handleDELCOLOR(self, mapname):
self.colormaps.pop(mapname)
def _handleADDVASET(self, argtup):
name, defs, rows = argtup
# NOTE: legacy translation for vaset column types...
defs = [ (cname,vaset_xlate.get(ctype,ctype)) for (cname,ctype) in defs ]
self.vasetdefs[name] = defs
vals = {}
for row in rows:
vals[row[0]] = row
self.vasets[name] = vals
def _handleDELVASET(self, setname):
self.vasetdefs.pop(setname)
self.vasets.pop(setname)
def _handleADDFREF(self, frtup):
va, idx, val = frtup
self.frefs[(va,idx)] = val
def _handleDELFREF(self, frtup):
va, idx, val = frtup
self.frefs.pop((va,idx), None)
def _handleSETVASETROW(self, argtup):
name, row = argtup
self.vasets[name][row[0]] = row
def _handleDELVASETROW(self, argtup):
name, va = argtup
self.vasets[name].pop(va, None)
def _handleADDFSIG(self, einfo):
print('DEPRECATED (ADDFSIG) ignored: %s' % (einfo,))
def _handleFOLLOWME(self, va):
pass
def _handleCHAT(self, msgtup):
# FIXME make a GUI window for this...
user, msg = msgtup
self.vprint('%s: %s' % (user, msg))
def _handleSYMHINT(self, msgtup):
va, idx, hint = msgtup
if hint == None:
self.symhints.pop((va,idx), None)
else:
self.symhints[(va,idx)] = hint
def _handleSETFUNCARGS(self, einfo):
fva, args = einfo
self.func_args[fva] = args
def _handleAUTOANALFIN(self, einfo):
'''
This event is more for the storage subsystem than anything else. It
marks the end of autoanalysis. Any event beyond this is due to the
end user or analysis modules they've executed.
'''
pass
def _initEventHandlers(self):
self.ehand = [None for x in xrange(VWE_MAX)]
self.ehand[VWE_ADDLOCATION] = self._handleADDLOCATION
self.ehand[VWE_DELLOCATION] = self._handleDELLOCATION
self.ehand[VWE_ADDSEGMENT] = self._handleADDSEGMENT
self.ehand[VWE_DELSEGMENT] = None
self.ehand[VWE_ADDRELOC] = self._handleADDRELOC
self.ehand[VWE_DELRELOC] = None
self.ehand[VWE_ADDMODULE] = self._handleADDMODULE
self.ehand[VWE_DELMODULE] = self._handleDELMODULE
self.ehand[VWE_ADDFMODULE] = self._handleADDFMODULE
self.ehand[VWE_DELFMODULE] = self._handleDELFMODULE
self.ehand[VWE_ADDFUNCTION] = self._handleADDFUNCTION
self.ehand[VWE_DELFUNCTION] = self._handleDELFUNCTION
self.ehand[VWE_SETFUNCARGS] = self._handleSETFUNCARGS
self.ehand[VWE_SETFUNCMETA] = self._handleSETFUNCMETA
self.ehand[VWE_ADDCODEBLOCK] = self._handleADDCODEBLOCK
self.ehand[VWE_DELCODEBLOCK] = self._handleDELCODEBLOCK
self.ehand[VWE_ADDXREF] = self._handleADDXREF
self.ehand[VWE_DELXREF] = self._handleDELXREF
self.ehand[VWE_SETNAME] = self._handleSETNAME
self.ehand[VWE_ADDMMAP] = self._handleADDMMAP
self.ehand[VWE_DELMMAP] = None
self.ehand[VWE_ADDEXPORT] = self._handleADDEXPORT
self.ehand[VWE_DELEXPORT] = None
self.ehand[VWE_SETMETA] = self._handleSETMETA
self.ehand[VWE_COMMENT] = self._handleCOMMENT
self.ehand[VWE_ADDFILE] = self._handleADDFILE
self.ehand[VWE_DELFILE] = None
self.ehand[VWE_SETFILEMETA] = self._handleSETFILEMETA
self.ehand[VWE_ADDCOLOR] = self._handleADDCOLOR
self.ehand[VWE_DELCOLOR] = self._handleDELCOLOR
self.ehand[VWE_ADDVASET] = self._handleADDVASET
self.ehand[VWE_DELVASET] = self._handleDELVASET
self.ehand[VWE_SETVASETROW] = self._handleSETVASETROW
self.ehand[VWE_DELVASETROW] = self._handleDELVASETROW
self.ehand[VWE_ADDFSIG] = self._handleADDFSIG
self.ehand[VWE_ADDFREF] = self._handleADDFREF
self.ehand[VWE_DELFREF] = self._handleDELFREF
self.ehand[VWE_FOLLOWME] = self._handleFOLLOWME
self.ehand[VWE_CHAT] = self._handleCHAT
self.ehand[VWE_SYMHINT] = self._handleSYMHINT
self.ehand[VWE_AUTOANALFIN] = self._handleAUTOANALFIN
self.thand = [None for x in xrange(VTE_MAX)]
self.thand[VTE_IAMLEADER] = self._handleIAMLEADER
self.thand[VTE_FOLLOWME] = self._handleFOLLOWME
def _handleIAMLEADER(self, event, einfo):
user,follow = einfo
self.vprint('*%s invites everyone to follow "%s"' % (user,follow))
def _handleFOLLOWME(self, event, einfo):
# workspace has nothing to do...
pass
def _fireEvent(self, event, einfo, local=False, skip=None):
'''
Fire an event down the hole. "local" specifies that this is
being called on a client (self.server != None) but we got it
from the server in the first place so no need to send it back.
skip is used to tell the server to bypass our channelid when
putting the event into channel queues (we took care of our own).
'''
try:
if event & VTE_MASK:
return self._fireTransEvent(event, einfo)
# Do our main event processing
self.ehand[event](einfo)
# If we're supposed to call a server, do that.
if self.server != None and local == False:
self.server._fireEvent(event, einfo, skip=self.rchan)
# FIXME perhaps we should only process events *via* our server
# if we have one? Just to confirm it works before we apply it...
self._event_list.append((event, einfo))
for id,q in self.chan_lookup.items():
if id == skip:
continue
try:
q.put_nowait((event, einfo))
except Queue.Full, e:
print "FULL QUEUE DO SOMETHING"
except Exception, e:
traceback.print_exc()
def _fireTransEvent(self, event, einfo):
for q in self.chan_lookup.values():
q.put((event, einfo))
return self.thand[event ^ VTE_MASK](event,einfo)
def _initFunction(self, funcva):
# Internal function to initialize all datastructures necessary for
# a function, but only if they haven't been done already.
if self.funcmeta.get(funcva) == None:
self.funcmeta[funcva] = {} # His metadata
self.codeblocks_by_funcva[funcva] = [] # Init code block list
#def _loadImportApi(self, apidict):
#self._imp_api.update( apidict )
#################################################################
#
# setMeta key callbacks
#
def _mcb_Architecture(self, name, value):
# This is for legacy stuff...
self.arch = envi.getArchModule(value)
self.psize = self.arch.getPointerSize()
archid = envi.getArchByName(value)
self.setMemArchitecture(archid)
# Default calling convention for architecture
# This will be superseded by Platform and Parser settings
defcall = self.arch.getArchDefaultCall()
if defcall:
self.setMeta('DefaultCall', defcall)
def _mcb_bigend(self, name, value):
print('OH HAI')
self.bigend = bool(value)
def _mcb_Platform(self, name, value):
# Default calling convention for platform
# This supersedes Architecture's setting and should make
# parser settings obsolete
defcall = self.arch.getPlatDefaultCall(value)
if defcall:
self.setMeta('DefaultCall', defcall)
def _mcb_ustruct(self, name, ssrc):
# All meta values in the "ustruct" namespace are user defined
# structure definitions in C.
sname = name.split(':')[1]
ctor = vs_cparse.ctorFromCSource( ssrc )
self.vsbuilder.addVStructCtor( sname, ctor )
def _mcb_WorkspaceServer(self, name, wshost):
self.vprint('Workspace was Saved to Server: %s' % wshost)
self.vprint('(You must close this local copy and work from the server to stay in sync.)')
def _fmcb_Thunk(self, funcva, th, thunkname):
# If the function being made a thunk is registered
# in NoReturnApis, update codeflow...
if self.getMeta('NoReturnApis').get( thunkname.lower() ):
self.cfctx.addNoReturnAddr( funcva )
def _fmcb_CallsFrom(self, funcva, th, callsfrom):
for va in callsfrom:
f2va = self.getFunction( va )
if f2va != None:
self._call_graph.getCallEdge( funcva, f2va )
def _fmcb_LocalSymbol(self, fva, mname, locsym):
fva,spdelta,symtype,syminfo = locsym
self.localsyms[fva][spdelta] = locsym
def trackDynBranches(cfctx, op, vw, bflags, branches):
'''
Track dynamic branch instructions (e.g. indirect jumps/calls) by recording
them in the 'DynamicBranches' VA set.
'''
# FIXME: do we want to filter anything out?
# jmp edx
# jmp dword [ebx + 68]
# call eax
# call dword [ebx + eax * 4 - 228]
# if we have any xrefs from here, we have already been analyzed. nevermind.
if len(vw.getXrefsFrom(op.va)):
return
if vw.verbose: print "Dynamic Branch found at 0x%x %s" % (op.va, op)
vw.setVaSetRow('DynamicBranches', (op.va, repr(op), bflags))
class VivCodeFlowContext(e_codeflow.CodeFlowContext):
def __init__(self, mem, persist=False, exptable=True, recurse=True):
e_codeflow.CodeFlowContext.__init__(self, mem, persist=persist, exptable=exptable, recurse=recurse)
self.addDynamicBranchHandler(trackDynBranches)
def _cb_noflow(self, srcva, dstva):
vw = self._mem
loc = vw.getLocation( srcva )
if loc == None:
return
lva,lsize,ltype,linfo = loc
if ltype != LOC_OP:
return
# Update the location def for NOFALL bit
vw.delLocation(lva)
vw.addLocation(lva, lsize, ltype, linfo | envi.IF_NOFALL)
vw.setVaSetRow('NoReturnCalls', (lva,))
# NOTE: self._mem is the viv workspace...
def _cb_opcode(self, va, op, branches):
loc = self._mem.getLocation(va)
if loc == None:
# dont code flow through import calls
branches = [br for br in branches if not self._mem.isLocType(br[0],LOC_IMPORT)]
self._mem.makeOpcode(op.va, op=op)
return branches
return ()
def _cb_function(self, fva, fmeta):
vw = self._mem
if vw.isFunction(fva):
return
# This may be possible if an export/symbol was mistaken for
# a function...
if not vw.isLocType(fva, LOC_OP):
return
# If the function doesn't have a name, make one
if vw.getName(fva) == None:
vw.makeName(fva, "sub_%.8x" % fva)
vw._fireEvent(VWE_ADDFUNCTION, (fva,fmeta))
# Go through the function analysis modules in order
for fmname in vw.fmodlist:
fmod = vw.fmods.get(fmname)
try:
fmod.analyzeFunction(vw, fva)
except Exception, e:
if vw.verbose:
traceback.print_exc()
vw.verbprint("Function Analysis Exception for 0x%x %s: %s" % (fva, fmod.__name__, e))
vw.setFunctionMeta(fva, "%s fail" % fmod.__name__, traceback.format_exc())
fname = vw.getName( fva )
if vw.getMeta('NoReturnApis').get( fname.lower() ):
self._cf_noret[ fva ] = True
if len( vw.getFunctionBlocks( fva )) == 1:
return
fmeta = vw.getFunctionMetaDict(fva)
for lva in vw.getVaSetRows('NoReturnCalls'):
va = lva[0]
ctup = vw.getCodeBlock(va)
if ctup and fva == ctup[2] and vw.getFunctionMeta(fva, 'BlockCount', default=0) == 1:
self._cf_noret[ fva ] = True
break
def _cb_branchtable(self, tablebase, tableva, destva):
if tablebase != tableva and self._mem.getXrefsTo(tableva):
return False
if self._mem.getLocation(tableva) == None:
self._mem.makePointer(tableva, tova=destva, follow=False)
return True
| apache-2.0 | 4,224,038,362,557,105,700 | 32.827834 | 107 | 0.591526 | false |
Lemma1/MAC-POSTS | src/pybinder/pybind11/tests/test_numpy_array.py | 8 | 14777 | import pytest
from pybind11_tests import numpy_array as m
pytestmark = pytest.requires_numpy
with pytest.suppress(ImportError):
import numpy as np
@pytest.fixture(scope='function')
def arr():
return np.array([[1, 2, 3], [4, 5, 6]], '=u2')
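# '=u2' is a native-byte-order uint16 dtype (itemsize 2); the shape, stride
# and offset arithmetic in the tests below relies on that element size.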
def test_array_attributes():
a = np.array(0, 'f8')
assert m.ndim(a) == 0
assert all(m.shape(a) == [])
assert all(m.strides(a) == [])
with pytest.raises(IndexError) as excinfo:
m.shape(a, 0)
assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'
with pytest.raises(IndexError) as excinfo:
m.strides(a, 0)
assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'
assert m.writeable(a)
assert m.size(a) == 1
assert m.itemsize(a) == 8
assert m.nbytes(a) == 8
assert m.owndata(a)
a = np.array([[1, 2, 3], [4, 5, 6]], 'u2').view()
a.flags.writeable = False
assert m.ndim(a) == 2
assert all(m.shape(a) == [2, 3])
assert m.shape(a, 0) == 2
assert m.shape(a, 1) == 3
assert all(m.strides(a) == [6, 2])
assert m.strides(a, 0) == 6
assert m.strides(a, 1) == 2
with pytest.raises(IndexError) as excinfo:
m.shape(a, 2)
assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'
with pytest.raises(IndexError) as excinfo:
m.strides(a, 2)
assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'
assert not m.writeable(a)
assert m.size(a) == 6
assert m.itemsize(a) == 2
assert m.nbytes(a) == 12
assert not m.owndata(a)
@pytest.mark.parametrize('args, ret', [([], 0), ([0], 0), ([1], 3), ([0, 1], 1), ([1, 2], 5)])
def test_index_offset(arr, args, ret):
assert m.index_at(arr, *args) == ret
assert m.index_at_t(arr, *args) == ret
assert m.offset_at(arr, *args) == ret * arr.dtype.itemsize
assert m.offset_at_t(arr, *args) == ret * arr.dtype.itemsize
def test_dim_check_fail(arr):
for func in (m.index_at, m.index_at_t, m.offset_at, m.offset_at_t, m.data, m.data_t,
m.mutate_data, m.mutate_data_t):
with pytest.raises(IndexError) as excinfo:
func(arr, 1, 2, 3)
assert str(excinfo.value) == 'too many indices for an array: 3 (ndim = 2)'
@pytest.mark.parametrize('args, ret',
[([], [1, 2, 3, 4, 5, 6]),
([1], [4, 5, 6]),
([0, 1], [2, 3, 4, 5, 6]),
([1, 2], [6])])
def test_data(arr, args, ret):
from sys import byteorder
assert all(m.data_t(arr, *args) == ret)
assert all(m.data(arr, *args)[(0 if byteorder == 'little' else 1)::2] == ret)
assert all(m.data(arr, *args)[(1 if byteorder == 'little' else 0)::2] == 0)
@pytest.mark.parametrize('dim', [0, 1, 3])
def test_at_fail(arr, dim):
for func in m.at_t, m.mutate_at_t:
with pytest.raises(IndexError) as excinfo:
func(arr, *([0] * dim))
assert str(excinfo.value) == 'index dimension mismatch: {} (ndim = 2)'.format(dim)
def test_at(arr):
assert m.at_t(arr, 0, 2) == 3
assert m.at_t(arr, 1, 0) == 4
assert all(m.mutate_at_t(arr, 0, 2).ravel() == [1, 2, 4, 4, 5, 6])
assert all(m.mutate_at_t(arr, 1, 0).ravel() == [1, 2, 4, 5, 5, 6])
def test_mutate_readonly(arr):
arr.flags.writeable = False
for func, args in (m.mutate_data, ()), (m.mutate_data_t, ()), (m.mutate_at_t, (0, 0)):
with pytest.raises(ValueError) as excinfo:
func(arr, *args)
assert str(excinfo.value) == 'array is not writeable'
def test_mutate_data(arr):
assert all(m.mutate_data(arr).ravel() == [2, 4, 6, 8, 10, 12])
assert all(m.mutate_data(arr).ravel() == [4, 8, 12, 16, 20, 24])
assert all(m.mutate_data(arr, 1).ravel() == [4, 8, 12, 32, 40, 48])
assert all(m.mutate_data(arr, 0, 1).ravel() == [4, 16, 24, 64, 80, 96])
assert all(m.mutate_data(arr, 1, 2).ravel() == [4, 16, 24, 64, 80, 192])
assert all(m.mutate_data_t(arr).ravel() == [5, 17, 25, 65, 81, 193])
assert all(m.mutate_data_t(arr).ravel() == [6, 18, 26, 66, 82, 194])
assert all(m.mutate_data_t(arr, 1).ravel() == [6, 18, 26, 67, 83, 195])
assert all(m.mutate_data_t(arr, 0, 1).ravel() == [6, 19, 27, 68, 84, 196])
assert all(m.mutate_data_t(arr, 1, 2).ravel() == [6, 19, 27, 68, 84, 197])
def test_bounds_check(arr):
for func in (m.index_at, m.index_at_t, m.data, m.data_t,
m.mutate_data, m.mutate_data_t, m.at_t, m.mutate_at_t):
with pytest.raises(IndexError) as excinfo:
func(arr, 2, 0)
assert str(excinfo.value) == 'index 2 is out of bounds for axis 0 with size 2'
with pytest.raises(IndexError) as excinfo:
func(arr, 0, 4)
assert str(excinfo.value) == 'index 4 is out of bounds for axis 1 with size 3'
def test_make_c_f_array():
assert m.make_c_array().flags.c_contiguous
assert not m.make_c_array().flags.f_contiguous
assert m.make_f_array().flags.f_contiguous
assert not m.make_f_array().flags.c_contiguous
def test_wrap():
def assert_references(a, b, base=None):
if base is None:
base = a
assert a is not b
assert a.__array_interface__['data'][0] == b.__array_interface__['data'][0]
assert a.shape == b.shape
assert a.strides == b.strides
assert a.flags.c_contiguous == b.flags.c_contiguous
assert a.flags.f_contiguous == b.flags.f_contiguous
assert a.flags.writeable == b.flags.writeable
assert a.flags.aligned == b.flags.aligned
assert a.flags.updateifcopy == b.flags.updateifcopy
assert np.all(a == b)
assert not b.flags.owndata
assert b.base is base
if a.flags.writeable and a.ndim == 2:
a[0, 0] = 1234
assert b[0, 0] == 1234
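# Each m.wrap() call below should alias the original buffer; assert_references
# verifies the wrapper shares the same data pointer, shape, strides and flags
# instead of taking a copy.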
a1 = np.array([1, 2], dtype=np.int16)
assert a1.flags.owndata and a1.base is None
a2 = m.wrap(a1)
assert_references(a1, a2)
a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='F')
assert a1.flags.owndata and a1.base is None
a2 = m.wrap(a1)
assert_references(a1, a2)
a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='C')
a1.flags.writeable = False
a2 = m.wrap(a1)
assert_references(a1, a2)
a1 = np.random.random((4, 4, 4))
a2 = m.wrap(a1)
assert_references(a1, a2)
a1t = a1.transpose()
a2 = m.wrap(a1t)
assert_references(a1t, a2, a1)
a1d = a1.diagonal()
a2 = m.wrap(a1d)
assert_references(a1d, a2, a1)
a1m = a1[::-1, ::-1, ::-1]
a2 = m.wrap(a1m)
assert_references(a1m, a2, a1)
def test_numpy_view(capture):
with capture:
ac = m.ArrayClass()
ac_view_1 = ac.numpy_view()
ac_view_2 = ac.numpy_view()
assert np.all(ac_view_1 == np.array([1, 2], dtype=np.int32))
del ac
pytest.gc_collect()
assert capture == """
ArrayClass()
ArrayClass::numpy_view()
ArrayClass::numpy_view()
"""
ac_view_1[0] = 4
ac_view_1[1] = 3
assert ac_view_2[0] == 4
assert ac_view_2[1] == 3
with capture:
del ac_view_1
del ac_view_2
pytest.gc_collect()
pytest.gc_collect()
assert capture == """
~ArrayClass()
"""
@pytest.unsupported_on_pypy
def test_cast_numpy_int64_to_uint64():
m.function_taking_uint64(123)
m.function_taking_uint64(np.uint64(123))
def test_isinstance():
assert m.isinstance_untyped(np.array([1, 2, 3]), "not an array")
assert m.isinstance_typed(np.array([1.0, 2.0, 3.0]))
def test_constructors():
defaults = m.default_constructors()
for a in defaults.values():
assert a.size == 0
assert defaults["array"].dtype == np.array([]).dtype
assert defaults["array_t<int32>"].dtype == np.int32
assert defaults["array_t<double>"].dtype == np.float64
results = m.converting_constructors([1, 2, 3])
for a in results.values():
np.testing.assert_array_equal(a, [1, 2, 3])
assert results["array"].dtype == np.int_
assert results["array_t<int32>"].dtype == np.int32
assert results["array_t<double>"].dtype == np.float64
def test_overload_resolution(msg):
# Exact overload matches:
assert m.overloaded(np.array([1], dtype='float64')) == 'double'
assert m.overloaded(np.array([1], dtype='float32')) == 'float'
assert m.overloaded(np.array([1], dtype='ushort')) == 'unsigned short'
assert m.overloaded(np.array([1], dtype='intc')) == 'int'
assert m.overloaded(np.array([1], dtype='longlong')) == 'long long'
assert m.overloaded(np.array([1], dtype='complex')) == 'double complex'
assert m.overloaded(np.array([1], dtype='csingle')) == 'float complex'
# No exact match, should call first convertible version:
assert m.overloaded(np.array([1], dtype='uint8')) == 'double'
with pytest.raises(TypeError) as excinfo:
m.overloaded("not an array")
assert msg(excinfo.value) == """
overloaded(): incompatible function arguments. The following argument types are supported:
1. (arg0: numpy.ndarray[float64]) -> str
2. (arg0: numpy.ndarray[float32]) -> str
3. (arg0: numpy.ndarray[int32]) -> str
4. (arg0: numpy.ndarray[uint16]) -> str
5. (arg0: numpy.ndarray[int64]) -> str
6. (arg0: numpy.ndarray[complex128]) -> str
7. (arg0: numpy.ndarray[complex64]) -> str
Invoked with: 'not an array'
"""
assert m.overloaded2(np.array([1], dtype='float64')) == 'double'
assert m.overloaded2(np.array([1], dtype='float32')) == 'float'
assert m.overloaded2(np.array([1], dtype='complex64')) == 'float complex'
assert m.overloaded2(np.array([1], dtype='complex128')) == 'double complex'
assert m.overloaded2(np.array([1], dtype='float32')) == 'float'
assert m.overloaded3(np.array([1], dtype='float64')) == 'double'
assert m.overloaded3(np.array([1], dtype='intc')) == 'int'
expected_exc = """
overloaded3(): incompatible function arguments. The following argument types are supported:
1. (arg0: numpy.ndarray[int32]) -> str
2. (arg0: numpy.ndarray[float64]) -> str
Invoked with:"""
with pytest.raises(TypeError) as excinfo:
m.overloaded3(np.array([1], dtype='uintc'))
assert msg(excinfo.value) == expected_exc + " array([1], dtype=uint32)"
with pytest.raises(TypeError) as excinfo:
m.overloaded3(np.array([1], dtype='float32'))
assert msg(excinfo.value) == expected_exc + " array([ 1.], dtype=float32)"
with pytest.raises(TypeError) as excinfo:
m.overloaded3(np.array([1], dtype='complex'))
assert msg(excinfo.value) == expected_exc + " array([ 1.+0.j])"
# Exact matches:
assert m.overloaded4(np.array([1], dtype='double')) == 'double'
assert m.overloaded4(np.array([1], dtype='longlong')) == 'long long'
# Non-exact matches requiring conversion. Since float to integer isn't a
# safe conversion, it should go to the double overload, but short can go to
# either (and so should end up on the first-registered, the long long).
assert m.overloaded4(np.array([1], dtype='float32')) == 'double'
assert m.overloaded4(np.array([1], dtype='short')) == 'long long'
assert m.overloaded5(np.array([1], dtype='double')) == 'double'
assert m.overloaded5(np.array([1], dtype='uintc')) == 'unsigned int'
assert m.overloaded5(np.array([1], dtype='float32')) == 'unsigned int'
def test_greedy_string_overload():
"""Tests fix for #685 - ndarray shouldn't go to std::string overload"""
assert m.issue685("abc") == "string"
assert m.issue685(np.array([97, 98, 99], dtype='b')) == "array"
assert m.issue685(123) == "other"
def test_array_unchecked_fixed_dims(msg):
z1 = np.array([[1, 2], [3, 4]], dtype='float64')
m.proxy_add2(z1, 10)
assert np.all(z1 == [[11, 12], [13, 14]])
with pytest.raises(ValueError) as excinfo:
m.proxy_add2(np.array([1., 2, 3]), 5.0)
assert msg(excinfo.value) == "array has incorrect number of dimensions: 1; expected 2"
expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int')
assert np.all(m.proxy_init3(3.0) == expect_c)
expect_f = np.transpose(expect_c)
assert np.all(m.proxy_init3F(3.0) == expect_f)
assert m.proxy_squared_L2_norm(np.array(range(6))) == 55
assert m.proxy_squared_L2_norm(np.array(range(6), dtype="float64")) == 55
assert m.proxy_auxiliaries2(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32]
assert m.proxy_auxiliaries2(z1) == m.array_auxiliaries2(z1)
def test_array_unchecked_dyn_dims(msg):
z1 = np.array([[1, 2], [3, 4]], dtype='float64')
m.proxy_add2_dyn(z1, 10)
assert np.all(z1 == [[11, 12], [13, 14]])
expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int')
assert np.all(m.proxy_init3_dyn(3.0) == expect_c)
assert m.proxy_auxiliaries2_dyn(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32]
assert m.proxy_auxiliaries2_dyn(z1) == m.array_auxiliaries2(z1)
def test_array_failure():
with pytest.raises(ValueError) as excinfo:
m.array_fail_test()
assert str(excinfo.value) == 'cannot create a pybind11::array from a nullptr'
with pytest.raises(ValueError) as excinfo:
m.array_t_fail_test()
assert str(excinfo.value) == 'cannot create a pybind11::array_t from a nullptr'
with pytest.raises(ValueError) as excinfo:
m.array_fail_test_negative_size()
assert str(excinfo.value) == 'negative dimensions are not allowed'
def test_initializer_list():
assert m.array_initializer_list1().shape == (1,)
assert m.array_initializer_list2().shape == (1, 2)
assert m.array_initializer_list3().shape == (1, 2, 3)
assert m.array_initializer_list4().shape == (1, 2, 3, 4)
def test_array_resize(msg):
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float64')
m.array_reshape2(a)
assert(a.size == 9)
assert(np.all(a == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
# total size change should succeed with refcheck off
m.array_resize3(a, 4, False)
assert(a.size == 64)
# ... and fail with refcheck on
try:
m.array_resize3(a, 3, True)
except ValueError as e:
assert(str(e).startswith("cannot resize an array"))
# transposed array doesn't own data
b = a.transpose()
try:
m.array_resize3(b, 3, False)
except ValueError as e:
assert(str(e).startswith("cannot resize this array: it does not own its data"))
# ... but reshape should be fine
m.array_reshape2(b)
assert(b.shape == (8, 8))
@pytest.unsupported_on_pypy
def test_array_create_and_resize(msg):
a = m.create_and_resize(2)
assert(a.size == 4)
assert(np.all(a == 42.))
| mit | 2,836,450,355,394,470,400 | 35.758706 | 99 | 0.597009 | false |
j-carpentier/nova | nova/tests/unit/api/openstack/compute/test_volumes.py | 9 | 38890 | # Copyright 2013 Josh Durgin
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import urllib
import webob
from webob import exc
from nova.api.openstack.compute import assisted_volume_snapshots \
as assisted_snaps_v21
from nova.api.openstack.compute.legacy_v2.contrib import \
assisted_volume_snapshots as assisted_snaps_v2
from nova.api.openstack.compute.legacy_v2.contrib import volumes
from nova.api.openstack.compute import volumes as volumes_v21
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.volume import cinder
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
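# The fake_* helpers below stand in for compute/cinder API calls so the
# volume and attachment controllers can be exercised without a real backend.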
def fake_get_instance(self, context, instance_id, want_objects=False,
expected_attrs=None):
return fake_instance.fake_instance_obj(context, **{'uuid': instance_id})
def fake_get_volume(self, context, id):
return {'id': 'woot'}
def fake_attach_volume(self, context, instance, volume_id, device):
pass
def fake_detach_volume(self, context, instance, volume):
pass
def fake_swap_volume(self, context, instance,
old_volume_id, new_volume_id):
pass
def fake_create_snapshot(self, context, volume, name, description):
return {'id': 123,
'volume_id': 'fakeVolId',
'status': 'available',
'volume_size': 123,
'created_at': '2013-01-01 00:00:01',
'display_name': 'myVolumeName',
'display_description': 'myVolumeDescription'}
def fake_delete_snapshot(self, context, snapshot_id):
pass
def fake_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
pass
def fake_compute_volume_snapshot_create(self, context, volume_id,
create_info):
pass
def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'instance_uuid': instance_uuid,
'device_name': '/dev/fake0',
'delete_on_termination': 'False',
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': FAKE_UUID_A,
'volume_size': 1}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2,
'instance_uuid': instance_uuid,
'device_name': '/dev/fake1',
'delete_on_termination': 'False',
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': FAKE_UUID_B,
'volume_size': 1})]
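# The two fake BDMs above expose FAKE_UUID_A at /dev/fake0 and FAKE_UUID_B at
# /dev/fake1; the attachment show/detach tests rely on those values.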
class BootFromVolumeTest(test.TestCase):
def setUp(self):
super(BootFromVolumeTest, self).setUp()
self.stubs.Set(compute_api.API, 'create',
self._get_fake_compute_api_create())
fakes.stub_out_nw_api(self.stubs)
self._block_device_mapping_seen = None
self._legacy_bdm_seen = True
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Volumes', 'Block_device_mapping_v2_boot'])
def _get_fake_compute_api_create(self):
def _fake_compute_api_create(cls, context, instance_type,
image_href, **kwargs):
self._block_device_mapping_seen = kwargs.get(
'block_device_mapping')
self._legacy_bdm_seen = kwargs.get('legacy_bdm')
inst_type = flavors.get_flavor_by_flavor_id(2)
resv_id = None
return ([{'id': 1,
'display_name': 'test_server',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': IMAGE_UUID,
'user_id': 'fake',
'project_id': 'fake',
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
'progress': 0,
'fixed_ips': []
}], resv_id)
return _fake_compute_api_create
def test_create_root_volume(self):
body = dict(server=dict(
name='test_server', imageRef=IMAGE_UUID,
flavorRef=2, min_count=1, max_count=1,
block_device_mapping=[dict(
volume_id='1',
device_name='/dev/vda',
virtual='root',
delete_on_termination=False,
)]
))
req = fakes.HTTPRequest.blank('/v2/fake/os-volumes_boot')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(
init_only=('os-volumes_boot', 'servers')))
self.assertEqual(res.status_int, 202)
server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
self.assertEqual(CONF.password_length, len(server['adminPass']))
self.assertEqual(len(self._block_device_mapping_seen), 1)
self.assertTrue(self._legacy_bdm_seen)
self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], '1')
self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
'/dev/vda')
def test_create_root_volume_bdm_v2(self):
body = dict(server=dict(
name='test_server', imageRef=IMAGE_UUID,
flavorRef=2, min_count=1, max_count=1,
block_device_mapping_v2=[dict(
source_type='volume',
uuid='1',
device_name='/dev/vda',
boot_index=0,
delete_on_termination=False,
)]
))
req = fakes.HTTPRequest.blank('/v2/fake/os-volumes_boot')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(
init_only=('os-volumes_boot', 'servers')))
self.assertEqual(res.status_int, 202)
server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
self.assertEqual(CONF.password_length, len(server['adminPass']))
self.assertEqual(len(self._block_device_mapping_seen), 1)
self.assertFalse(self._legacy_bdm_seen)
self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], '1')
self.assertEqual(self._block_device_mapping_seen[0]['boot_index'],
0)
self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
'/dev/vda')
class VolumeApiTestV21(test.NoDBTestCase):
url_prefix = '/v2/fake'
def setUp(self):
super(VolumeApiTestV21, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Volumes'])
self.context = context.get_admin_context()
self.app = self._get_app()
def _get_app(self):
return fakes.wsgi_app_v21()
def test_volume_create(self):
self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_dict = jsonutils.loads(resp.body)
self.assertIn('volume', resp_dict)
self.assertEqual(resp_dict['volume']['size'],
vol['size'])
self.assertEqual(resp_dict['volume']['displayName'],
vol['display_name'])
self.assertEqual(resp_dict['volume']['displayDescription'],
vol['display_description'])
self.assertEqual(resp_dict['volume']['availabilityZone'],
vol['availability_zone'])
def _test_volume_create_bad(self, cinder_exc, api_exc):
def fake_volume_create(self, context, size, name, description,
snapshot, **param):
raise cinder_exc
self.stubs.Set(cinder.API, "create", fake_volume_create)
vol = {"size": '#$?',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
self.assertRaises(api_exc,
volumes.VolumeController().create, req, body=body)
@mock.patch.object(cinder.API, 'get_snapshot')
@mock.patch.object(cinder.API, 'create')
def test_volume_create_bad_snapshot_id(self, mock_create, mock_get):
vol = {"snapshot_id": '1'}
body = {"volume": vol}
mock_get.side_effect = exception.SnapshotNotFound(snapshot_id='1')
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
self.assertRaises(webob.exc.HTTPNotFound,
volumes.VolumeController().create, req, body=body)
def test_volume_create_bad_input(self):
self._test_volume_create_bad(exception.InvalidInput(reason='fake'),
webob.exc.HTTPBadRequest)
def test_volume_create_bad_quota(self):
self._test_volume_create_bad(exception.OverQuota(overs='fake'),
webob.exc.HTTPForbidden)
def test_volume_index(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_volume_detail(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/detail')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_volume_show(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/123')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_volume_show_no_volume(self):
self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/456')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertIn('Volume 456 could not be found.', resp.body)
def test_volume_delete(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/123')
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_volume_delete_no_volume(self):
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/456')
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertIn('Volume 456 could not be found.', resp.body)
class VolumeApiTestV2(VolumeApiTestV21):
def setUp(self):
super(VolumeApiTestV2, self).setUp()
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Volumes'])
self.context = context.get_admin_context()
self.app = self._get_app()
def _get_app(self):
return fakes.wsgi_app()
class VolumeAttachTestsV21(test.NoDBTestCase):
validation_error = exception.ValidationError
def setUp(self):
super(VolumeAttachTestsV21, self).setUp()
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_bdms_get_all_by_instance)
self.stubs.Set(compute_api.API, 'get', fake_get_instance)
self.stubs.Set(cinder.API, 'get', fake_get_volume)
self.context = context.get_admin_context()
self.expected_show = {'volumeAttachment':
{'device': '/dev/fake0',
'serverId': FAKE_UUID,
'id': FAKE_UUID_A,
'volumeId': FAKE_UUID_A
}}
self._set_up_controller()
def _set_up_controller(self):
self.attachments = volumes_v21.VolumeAttachmentController()
def test_show(self):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A)
self.assertEqual(self.expected_show, result)
@mock.patch.object(compute_api.API, 'get',
side_effect=exception.InstanceNotFound(instance_id=FAKE_UUID))
def test_show_no_instance(self, mock_mr):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_A)
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=None)
def test_show_no_bdms(self, mock_mr):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_A)
def test_show_bdms_no_mountpoint(self):
FAKE_UUID_NOTEXIST = '00000000-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_NOTEXIST)
def test_detach(self):
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume)
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.attachments,
volumes_v21.VolumeAttachmentController):
status_int = self.attachments.delete.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
def test_detach_vol_not_found(self):
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume)
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.delete,
req,
FAKE_UUID,
FAKE_UUID_C)
@mock.patch('nova.objects.BlockDeviceMapping.is_root',
new_callable=mock.PropertyMock)
def test_detach_vol_root(self, mock_isroot):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
mock_isroot.return_value = True
self.assertRaises(exc.HTTPForbidden,
self.attachments.delete,
req,
FAKE_UUID,
FAKE_UUID_A)
def test_detach_volume_from_locked_server(self):
def fake_detach_volume_from_locked_server(self, context,
instance, volume):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume_from_locked_server)
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict, self.attachments.delete,
req, FAKE_UUID, FAKE_UUID_A)
def test_attach_volume(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.create(req, FAKE_UUID, body=body)
self.assertEqual(result['volumeAttachment']['id'],
'00000000-aaaa-aaaa-aaaa-000000000000')
@mock.patch.object(compute_api.API, 'attach_volume',
return_value='/dev/myfake')
def test_attach_volume_with_auto_device(self, mock_attach):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': None}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.create(req, FAKE_UUID, body=body)
self.assertEqual(result['volumeAttachment']['id'],
'00000000-aaaa-aaaa-aaaa-000000000000')
self.assertEqual(result['volumeAttachment']['device'],
'/dev/myfake')
def test_attach_volume_to_locked_server(self):
def fake_attach_volume_to_locked_server(self, context, instance,
volume_id, device=None):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume_to_locked_server)
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict, self.attachments.create,
req, FAKE_UUID, body=body)
def test_attach_volume_bad_id(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {
'volumeAttachment': {
'device': None,
'volumeId': 'TESTVOLUME',
}
}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(self.validation_error, self.attachments.create,
req, FAKE_UUID, body=body)
def test_attach_volume_without_volumeId(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {
'volumeAttachment': {
'device': None
}
}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(self.validation_error, self.attachments.create,
req, FAKE_UUID, body=body)
def test_attach_volume_with_extra_arg(self):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake',
'extra': 'extra_arg'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(self.validation_error, self.attachments.create,
req, FAKE_UUID, body=body)
def _test_swap(self, attachments, uuid=FAKE_UUID_A,
fake_func=None, body=None):
fake_func = fake_func or fake_swap_volume
self.stubs.Set(compute_api.API,
'swap_volume',
fake_func)
body = body or {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'PUT'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
return attachments.update(req, FAKE_UUID, uuid, body=body)
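# _test_swap drives PUT /servers/id/os-volume_attachments/<uuid> through
# attachments.update(); callers vary the attachment uuid, the stubbed
# swap_volume implementation, and the request body.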
def test_swap_volume_for_locked_server(self):
def fake_swap_volume_for_locked_server(self, context, instance,
old_volume, new_volume):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.assertRaises(webob.exc.HTTPConflict, self._test_swap,
self.attachments,
fake_func=fake_swap_volume_for_locked_server)
def test_swap_volume(self):
result = self._test_swap(self.attachments)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.attachments,
volumes_v21.VolumeAttachmentController):
status_int = self.attachments.update.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
def test_swap_volume_no_attachment(self):
self.assertRaises(exc.HTTPNotFound, self._test_swap,
self.attachments, FAKE_UUID_C)
def test_swap_volume_without_volumeId(self):
body = {'volumeAttachment': {'device': '/dev/fake'}}
self.assertRaises(self.validation_error,
self._test_swap,
self.attachments,
body=body)
def test_swap_volume_with_extra_arg(self):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
self.assertRaises(self.validation_error,
self._test_swap,
self.attachments,
body=body)
class VolumeAttachTestsV2(VolumeAttachTestsV21):
validation_error = webob.exc.HTTPBadRequest
def _set_up_controller(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {'os-volume-attachment-update'}
self.attachments = volumes.VolumeAttachmentController(ext_mgr)
ext_mgr_no_update = extensions.ExtensionManager()
ext_mgr_no_update.extensions = {}
self.attachments_no_update = volumes.VolumeAttachmentController(
ext_mgr_no_update)
def test_swap_volume_no_extension(self):
self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap,
self.attachments_no_update)
@mock.patch.object(compute_api.API, 'attach_volume',
return_value=[])
def test_attach_volume_with_extra_arg(self, mock_attach):
# NOTE(gmann): V2 does not perform strong input validation
# so volume is attached successfully even with extra arg in
# request body.
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake',
'extra': 'extra_arg'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.create(req, FAKE_UUID, body=body)
self.assertEqual(result['volumeAttachment']['id'],
'00000000-aaaa-aaaa-aaaa-000000000000')
def test_swap_volume_with_extra_arg(self):
# NOTE(gmann): V2 does not perform strong input validation.
# Volume is swapped successfully even with extra arg in
# request body. So 'pass' this test for V2.
pass
class CommonBadRequestTestCase(object):
resource = None
entity_name = None
controller_cls = None
kwargs = {}
bad_request = exc.HTTPBadRequest
"""
Tests of places we throw 400 Bad Request from
"""
def setUp(self):
super(CommonBadRequestTestCase, self).setUp()
self.controller = self.controller_cls()
def _bad_request_create(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/' + self.resource)
req.method = 'POST'
kwargs = self.kwargs.copy()
kwargs['body'] = body
self.assertRaises(self.bad_request,
self.controller.create, req, **kwargs)
def test_create_no_body(self):
self._bad_request_create(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._bad_request_create(body=body)
def test_create_malformed_entity(self):
body = {self.entity_name: 'string'}
self._bad_request_create(body=body)
class BadRequestVolumeTestCaseV21(CommonBadRequestTestCase,
test.NoDBTestCase):
resource = 'os-volumes'
entity_name = 'volume'
controller_cls = volumes_v21.VolumeController
bad_request = exception.ValidationError
class BadRequestVolumeTestCaseV2(BadRequestVolumeTestCaseV21):
controller_cls = volumes.VolumeController
bad_request = exc.HTTPBadRequest
class BadRequestAttachmentTestCase(CommonBadRequestTestCase,
test.NoDBTestCase):
resource = 'servers/' + FAKE_UUID + '/os-volume_attachments'
entity_name = 'volumeAttachment'
controller_cls = volumes.VolumeAttachmentController
kwargs = {'server_id': FAKE_UUID}
class BadRequestSnapshotTestCaseV21(CommonBadRequestTestCase,
test.NoDBTestCase):
resource = 'os-snapshots'
entity_name = 'snapshot'
controller_cls = volumes_v21.SnapshotController
bad_request = exception.ValidationError
class BadRequestSnapshotTestCaseV2(BadRequestSnapshotTestCaseV21):
controller_cls = volumes.SnapshotController
bad_request = exc.HTTPBadRequest
class AssistedSnapshotCreateTestCaseV21(test.NoDBTestCase):
assisted_snaps = assisted_snaps_v21
bad_request = exception.ValidationError
def setUp(self):
super(AssistedSnapshotCreateTestCaseV21, self).setUp()
self.controller = \
self.assisted_snaps.AssistedVolumeSnapshotsController()
self.stubs.Set(compute_api.API, 'volume_snapshot_create',
fake_compute_volume_snapshot_create)
def test_assisted_create(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
body = {'snapshot':
{'volume_id': '1',
'create_info': {'type': 'qcow2',
'new_file': 'new_file',
'snapshot_id': 'snapshot_id'}}}
req.method = 'POST'
self.controller.create(req, body=body)
def test_assisted_create_missing_create_info(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
body = {'snapshot': {'volume_id': '1'}}
req.method = 'POST'
self.assertRaises(self.bad_request, self.controller.create,
req, body=body)
class AssistedSnapshotCreateTestCaseV2(AssistedSnapshotCreateTestCaseV21):
assisted_snaps = assisted_snaps_v2
bad_request = webob.exc.HTTPBadRequest
class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
assisted_snaps = assisted_snaps_v21
def _check_status(self, expected_status, res, controller_method):
self.assertEqual(expected_status, controller_method.wsgi_code)
def setUp(self):
super(AssistedSnapshotDeleteTestCaseV21, self).setUp()
self.controller = \
self.assisted_snaps.AssistedVolumeSnapshotsController()
self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
fake_compute_volume_snapshot_delete)
def test_assisted_delete(self):
params = {
'delete_info': jsonutils.dumps({'volume_id': '1'}),
}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-assisted-volume-snapshots?%s' %
urllib.parse.urlencode(params))
req.method = 'DELETE'
result = self.controller.delete(req, '5')
self._check_status(204, result, self.controller.delete)
def test_assisted_delete_missing_delete_info(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, '5')
class AssistedSnapshotDeleteTestCaseV2(AssistedSnapshotDeleteTestCaseV21):
assisted_snaps = assisted_snaps_v2
def _check_status(self, expected_status, res, controller_method):
self.assertEqual(expected_status, res.status_int)
class TestAssistedVolumeSnapshotsPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(TestAssistedVolumeSnapshotsPolicyEnforcementV21, self).setUp()
self.controller = (
assisted_snaps_v21.AssistedVolumeSnapshotsController())
self.req = fakes.HTTPRequest.blank('')
def test_create_assisted_volumes_snapshots_policy_failed(self):
rule_name = "os_compute_api:os-assisted-volume-snapshots:create"
self.policy.set_rules({rule_name: "project:non_fake"})
body = {'snapshot':
{'volume_id': '1',
'create_info': {'type': 'qcow2',
'new_file': 'new_file',
'snapshot_id': 'snapshot_id'}}}
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.create, self.req, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_assisted_volumes_snapshots_policy_failed(self):
rule_name = "os_compute_api:os-assisted-volume-snapshots:delete"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, '5')
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class TestVolumeAttachPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(TestVolumeAttachPolicyEnforcementV21, self).setUp()
self.controller = volumes_v21.VolumeAttachmentController()
self.req = fakes.HTTPRequest.blank('')
def _common_policy_check(self, rules, rule_name, func, *arg, **kwarg):
self.policy.set_rules(rules)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes-attachments:index"
rules = {rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name,
self.controller.index, self.req, FAKE_UUID)
def test_show_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:show": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.show,
self.req, FAKE_UUID, FAKE_UUID_A)
rule_name = "os_compute_api:os-volumes-attachments:show"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.show,
self.req, FAKE_UUID, FAKE_UUID_A)
def test_create_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:create": "@",
rule_name: "project:non_fake"}
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
self._common_policy_check(rules, rule_name, self.controller.create,
self.req, FAKE_UUID, body=body)
rule_name = "os_compute_api:os-volumes-attachments:create"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.create,
self.req, FAKE_UUID, body=body)
def test_update_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:update": "@",
rule_name: "project:non_fake"}
body = {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
self._common_policy_check(rules, rule_name, self.controller.update,
self.req, FAKE_UUID, FAKE_UUID_A, body=body)
rule_name = "os_compute_api:os-volumes-attachments:update"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.update,
self.req, FAKE_UUID, FAKE_UUID_A, body=body)
def test_delete_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:delete": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.delete,
self.req, FAKE_UUID, FAKE_UUID_A)
rule_name = "os_compute_api:os-volumes-attachments:delete"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.delete,
self.req, FAKE_UUID, FAKE_UUID_A)
| apache-2.0 | -4,335,558,850,881,914,000 | 39.051493 | 79 | 0.586012 | false |
jakevdp/spheredb | spheredb/hpx_utils.py | 1 | 2943 | """HEALPix Utilities"""
import numpy as np
def RAdec_to_HPX(RA, dec):
"""Convert RA/dec to healpix
Parameters
----------
RA, dec : degrees
Returns
-------
x, y : degrees
See Section 6 of Calabretta & Roukema, Mapping on the HEALPix grid
"""
H, K = 4.0, 3.0
RA = np.asarray(RA, dtype=float)
dec = np.asarray(dec, dtype=float)
# shift the RA to the range [-180, 180)
RA = -180 + (RA + 180) % 360
RA = RA + np.zeros_like(dec)
dec = dec + np.zeros_like(RA)
if np.any(RA < -180.) or np.any(RA >= 180.):
raise ValueError("RA must be in range [-180, 180)")
if np.any(dec < -90) or np.any(dec > 90):
raise ValueError("DEC must be in range [-90, 90]")
x = np.zeros(RA.shape, dtype=float)
y = np.zeros(dec.shape, dtype=float)
sindec = np.sin(np.radians(dec))
dec_cutoff = np.degrees(np.arcsin((K - 1.) / K))
sigma = np.sqrt(K * (1 - abs(sindec)))
omega = ((K % 2 > 0) | (dec > 0)).astype(float)
phi_c = -180. + (180. / H) * (omega +
2 * np.floor((RA + 180.) * H / 360.
+ 0.5 * (1. - omega)))
upper = (dec > dec_cutoff)
lower = (dec < -dec_cutoff)
inner = ~(upper | lower)
x[upper] = phi_c[upper] + (RA[upper] - phi_c[upper]) * sigma[upper]
y[upper] = (180. / H) * (0.5 * (K + 1) - sigma[upper])
x[inner] = RA[inner]
y[inner] = (K * 90. / H) * sindec[inner]
x[lower] = phi_c[lower] + (RA[lower] - phi_c[lower]) * sigma[lower]
y[lower] = -(180. / H) * (0.5 * (K + 1) - sigma[lower])
return x, y
def HPX_to_RAdec(x, y):
"""Convert RA/dec to healpix
Parameters
----------
RA, dec : degrees
Returns
-------
x, y : degrees
See Section 6 of Calabretta & Roukema, Mapping on the HEALPix grid
"""
H, K = 4.0, 3.0
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
x = x + np.zeros_like(y)
y = y + np.zeros_like(x)
RA = np.zeros(x.shape, dtype=float)
dec = np.zeros(y.shape, dtype=float)
extreme = (abs(y) >= 90)
upper = ~extreme & (y > (K - 1.) * 90. / H)
lower = ~extreme & (y < -(K - 1.) * 90. / H)
inner = ~(upper | lower | extreme)
sigma = 0.5 * (K + 1) - abs(y * H) / 180.
omega = ((K % 2 > 0) | (y > 0)).astype(float)
x_c = -180. + (2 * np.floor((x + 180.) * H / 360. + 0.5 * (1 - omega))
+ omega) * 180. / H
RA[upper] = x_c[upper] + (x[upper] - x_c[upper]) / sigma[upper]
dec[upper] = np.degrees(np.arcsin(1 - (1. / K) * sigma[upper] ** 2))
RA[inner] = x[inner]
dec[inner] = np.degrees(np.arcsin((y[inner] * H) / (90. * K)))
RA[lower] = x_c[lower] + (x[lower] - x_c[lower]) / sigma[lower]
dec[lower] = -np.degrees(np.arcsin(1 - (1. / K) * sigma[lower] ** 2))
RA[extreme] = x[extreme]
dec[extreme] = y[extreme]
return RA, dec
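# Illustrative usage sketch (not part of the original module): a round-trip
# check for the two projection helpers above. The sample points are
# assumptions chosen for demonstration only.
def _example_hpx_roundtrip():
    """Project a few RA/dec samples to HPX and back; return the max error."""
    RA = np.array([0.0, 45.0, -120.0, 170.0])
    dec = np.array([0.0, 30.0, -75.0, 85.0])
    x, y = RAdec_to_HPX(RA, dec)
    RA2, dec2 = HPX_to_RAdec(x, y)
    # Both arrays should come back essentially unchanged (RA is already in
    # the [-180, 180) range that RAdec_to_HPX normalizes to).
    return max(np.abs(RA - RA2).max(), np.abs(dec - dec2).max())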
| bsd-3-clause | -1,399,782,932,874,408,000 | 25.754545 | 74 | 0.498471 | false |
City-of-Helsinki/kuulemma | kuulemma/forms/sign_up.py | 2 | 2193 | # -*- coding: utf-8 -*-
# Kuulemma
# Copyright (C) 2014, Fast Monkeys Oy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flask_wtf import Form
from wtforms.fields import PasswordField, TextField
from wtforms.validators import Email, Length, Required, ValidationError
from wtforms_components import EmailField
from kuulemma.models import User
required = Required(message='Tämä kenttä on pakollinen.')
class SignUpForm(Form):
email = EmailField(
u'Sähköpostiosoite',
validators=[
Required(message='Sähköpostiosoite on pakollinen.'),
Email(message='Sähköpostiosoitteen täytyy sisältää @ ja . merkit.')
]
)
username = TextField(
u'Käyttäjänimi', validators=[
required,
Length(
min=4,
max=100,
message='Käyttäjänimi täytyy olla vähintään 4 merkkiä pitkä.'
)
]
)
password = PasswordField(
u'Salasana',
validators=[
required,
Length(
min=6,
max=100,
message='Salasana täytyy olla vähintään 6 merkkiä pitkä.'
)
]
)
def validate_email(self, field):
if User.query.filter(User.email == field.data).count() != 0:
raise ValidationError(u'Sähköposti jo käytössä.')
def validate_username(self, field):
if User.query.filter(User.username == field.data).count() != 0:
raise ValidationError(u'Käyttäjänimi jo käytössä.')
| agpl-3.0 | -6,837,181,214,356,372,000 | 33.142857 | 79 | 0.647141 | false |
mway08/grpc | src/python/interop/interop/client.py | 8 | 4235 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The Python implementation of the GRPC interoperability test client."""
import argparse
from oauth2client import client as oauth2client_client
from grpc.early_adopter import implementations
from interop import methods
from interop import resources
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def _args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--server_host', help='the host to which to connect', type=str)
parser.add_argument(
'--server_port', help='the port to which to connect', type=int)
parser.add_argument(
'--test_case', help='the test case to execute', type=str)
parser.add_argument(
'--use_tls', help='require a secure connection', dest='use_tls',
action='store_true')
parser.add_argument(
'--use_test_ca', help='replace platform root CAs with ca.pem',
action='store_true')
parser.add_argument(
'--server_host_override',
help='the server host to which to claim to connect', type=str)
parser.add_argument('--oauth_scope', help='scope for OAuth tokens', type=str)
parser.add_argument(
'--default_service_account',
help='email address of the default service account', type=str)
return parser.parse_args()
def _oauth_access_token(args):
credentials = oauth2client_client.GoogleCredentials.get_application_default()
scoped_credentials = credentials.create_scoped([args.oauth_scope])
return scoped_credentials.get_access_token().access_token
def _stub(args):
if args.oauth_scope:
metadata_transformer = lambda x: [('Authorization', 'Bearer %s' % _oauth_access_token(args))]
else:
metadata_transformer = lambda x: []
if args.use_tls:
if args.use_test_ca:
root_certificates = resources.test_root_certificates()
else:
root_certificates = resources.prod_root_certificates()
stub = implementations.stub(
methods.SERVICE_NAME, methods.CLIENT_METHODS, args.server_host,
args.server_port, metadata_transformer=metadata_transformer,
secure=True, root_certificates=root_certificates,
server_host_override=args.server_host_override)
else:
stub = implementations.stub(
methods.SERVICE_NAME, methods.CLIENT_METHODS, args.server_host,
args.server_port, secure=False)
return stub
def _test_case_from_arg(test_case_arg):
for test_case in methods.TestCase:
if test_case_arg == test_case.value:
return test_case
else:
raise ValueError('No test case "%s"!' % test_case_arg)
def _test_interoperability():
args = _args()
stub = _stub(args)
test_case = _test_case_from_arg(args.test_case)
test_case.test_interoperability(stub, args)
if __name__ == '__main__':
_test_interoperability()
| bsd-3-clause | -1,769,432,103,839,377,700 | 37.5 | 97 | 0.727509 | false |
philanthropy-u/edx-platform | common/test/acceptance/pages/lms/course_page.py | 24 | 1158 | """
Base class for pages in courseware.
"""
from bok_choy.page_object import PageObject
from common.test.acceptance.pages.lms import BASE_URL
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
class CoursePage(PageObject):
"""
Abstract base class for page objects within a course.
"""
# Overridden by subclasses to provide the relative path within the course
# Paths should not include the leading forward slash.
url_path = ""
def __init__(self, browser, course_id):
"""
Course ID is currently of the form "edx/999/2013_Spring"
but this format could change.
"""
super(CoursePage, self).__init__(browser)
self.course_id = course_id
@property
def url(self):
"""
Construct a URL to the page within the course.
"""
return BASE_URL + "/courses/" + self.course_id + "/" + self.url_path
def has_tab(self, tab_name):
"""
Returns true if the current page is showing a tab with the given name.
        :return: True if the tab is present, False otherwise.
"""
tab_nav = TabNavPage(self.browser)
return tab_name in tab_nav.tab_names
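# Illustrative sketch (not part of the original module): how a concrete page
# object is typically derived from CoursePage. The URL path and CSS selector
# below are assumptions for demonstration only.
class _ExampleProgressPage(CoursePage):
    """
    Hypothetical page object for a course's progress tab.
    """
    url_path = "progress"
    def is_browser_on_page(self):
        return self.q(css='body.view-progress').present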
| agpl-3.0 | 7,720,485,328,744,148,000 | 27.243902 | 78 | 0.626079 | false |
hackerkid/zulip | zerver/lib/message.py | 1 | 49744 | import copy
import datetime
import zlib
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple
import ahocorasick
import orjson
from django.db import connection
from django.db.models import Max, Sum
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, RealmCount
from zerver.lib.avatar import get_avatar_field
from zerver.lib.cache import (
cache_with_key,
generic_bulk_cached_fetch,
to_dict_cache_key,
to_dict_cache_key_id,
)
from zerver.lib.display_recipient import (
DisplayRecipientT,
UserDisplayRecipient,
bulk_fetch_display_recipients,
)
from zerver.lib.markdown import MentionData, markdown_convert, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.request import JsonableError
from zerver.lib.stream_subscription import (
get_stream_subscriptions_for_user,
num_subscribers_for_stream_id,
)
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.topic import DB_TOPIC_NAME, MESSAGE__TOPIC, TOPIC_LINKS, TOPIC_NAME
from zerver.lib.topic_mutes import build_topic_mute_checker, topic_is_muted
from zerver.models import (
MAX_MESSAGE_LENGTH,
MAX_TOPIC_NAME_LENGTH,
Message,
Reaction,
Realm,
Recipient,
Stream,
SubMessage,
Subscription,
UserMessage,
UserProfile,
get_display_recipient_by_id,
get_usermessage_by_message_id,
query_for_ids,
)
RealmAlertWord = Dict[int, List[str]]
class RawReactionRow(TypedDict):
emoji_code: str
emoji_name: str
message_id: int
reaction_type: str
user_profile__email: str
user_profile__full_name: str
user_profile__id: int
class RawUnreadMessagesResult(TypedDict):
pm_dict: Dict[int, Any]
stream_dict: Dict[int, Any]
huddle_dict: Dict[int, Any]
mentions: Set[int]
muted_stream_ids: List[int]
unmuted_stream_msgs: Set[int]
old_unreads_missing: bool
class UnreadMessagesResult(TypedDict):
pms: List[Dict[str, Any]]
streams: List[Dict[str, Any]]
huddles: List[Dict[str, Any]]
mentions: List[int]
count: int
old_unreads_missing: bool
@dataclass
class SendMessageRequest:
message: Message
stream: Optional[Stream]
local_id: Optional[int]
sender_queue_id: Optional[int]
realm: Realm
mention_data: MentionData
active_user_ids: Set[int]
push_notify_user_ids: Set[int]
stream_push_user_ids: Set[int]
stream_email_user_ids: Set[int]
um_eligible_user_ids: Set[int]
long_term_idle_user_ids: Set[int]
default_bot_user_ids: Set[int]
service_bot_tuples: List[Tuple[int, int]]
wildcard_mention_user_ids: Set[int]
links_for_embed: Set[str]
widget_content: Optional[Dict[str, Any]]
submessages: List[Dict[str, Any]] = field(default_factory=list)
deliver_at: Optional[datetime.datetime] = None
delivery_type: Optional[str] = None
# We won't try to fetch more unread message IDs from the database than
# this limit. The limit is super high, in large part because it means
# client-side code mostly doesn't need to think about the case that a
# user has more older unread messages that were cut off.
MAX_UNREAD_MESSAGES = 50000
def truncate_content(content: str, max_length: int, truncation_message: str) -> str:
if len(content) > max_length:
content = content[: max_length - len(truncation_message)] + truncation_message
return content
def normalize_body(body: str) -> str:
body = body.rstrip()
if len(body) == 0:
raise JsonableError(_("Message must not be empty"))
if "\x00" in body:
raise JsonableError(_("Message must not contain null bytes"))
return truncate_content(body, MAX_MESSAGE_LENGTH, "\n[message truncated]")
def truncate_topic(topic: str) -> str:
return truncate_content(topic, MAX_TOPIC_NAME_LENGTH, "...")
def messages_for_ids(
message_ids: List[int],
user_message_flags: Dict[int, List[str]],
search_fields: Dict[int, Dict[str, str]],
apply_markdown: bool,
client_gravatar: bool,
allow_edit_history: bool,
) -> List[Dict[str, Any]]:
cache_transformer = MessageDict.build_dict_from_raw_db_row
id_fetcher = lambda row: row["id"]
message_dicts = generic_bulk_cached_fetch(
to_dict_cache_key_id,
MessageDict.get_raw_db_rows,
message_ids,
id_fetcher=id_fetcher,
cache_transformer=cache_transformer,
extractor=extract_message_dict,
setter=stringify_message_dict,
)
message_list: List[Dict[str, Any]] = []
for message_id in message_ids:
msg_dict = message_dicts[message_id]
msg_dict.update(flags=user_message_flags[message_id])
if message_id in search_fields:
msg_dict.update(search_fields[message_id])
# Make sure that we never send message edit history to clients
# in realms with allow_edit_history disabled.
if "edit_history" in msg_dict and not allow_edit_history:
del msg_dict["edit_history"]
message_list.append(msg_dict)
MessageDict.post_process_dicts(message_list, apply_markdown, client_gravatar)
return message_list
def sew_messages_and_reactions(
messages: List[Dict[str, Any]], reactions: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
"""Given a iterable of messages and reactions stitch reactions
into messages.
"""
# Add all messages with empty reaction item
for message in messages:
message["reactions"] = []
# Convert list of messages into dictionary to make reaction stitching easy
converted_messages = {message["id"]: message for message in messages}
for reaction in reactions:
converted_messages[reaction["message_id"]]["reactions"].append(reaction)
return list(converted_messages.values())
def sew_messages_and_submessages(
messages: List[Dict[str, Any]], submessages: List[Dict[str, Any]]
) -> None:
# This is super similar to sew_messages_and_reactions.
for message in messages:
message["submessages"] = []
message_dict = {message["id"]: message for message in messages}
for submessage in submessages:
message_id = submessage["message_id"]
if message_id in message_dict:
message = message_dict[message_id]
message["submessages"].append(submessage)
def extract_message_dict(message_bytes: bytes) -> Dict[str, Any]:
return orjson.loads(zlib.decompress(message_bytes))
def stringify_message_dict(message_dict: Dict[str, Any]) -> bytes:
return zlib.compress(orjson.dumps(message_dict))
@cache_with_key(to_dict_cache_key, timeout=3600 * 24)
def message_to_dict_json(message: Message, realm_id: Optional[int] = None) -> bytes:
return MessageDict.to_dict_uncached([message], realm_id)[message.id]
def save_message_rendered_content(message: Message, content: str) -> str:
rendered_content = render_markdown(message, content, realm=message.get_realm())
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
message.save_rendered_content()
return rendered_content
class MessageDict:
"""MessageDict is the core class responsible for marshalling Message
objects obtained from the database into a format that can be sent
to clients via the Zulip API, whether via `GET /messages`,
outgoing webhooks, or other code paths. There are two core flows through
which this class is used:
* For just-sent messages, we construct a single `wide_dict` object
containing all the data for the message and the related
UserProfile models (sender_info and recipient_info); this object
can be stored in queues, caches, etc., and then later turned
into an API-format JSONable dictionary via finalize_payload.
* When fetching messages from the database, we fetch their data in
bulk using messages_for_ids, which makes use of caching, bulk
fetches that skip the Django ORM, etc., to provide an optimized
interface for fetching hundreds of thousands of messages from
the database and then turning them into API-format JSON
dictionaries.
"""
@staticmethod
def wide_dict(message: Message, realm_id: Optional[int] = None) -> Dict[str, Any]:
"""
The next two lines get the cacheable field related
to our message object, with the side effect of
populating the cache.
"""
json = message_to_dict_json(message, realm_id)
obj = extract_message_dict(json)
"""
The steps below are similar to what we do in
post_process_dicts(), except we don't call finalize_payload(),
since that step happens later in the queue
processor.
"""
MessageDict.bulk_hydrate_sender_info([obj])
MessageDict.bulk_hydrate_recipient_info([obj])
return obj
@staticmethod
def post_process_dicts(
objs: List[Dict[str, Any]], apply_markdown: bool, client_gravatar: bool
) -> None:
"""
NOTE: This function mutates the objects in
the `objs` list, rather than making
shallow copies. It might be safer to
make shallow copies here, but performance
is somewhat important here, as we are
often fetching hundreds of messages.
"""
MessageDict.bulk_hydrate_sender_info(objs)
MessageDict.bulk_hydrate_recipient_info(objs)
for obj in objs:
MessageDict.finalize_payload(obj, apply_markdown, client_gravatar, skip_copy=True)
@staticmethod
def finalize_payload(
obj: Dict[str, Any],
apply_markdown: bool,
client_gravatar: bool,
keep_rendered_content: bool = False,
skip_copy: bool = False,
) -> Dict[str, Any]:
"""
By default, we make a shallow copy of the incoming dict to avoid
mutation-related bugs. Code paths that are passing a unique object
can pass skip_copy=True to avoid this extra work.
"""
if not skip_copy:
obj = copy.copy(obj)
MessageDict.set_sender_avatar(obj, client_gravatar)
if apply_markdown:
obj["content_type"] = "text/html"
obj["content"] = obj["rendered_content"]
else:
obj["content_type"] = "text/x-markdown"
if not keep_rendered_content:
del obj["rendered_content"]
del obj["sender_realm_id"]
del obj["sender_avatar_source"]
del obj["sender_delivery_email"]
del obj["sender_avatar_version"]
del obj["recipient_type"]
del obj["recipient_type_id"]
del obj["sender_is_mirror_dummy"]
return obj
@staticmethod
def sew_submessages_and_reactions_to_msgs(
messages: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
msg_ids = [msg["id"] for msg in messages]
submessages = SubMessage.get_raw_db_rows(msg_ids)
sew_messages_and_submessages(messages, submessages)
reactions = Reaction.get_raw_db_rows(msg_ids)
return sew_messages_and_reactions(messages, reactions)
@staticmethod
def to_dict_uncached(
messages: List[Message], realm_id: Optional[int] = None
) -> Dict[int, bytes]:
messages_dict = MessageDict.to_dict_uncached_helper(messages, realm_id)
encoded_messages = {msg["id"]: stringify_message_dict(msg) for msg in messages_dict}
return encoded_messages
@staticmethod
def to_dict_uncached_helper(
messages: List[Message], realm_id: Optional[int] = None
) -> List[Dict[str, Any]]:
# Near duplicate of the build_message_dict + get_raw_db_rows
# code path that accepts already fetched Message objects
# rather than message IDs.
def get_rendering_realm_id(message: Message) -> int:
# realm_id can differ among users, currently only possible
# with cross realm bots.
if realm_id is not None:
return realm_id
if message.recipient.type == Recipient.STREAM:
return Stream.objects.get(id=message.recipient.type_id).realm_id
return message.sender.realm_id
message_rows = [
{
"id": message.id,
DB_TOPIC_NAME: message.topic_name(),
"date_sent": message.date_sent,
"last_edit_time": message.last_edit_time,
"edit_history": message.edit_history,
"content": message.content,
"rendered_content": message.rendered_content,
"rendered_content_version": message.rendered_content_version,
"recipient_id": message.recipient.id,
"recipient__type": message.recipient.type,
"recipient__type_id": message.recipient.type_id,
"rendering_realm_id": get_rendering_realm_id(message),
"sender_id": message.sender.id,
"sending_client__name": message.sending_client.name,
"sender__realm_id": message.sender.realm_id,
}
for message in messages
]
MessageDict.sew_submessages_and_reactions_to_msgs(message_rows)
return [MessageDict.build_dict_from_raw_db_row(row) for row in message_rows]
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
# This is a special purpose function optimized for
# callers like get_messages_backend().
fields = [
"id",
DB_TOPIC_NAME,
"date_sent",
"last_edit_time",
"edit_history",
"content",
"rendered_content",
"rendered_content_version",
"recipient_id",
"recipient__type",
"recipient__type_id",
"sender_id",
"sending_client__name",
"sender__realm_id",
]
messages = Message.objects.filter(id__in=needed_ids).values(*fields)
return MessageDict.sew_submessages_and_reactions_to_msgs(messages)
@staticmethod
def build_dict_from_raw_db_row(row: Dict[str, Any]) -> Dict[str, Any]:
"""
row is a row from a .values() call, and it needs to have
all the relevant fields populated
"""
return MessageDict.build_message_dict(
message_id=row["id"],
last_edit_time=row["last_edit_time"],
edit_history=row["edit_history"],
content=row["content"],
topic_name=row[DB_TOPIC_NAME],
date_sent=row["date_sent"],
rendered_content=row["rendered_content"],
rendered_content_version=row["rendered_content_version"],
sender_id=row["sender_id"],
sender_realm_id=row["sender__realm_id"],
sending_client_name=row["sending_client__name"],
rendering_realm_id=row.get("rendering_realm_id", row["sender__realm_id"]),
recipient_id=row["recipient_id"],
recipient_type=row["recipient__type"],
recipient_type_id=row["recipient__type_id"],
reactions=row["reactions"],
submessages=row["submessages"],
)
@staticmethod
def build_message_dict(
message_id: int,
last_edit_time: Optional[datetime.datetime],
edit_history: Optional[str],
content: str,
topic_name: str,
date_sent: datetime.datetime,
rendered_content: Optional[str],
rendered_content_version: Optional[int],
sender_id: int,
sender_realm_id: int,
sending_client_name: str,
rendering_realm_id: int,
recipient_id: int,
recipient_type: int,
recipient_type_id: int,
reactions: List[RawReactionRow],
submessages: List[Dict[str, Any]],
) -> Dict[str, Any]:
obj = dict(
id=message_id,
sender_id=sender_id,
content=content,
recipient_type_id=recipient_type_id,
recipient_type=recipient_type,
recipient_id=recipient_id,
timestamp=datetime_to_timestamp(date_sent),
client=sending_client_name,
)
obj[TOPIC_NAME] = topic_name
obj["sender_realm_id"] = sender_realm_id
# Render topic_links with the stream's realm instead of the
# sender's realm; this is important for messages sent by
# cross-realm bots like NOTIFICATION_BOT.
obj[TOPIC_LINKS] = topic_links(rendering_realm_id, topic_name)
if last_edit_time is not None:
obj["last_edit_timestamp"] = datetime_to_timestamp(last_edit_time)
assert edit_history is not None
obj["edit_history"] = orjson.loads(edit_history)
if Message.need_to_render_content(
rendered_content, rendered_content_version, markdown_version
):
# We really shouldn't be rendering objects in this method, but there is
# a scenario where we upgrade the version of Markdown and fail to run
# management commands to re-render historical messages, and then we
# need to have side effects. This method is optimized to not need full
# blown ORM objects, but the Markdown renderer is unfortunately highly
# coupled to Message, and we also need to persist the new rendered content.
# If we don't have a message object passed in, we get one here. The cost
# of going to the DB here should be overshadowed by the cost of rendering
# and updating the row.
# TODO: see #1379 to eliminate Markdown dependencies
message = Message.objects.select_related().get(id=message_id)
assert message is not None # Hint for mypy.
# It's unfortunate that we need to have side effects on the message
# in some cases.
rendered_content = save_message_rendered_content(message, content)
if rendered_content is not None:
obj["rendered_content"] = rendered_content
else:
obj["rendered_content"] = (
"<p>[Zulip note: Sorry, we could not "
+ "understand the formatting of your message]</p>"
)
if rendered_content is not None:
obj["is_me_message"] = Message.is_status_message(content, rendered_content)
else:
obj["is_me_message"] = False
obj["reactions"] = [
ReactionDict.build_dict_from_raw_db_row(reaction) for reaction in reactions
]
obj["submessages"] = submessages
return obj
@staticmethod
def bulk_hydrate_sender_info(objs: List[Dict[str, Any]]) -> None:
sender_ids = list({obj["sender_id"] for obj in objs})
if not sender_ids:
return
query = UserProfile.objects.values(
"id",
"full_name",
"delivery_email",
"email",
"realm__string_id",
"avatar_source",
"avatar_version",
"is_mirror_dummy",
)
rows = query_for_ids(query, sender_ids, "zerver_userprofile.id")
sender_dict = {row["id"]: row for row in rows}
for obj in objs:
sender_id = obj["sender_id"]
user_row = sender_dict[sender_id]
obj["sender_full_name"] = user_row["full_name"]
obj["sender_email"] = user_row["email"]
obj["sender_delivery_email"] = user_row["delivery_email"]
obj["sender_realm_str"] = user_row["realm__string_id"]
obj["sender_avatar_source"] = user_row["avatar_source"]
obj["sender_avatar_version"] = user_row["avatar_version"]
obj["sender_is_mirror_dummy"] = user_row["is_mirror_dummy"]
@staticmethod
def hydrate_recipient_info(obj: Dict[str, Any], display_recipient: DisplayRecipientT) -> None:
"""
        This method hydrates recipient info with things
        like full names and emails of senders. Eventually
        our clients should be able to hydrate these fields
themselves with info they already have on users.
"""
recipient_type = obj["recipient_type"]
recipient_type_id = obj["recipient_type_id"]
sender_is_mirror_dummy = obj["sender_is_mirror_dummy"]
sender_email = obj["sender_email"]
sender_full_name = obj["sender_full_name"]
sender_id = obj["sender_id"]
if recipient_type == Recipient.STREAM:
display_type = "stream"
elif recipient_type in (Recipient.HUDDLE, Recipient.PERSONAL):
assert not isinstance(display_recipient, str)
display_type = "private"
if len(display_recipient) == 1:
# add the sender in if this isn't a message between
# someone and themself, preserving ordering
recip: UserDisplayRecipient = {
"email": sender_email,
"full_name": sender_full_name,
"id": sender_id,
"is_mirror_dummy": sender_is_mirror_dummy,
}
if recip["email"] < display_recipient[0]["email"]:
display_recipient = [recip, display_recipient[0]]
elif recip["email"] > display_recipient[0]["email"]:
display_recipient = [display_recipient[0], recip]
else:
raise AssertionError(f"Invalid recipient type {recipient_type}")
obj["display_recipient"] = display_recipient
obj["type"] = display_type
if obj["type"] == "stream":
obj["stream_id"] = recipient_type_id
@staticmethod
def bulk_hydrate_recipient_info(objs: List[Dict[str, Any]]) -> None:
recipient_tuples = { # We use set to eliminate duplicate tuples.
(
obj["recipient_id"],
obj["recipient_type"],
obj["recipient_type_id"],
)
for obj in objs
}
display_recipients = bulk_fetch_display_recipients(recipient_tuples)
for obj in objs:
MessageDict.hydrate_recipient_info(obj, display_recipients[obj["recipient_id"]])
@staticmethod
def set_sender_avatar(obj: Dict[str, Any], client_gravatar: bool) -> None:
sender_id = obj["sender_id"]
sender_realm_id = obj["sender_realm_id"]
sender_delivery_email = obj["sender_delivery_email"]
sender_avatar_source = obj["sender_avatar_source"]
sender_avatar_version = obj["sender_avatar_version"]
obj["avatar_url"] = get_avatar_field(
user_id=sender_id,
realm_id=sender_realm_id,
email=sender_delivery_email,
avatar_source=sender_avatar_source,
avatar_version=sender_avatar_version,
medium=False,
client_gravatar=client_gravatar,
)
class ReactionDict:
@staticmethod
def build_dict_from_raw_db_row(row: RawReactionRow) -> Dict[str, Any]:
return {
"emoji_name": row["emoji_name"],
"emoji_code": row["emoji_code"],
"reaction_type": row["reaction_type"],
# TODO: We plan to remove this redundant user dictionary once
# clients are updated to support accessing use user_id. See
# https://github.com/zulip/zulip/pull/14711 for details.
#
# When we do that, we can likely update the `.values()` query to
# not fetch the extra user_profile__* fields from the database
# as a small performance optimization.
"user": {
"email": row["user_profile__email"],
"id": row["user_profile__id"],
"full_name": row["user_profile__full_name"],
},
"user_id": row["user_profile__id"],
}
def access_message(
user_profile: UserProfile, message_id: int
) -> Tuple[Message, Optional[UserMessage]]:
"""You can access a message by ID in our APIs that either:
(1) You received or have previously accessed via starring
(aka have a UserMessage row for).
(2) Was sent to a public stream in your realm.
We produce consistent, boring error messages to avoid leaking any
information from a security perspective.
"""
try:
message = Message.objects.select_related().get(id=message_id)
except Message.DoesNotExist:
raise JsonableError(_("Invalid message(s)"))
user_message = get_usermessage_by_message_id(user_profile, message_id)
if has_message_access(user_profile, message, user_message):
return (message, user_message)
raise JsonableError(_("Invalid message(s)"))
def has_message_access(
user_profile: UserProfile, message: Message, user_message: Optional[UserMessage]
) -> bool:
if user_message is None:
if message.recipient.type != Recipient.STREAM:
# You can't access private messages you didn't receive
return False
stream = Stream.objects.get(id=message.recipient.type_id)
if stream.realm != user_profile.realm:
# You can't access public stream messages in other realms
return False
if not stream.is_history_public_to_subscribers():
# You can't access messages you didn't directly receive
# unless history is public to subscribers.
return False
if not stream.is_public():
# This stream is an invite-only stream where message
# history is available to subscribers. So we check if
# you're subscribed.
if not Subscription.objects.filter(
user_profile=user_profile, active=True, recipient=message.recipient
).exists():
return False
# You are subscribed, so let this fall through to the public stream case.
elif user_profile.is_guest:
# Guest users don't get automatic access to public stream messages
if not Subscription.objects.filter(
user_profile=user_profile, active=True, recipient=message.recipient
).exists():
return False
else:
# Otherwise, the message was sent to a public stream in
# your realm, so return the message, user_message pair
pass
return True
def bulk_access_messages(user_profile: UserProfile, messages: Sequence[Message]) -> List[Message]:
filtered_messages = []
for message in messages:
user_message = get_usermessage_by_message_id(user_profile, message.id)
if has_message_access(user_profile, message, user_message):
filtered_messages.append(message)
return filtered_messages
def bulk_access_messages_expect_usermessage(
user_profile_id: int, message_ids: Sequence[int]
) -> List[int]:
"""
Like bulk_access_messages, but faster and potentially stricter.
Returns a subset of `message_ids` containing only messages the
user can access. Makes O(1) database queries.
Use this function only when the user is expected to have a
UserMessage row for every message in `message_ids`. If a
UserMessage row is missing, the message will be omitted even if
the user has access (e.g. because it went to a public stream.)
See also: `access_message`, `bulk_access_messages`.
"""
return UserMessage.objects.filter(
user_profile_id=user_profile_id,
message_id__in=message_ids,
).values_list("message_id", flat=True)
def render_markdown(
message: Message,
content: str,
realm: Optional[Realm] = None,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
mention_data: Optional[MentionData] = None,
email_gateway: bool = False,
) -> str:
"""
This is basically just a wrapper for do_render_markdown.
"""
if realm is None:
realm = message.get_realm()
sender = message.sender
sent_by_bot = sender.is_bot
translate_emoticons = sender.translate_emoticons
rendered_content = do_render_markdown(
message=message,
content=content,
realm=realm,
realm_alert_words_automaton=realm_alert_words_automaton,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
mention_data=mention_data,
email_gateway=email_gateway,
)
return rendered_content
def do_render_markdown(
message: Message,
content: str,
realm: Realm,
sent_by_bot: bool,
translate_emoticons: bool,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
mention_data: Optional[MentionData] = None,
email_gateway: bool = False,
) -> str:
"""Return HTML for given Markdown. Markdown may add properties to the
message object such as `mentions_user_ids`, `mentions_user_group_ids`, and
`mentions_wildcard`. These are only on this Django object and are not
saved in the database.
"""
message.mentions_wildcard = False
message.mentions_user_ids = set()
message.mentions_user_group_ids = set()
message.alert_words = set()
message.links_for_preview = set()
message.user_ids_with_alert_words = set()
# DO MAIN WORK HERE -- call markdown_convert to convert
rendered_content = markdown_convert(
content,
realm_alert_words_automaton=realm_alert_words_automaton,
message=message,
message_realm=realm,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
mention_data=mention_data,
email_gateway=email_gateway,
)
return rendered_content
def huddle_users(recipient_id: int) -> str:
display_recipient: DisplayRecipientT = get_display_recipient_by_id(
recipient_id,
Recipient.HUDDLE,
None,
)
# str is for streams.
assert not isinstance(display_recipient, str)
user_ids: List[int] = [obj["id"] for obj in display_recipient]
user_ids = sorted(user_ids)
return ",".join(str(uid) for uid in user_ids)
def aggregate_message_dict(
input_dict: Dict[int, Dict[str, Any]], lookup_fields: List[str], collect_senders: bool
) -> List[Dict[str, Any]]:
lookup_dict: Dict[Tuple[Any, ...], Dict[str, Any]] = {}
"""
A concrete example might help explain the inputs here:
input_dict = {
1002: dict(stream_id=5, topic='foo', sender_id=40),
1003: dict(stream_id=5, topic='foo', sender_id=41),
1004: dict(stream_id=6, topic='baz', sender_id=99),
}
lookup_fields = ['stream_id', 'topic']
The first time through the loop:
attribute_dict = dict(stream_id=5, topic='foo', sender_id=40)
        lookup_key = (5, 'foo')
lookup_dict = {
(5, 'foo'): dict(stream_id=5, topic='foo',
unread_message_ids=[1002, 1003],
sender_ids=[40, 41],
),
...
}
result = [
dict(stream_id=5, topic='foo',
unread_message_ids=[1002, 1003],
sender_ids=[40, 41],
),
...
]
"""
for message_id, attribute_dict in input_dict.items():
lookup_key = tuple(attribute_dict[f] for f in lookup_fields)
if lookup_key not in lookup_dict:
obj = {}
for f in lookup_fields:
obj[f] = attribute_dict[f]
obj["unread_message_ids"] = []
if collect_senders:
obj["sender_ids"] = set()
lookup_dict[lookup_key] = obj
bucket = lookup_dict[lookup_key]
bucket["unread_message_ids"].append(message_id)
if collect_senders:
bucket["sender_ids"].add(attribute_dict["sender_id"])
for dct in lookup_dict.values():
dct["unread_message_ids"].sort()
if collect_senders:
dct["sender_ids"] = sorted(dct["sender_ids"])
sorted_keys = sorted(lookup_dict.keys())
return [lookup_dict[k] for k in sorted_keys]
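# Illustrative sketch (not part of the original module): exercising
# aggregate_message_dict with the same inputs as the docstring example above.
def _example_aggregate_message_dict() -> List[Dict[str, Any]]:
    input_dict = {
        1002: dict(stream_id=5, topic="foo", sender_id=40),
        1003: dict(stream_id=5, topic="foo", sender_id=41),
        1004: dict(stream_id=6, topic="baz", sender_id=99),
    }
    # Expect two buckets keyed by (stream_id, topic), each with sorted
    # unread_message_ids and sender_ids.
    return aggregate_message_dict(
        input_dict=input_dict,
        lookup_fields=["stream_id", "topic"],
        collect_senders=True,
    )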
def get_inactive_recipient_ids(user_profile: UserProfile) -> List[int]:
rows = (
get_stream_subscriptions_for_user(user_profile)
.filter(
active=False,
)
.values(
"recipient_id",
)
)
inactive_recipient_ids = [row["recipient_id"] for row in rows]
return inactive_recipient_ids
def get_muted_stream_ids(user_profile: UserProfile) -> List[int]:
rows = (
get_stream_subscriptions_for_user(user_profile)
.filter(
active=True,
is_muted=True,
)
.values(
"recipient__type_id",
)
)
muted_stream_ids = [row["recipient__type_id"] for row in rows]
return muted_stream_ids
def get_starred_message_ids(user_profile: UserProfile) -> List[int]:
return list(
UserMessage.objects.filter(
user_profile=user_profile,
)
.extra(
where=[UserMessage.where_starred()],
)
.order_by(
"message_id",
)
.values_list("message_id", flat=True)[0:10000]
)
def get_raw_unread_data(user_profile: UserProfile) -> RawUnreadMessagesResult:
excluded_recipient_ids = get_inactive_recipient_ids(user_profile)
user_msgs = (
UserMessage.objects.filter(
user_profile=user_profile,
)
.exclude(
message__recipient_id__in=excluded_recipient_ids,
)
.extra(
where=[UserMessage.where_unread()],
)
.values(
"message_id",
"message__sender_id",
MESSAGE__TOPIC,
"message__recipient_id",
"message__recipient__type",
"message__recipient__type_id",
"flags",
)
.order_by("-message_id")
)
# Limit unread messages for performance reasons.
user_msgs = list(user_msgs[:MAX_UNREAD_MESSAGES])
rows = list(reversed(user_msgs))
return extract_unread_data_from_um_rows(rows, user_profile)
def extract_unread_data_from_um_rows(
rows: List[Dict[str, Any]], user_profile: Optional[UserProfile]
) -> RawUnreadMessagesResult:
pm_dict: Dict[int, Any] = {}
stream_dict: Dict[int, Any] = {}
unmuted_stream_msgs: Set[int] = set()
huddle_dict: Dict[int, Any] = {}
mentions: Set[int] = set()
total_unreads = 0
raw_unread_messages: RawUnreadMessagesResult = dict(
pm_dict=pm_dict,
stream_dict=stream_dict,
muted_stream_ids=[],
unmuted_stream_msgs=unmuted_stream_msgs,
huddle_dict=huddle_dict,
mentions=mentions,
old_unreads_missing=False,
)
if user_profile is None:
return raw_unread_messages
muted_stream_ids = get_muted_stream_ids(user_profile)
raw_unread_messages["muted_stream_ids"] = muted_stream_ids
topic_mute_checker = build_topic_mute_checker(user_profile)
def is_row_muted(stream_id: int, recipient_id: int, topic: str) -> bool:
if stream_id in muted_stream_ids:
return True
if topic_mute_checker(recipient_id, topic):
return True
# Messages sent by muted users are never unread, so we don't
# need any logic related to muted users here.
return False
huddle_cache: Dict[int, str] = {}
def get_huddle_users(recipient_id: int) -> str:
if recipient_id in huddle_cache:
return huddle_cache[recipient_id]
user_ids_string = huddle_users(recipient_id)
huddle_cache[recipient_id] = user_ids_string
return user_ids_string
for row in rows:
total_unreads += 1
message_id = row["message_id"]
msg_type = row["message__recipient__type"]
recipient_id = row["message__recipient_id"]
sender_id = row["message__sender_id"]
if msg_type == Recipient.STREAM:
stream_id = row["message__recipient__type_id"]
topic = row[MESSAGE__TOPIC]
stream_dict[message_id] = dict(
stream_id=stream_id,
topic=topic,
sender_id=sender_id,
)
if not is_row_muted(stream_id, recipient_id, topic):
unmuted_stream_msgs.add(message_id)
elif msg_type == Recipient.PERSONAL:
if sender_id == user_profile.id:
other_user_id = row["message__recipient__type_id"]
else:
other_user_id = sender_id
# The `sender_id` field here is misnamed. It's really
# just the other participant in a PM conversation. For
# most unread PM messages, the other user is also the sender,
# but that's not true for certain messages sent from the
# API. Unfortunately, it's difficult now to rename the
# field without breaking mobile.
pm_dict[message_id] = dict(
sender_id=other_user_id,
)
elif msg_type == Recipient.HUDDLE:
user_ids_string = get_huddle_users(recipient_id)
huddle_dict[message_id] = dict(
user_ids_string=user_ids_string,
)
# TODO: Add support for alert words here as well.
is_mentioned = (row["flags"] & UserMessage.flags.mentioned) != 0
is_wildcard_mentioned = (row["flags"] & UserMessage.flags.wildcard_mentioned) != 0
if is_mentioned:
mentions.add(message_id)
if is_wildcard_mentioned:
if msg_type == Recipient.STREAM:
stream_id = row["message__recipient__type_id"]
topic = row[MESSAGE__TOPIC]
if not is_row_muted(stream_id, recipient_id, topic):
mentions.add(message_id)
else: # nocoverage # TODO: Test wildcard mentions in PMs.
mentions.add(message_id)
# Record whether the user had more than MAX_UNREAD_MESSAGES total
# unreads -- that's a state where Zulip's behavior will start to
# be erroneous, and clients should display a warning.
raw_unread_messages["old_unreads_missing"] = total_unreads == MAX_UNREAD_MESSAGES
return raw_unread_messages
def aggregate_unread_data(raw_data: RawUnreadMessagesResult) -> UnreadMessagesResult:
pm_dict = raw_data["pm_dict"]
stream_dict = raw_data["stream_dict"]
unmuted_stream_msgs = raw_data["unmuted_stream_msgs"]
huddle_dict = raw_data["huddle_dict"]
mentions = list(raw_data["mentions"])
count = len(pm_dict) + len(unmuted_stream_msgs) + len(huddle_dict)
pm_objects = aggregate_message_dict(
input_dict=pm_dict,
lookup_fields=[
"sender_id",
],
collect_senders=False,
)
stream_objects = aggregate_message_dict(
input_dict=stream_dict,
lookup_fields=[
"stream_id",
"topic",
],
collect_senders=True,
)
huddle_objects = aggregate_message_dict(
input_dict=huddle_dict,
lookup_fields=[
"user_ids_string",
],
collect_senders=False,
)
result: UnreadMessagesResult = dict(
pms=pm_objects,
streams=stream_objects,
huddles=huddle_objects,
mentions=mentions,
count=count,
old_unreads_missing=raw_data["old_unreads_missing"],
)
return result
def apply_unread_message_event(
user_profile: UserProfile,
state: RawUnreadMessagesResult,
message: Dict[str, Any],
flags: List[str],
) -> None:
message_id = message["id"]
if message["type"] == "stream":
message_type = "stream"
elif message["type"] == "private":
others = [recip for recip in message["display_recipient"] if recip["id"] != user_profile.id]
if len(others) <= 1:
message_type = "private"
else:
message_type = "huddle"
else:
raise AssertionError("Invalid message type {}".format(message["type"]))
sender_id = message["sender_id"]
if message_type == "stream":
stream_id = message["stream_id"]
topic = message[TOPIC_NAME]
new_row = dict(
stream_id=stream_id,
topic=topic,
sender_id=sender_id,
)
state["stream_dict"][message_id] = new_row
if stream_id not in state["muted_stream_ids"]:
# This next check hits the database.
if not topic_is_muted(user_profile, stream_id, topic):
state["unmuted_stream_msgs"].add(message_id)
elif message_type == "private":
if len(others) == 1:
other_id = others[0]["id"]
else:
other_id = user_profile.id
# The `sender_id` field here is misnamed.
new_row = dict(
sender_id=other_id,
)
state["pm_dict"][message_id] = new_row
else:
display_recipient = message["display_recipient"]
user_ids = [obj["id"] for obj in display_recipient]
user_ids = sorted(user_ids)
user_ids_string = ",".join(str(uid) for uid in user_ids)
new_row = dict(
user_ids_string=user_ids_string,
)
state["huddle_dict"][message_id] = new_row
if "mentioned" in flags:
state["mentions"].add(message_id)
if "wildcard_mentioned" in flags:
if message_id in state["unmuted_stream_msgs"]:
state["mentions"].add(message_id)
def remove_message_id_from_unread_mgs(state: RawUnreadMessagesResult, message_id: int) -> None:
# The opposite of apply_unread_message_event; removes a read or
# deleted message from a raw_unread_msgs data structure.
state["pm_dict"].pop(message_id, None)
state["stream_dict"].pop(message_id, None)
state["huddle_dict"].pop(message_id, None)
state["unmuted_stream_msgs"].discard(message_id)
state["mentions"].discard(message_id)
def estimate_recent_messages(realm: Realm, hours: int) -> int:
stat = COUNT_STATS["messages_sent:is_bot:hour"]
d = timezone_now() - datetime.timedelta(hours=hours)
return (
RealmCount.objects.filter(property=stat.property, end_time__gt=d, realm=realm).aggregate(
Sum("value")
)["value__sum"]
or 0
)
def get_first_visible_message_id(realm: Realm) -> int:
return realm.first_visible_message_id
def maybe_update_first_visible_message_id(realm: Realm, lookback_hours: int) -> None:
recent_messages_count = estimate_recent_messages(realm, lookback_hours)
if realm.message_visibility_limit is not None and recent_messages_count > 0:
update_first_visible_message_id(realm)
def update_first_visible_message_id(realm: Realm) -> None:
if realm.message_visibility_limit is None:
realm.first_visible_message_id = 0
else:
try:
first_visible_message_id = (
Message.objects.filter(sender__realm=realm)
.values("id")
.order_by("-id")[realm.message_visibility_limit - 1]["id"]
)
except IndexError:
first_visible_message_id = 0
realm.first_visible_message_id = first_visible_message_id
realm.save(update_fields=["first_visible_message_id"])
def get_last_message_id() -> int:
# We generally use this function to populate RealmAuditLog, and
# the max id here is actually systemwide, not per-realm. I
# assume there's some advantage in not filtering by realm.
last_id = Message.objects.aggregate(Max("id"))["id__max"]
if last_id is None:
# During initial realm creation, there might be 0 messages in
# the database; in that case, the `aggregate` query returns
# None. Since we want an int for "beginning of time", use -1.
last_id = -1
return last_id
def get_recent_conversations_recipient_id(
user_profile: UserProfile, recipient_id: int, sender_id: int
) -> int:
"""Helper for doing lookups of the recipient_id that
get_recent_private_conversations would have used to record that
message in its data structure.
"""
my_recipient_id = user_profile.recipient_id
if recipient_id == my_recipient_id:
return UserProfile.objects.values_list("recipient_id", flat=True).get(id=sender_id)
return recipient_id
def get_recent_private_conversations(user_profile: UserProfile) -> Dict[int, Dict[str, Any]]:
"""This function uses some carefully optimized SQL queries, designed
to use the UserMessage index on private_messages. It is
significantly complicated by the fact that for 1:1 private
messages, we store the message against a recipient_id of whichever
user was the recipient, and thus for 1:1 private messages sent
directly to us, we need to look up the other user from the
sender_id on those messages. You'll see that pattern repeated
both here and also in zerver/lib/events.py.
Ideally, we would write these queries using Django, but even
without the UNION ALL, that seems to not be possible, because the
equivalent Django syntax (for the first part of this query):
message_data = UserMessage.objects.select_related("message__recipient_id").filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_private()]
).order_by("-message_id")[:1000].values(
"message__recipient_id").annotate(last_message_id=Max("message_id"))
does not properly nest the GROUP BY (from .annotate) with the slicing.
We return a dictionary structure for convenient modification
below; this structure is converted into its final form by
post_process.
"""
RECENT_CONVERSATIONS_LIMIT = 1000
recipient_map = {}
my_recipient_id = user_profile.recipient_id
query = SQL(
"""
SELECT
subquery.recipient_id, MAX(subquery.message_id)
FROM (
(SELECT
um.message_id AS message_id,
m.recipient_id AS recipient_id
FROM
zerver_usermessage um
JOIN
zerver_message m
ON
um.message_id = m.id
WHERE
um.user_profile_id=%(user_profile_id)s AND
um.flags & 2048 <> 0 AND
m.recipient_id <> %(my_recipient_id)s
ORDER BY message_id DESC
LIMIT %(conversation_limit)s)
UNION ALL
(SELECT
m.id AS message_id,
sender_profile.recipient_id AS recipient_id
FROM
zerver_message m
JOIN
zerver_userprofile sender_profile
ON
m.sender_id = sender_profile.id
WHERE
m.recipient_id=%(my_recipient_id)s
ORDER BY message_id DESC
LIMIT %(conversation_limit)s)
) AS subquery
GROUP BY subquery.recipient_id
"""
)
with connection.cursor() as cursor:
cursor.execute(
query,
{
"user_profile_id": user_profile.id,
"conversation_limit": RECENT_CONVERSATIONS_LIMIT,
"my_recipient_id": my_recipient_id,
},
)
rows = cursor.fetchall()
# The resulting rows will be (recipient_id, max_message_id)
# objects for all parties we've had recent (group?) private
# message conversations with, including PMs with yourself (those
# will generate an empty list of user_ids).
for recipient_id, max_message_id in rows:
recipient_map[recipient_id] = dict(
max_message_id=max_message_id,
user_ids=[],
)
# Now we need to map all the recipient_id objects to lists of user IDs
for (recipient_id, user_profile_id) in (
Subscription.objects.filter(recipient_id__in=recipient_map.keys())
.exclude(user_profile_id=user_profile.id)
.values_list("recipient_id", "user_profile_id")
):
recipient_map[recipient_id]["user_ids"].append(user_profile_id)
# Sort to prevent test flakes and client bugs.
for rec in recipient_map.values():
rec["user_ids"].sort()
return recipient_map
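# Illustrative sketch (not part of the original module): one way a caller
# might flatten the structure returned above into a list ordered by recency.
# The output shape is an assumption for demonstration; the real API formatting
# happens in the post_process step mentioned in the docstring.
def _example_recent_conversations_summary(user_profile: UserProfile) -> List[Dict[str, Any]]:
    recipient_map = get_recent_private_conversations(user_profile)
    rows = [
        dict(user_ids=rec["user_ids"], max_message_id=rec["max_message_id"])
        for rec in recipient_map.values()
    ]
    rows.sort(key=lambda row: row["max_message_id"], reverse=True)
    return rows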
def wildcard_mention_allowed(sender: UserProfile, stream: Stream) -> bool:
realm = sender.realm
# If there are fewer than Realm.WILDCARD_MENTION_THRESHOLD, we
# allow sending. In the future, we may want to make this behavior
# a default, and also just allow explicitly setting whether this
# applies to a stream as an override.
if num_subscribers_for_stream_id(stream.id) <= Realm.WILDCARD_MENTION_THRESHOLD:
return True
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_NOBODY:
return False
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_EVERYONE:
return True
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_ADMINS:
return sender.is_realm_admin
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_STREAM_ADMINS:
# TODO: Change this when we implement stream administrators
return sender.is_realm_admin
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_FULL_MEMBERS:
return sender.is_realm_admin or (not sender.is_provisional_member and not sender.is_guest)
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_MEMBERS:
return not sender.is_guest
raise AssertionError("Invalid wildcard mention policy")
| apache-2.0 | 8,241,790,482,959,836,000 | 34.254429 | 100 | 0.61911 | false |
marek-sezemsky/coreemu | daemon/core/misc/ipaddr.py | 11 | 6985 | #
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Tom Goff <[email protected]>
#
'''
ipaddr.py: helper objects for dealing with IPv4/v6 addresses.
'''
import socket
import struct
import random
AF_INET = socket.AF_INET
AF_INET6 = socket.AF_INET6
class MacAddr(object):
def __init__(self, addr):
self.addr = addr
def __str__(self):
return ":".join(map(lambda x: ("%02x" % ord(x)), self.addr))
def tolinklocal(self):
        ''' Convert the MAC address to an IPv6 link-local address, using the
        EUI-48 to EUI-64 conversion process per RFC 5342.
'''
if not self.addr:
return IPAddr.fromstring("::")
tmp = struct.unpack("!Q", '\x00\x00' + self.addr)[0]
nic = long(tmp) & 0x000000FFFFFFL
oui = long(tmp) & 0xFFFFFF000000L
# toggle U/L bit
oui ^= 0x020000000000L
# append EUI-48 octets
oui = (oui << 16) | 0xFFFE000000L
return IPAddr(AF_INET6, struct.pack("!QQ", 0xfe80 << 48, oui | nic))
@classmethod
def fromstring(cls, s):
addr = "".join(map(lambda x: chr(int(x, 16)), s.split(":")))
return cls(addr)
@classmethod
def random(cls):
tmp = random.randint(0, 0xFFFFFF)
tmp |= 0x00163E << 24 # use the Xen OID 00:16:3E
tmpbytes = struct.pack("!Q", tmp)
return cls(tmpbytes[2:])
class IPAddr(object):
def __init__(self, af, addr):
# check if (af, addr) is valid
if not socket.inet_ntop(af, addr):
raise ValueError, "invalid af/addr"
self.af = af
self.addr = addr
def isIPv4(self):
return self.af == AF_INET
def isIPv6(self):
return self.af == AF_INET6
def __str__(self):
return socket.inet_ntop(self.af, self.addr)
def __eq__(self, other):
try:
return other.af == self.af and other.addr == self.addr
except:
return False
def __add__(self, other):
try:
carry = int(other)
except:
return NotImplemented
tmp = map(lambda x: ord(x), self.addr)
for i in xrange(len(tmp) - 1, -1, -1):
x = tmp[i] + carry
tmp[i] = x & 0xff
carry = x >> 8
if carry == 0:
break
addr = "".join(map(lambda x: chr(x), tmp))
return self.__class__(self.af, addr)
def __sub__(self, other):
try:
tmp = -int(other)
except:
return NotImplemented
return self.__add__(tmp)
@classmethod
def fromstring(cls, s):
for af in AF_INET, AF_INET6:
try:
return cls(af, socket.inet_pton(af, s))
except Exception, e:
pass
raise e
@staticmethod
def toint(s):
''' convert IPv4 string to 32-bit integer
'''
bin = socket.inet_pton(AF_INET, s)
return(struct.unpack('!I', bin)[0])
class IPPrefix(object):
def __init__(self, af, prefixstr):
"prefixstr format: address/prefixlen"
tmp = prefixstr.split("/")
if len(tmp) > 2:
raise ValueError, "invalid prefix: '%s'" % prefixstr
self.af = af
if self.af == AF_INET:
self.addrlen = 32
elif self.af == AF_INET6:
self.addrlen = 128
else:
raise ValueError, "invalid address family: '%s'" % self.af
if len(tmp) == 2:
self.prefixlen = int(tmp[1])
else:
self.prefixlen = self.addrlen
self.prefix = socket.inet_pton(self.af, tmp[0])
if self.addrlen > self.prefixlen:
addrbits = self.addrlen - self.prefixlen
netmask = ((1L << self.prefixlen) - 1) << addrbits
prefix = ""
for i in xrange(-1, -(addrbits >> 3) - 2, -1):
prefix = chr(ord(self.prefix[i]) & (netmask & 0xff)) + prefix
netmask >>= 8
self.prefix = self.prefix[:i] + prefix
def __str__(self):
return "%s/%s" % (socket.inet_ntop(self.af, self.prefix),
self.prefixlen)
def __eq__(self, other):
try:
return other.af == self.af and \
other.prefixlen == self.prefixlen and \
other.prefix == self.prefix
except:
return False
def __add__(self, other):
try:
tmp = int(other)
except:
return NotImplemented
a = IPAddr(self.af, self.prefix) + \
(tmp << (self.addrlen - self.prefixlen))
prefixstr = "%s/%s" % (a, self.prefixlen)
if self.__class__ == IPPrefix:
return self.__class__(self.af, prefixstr)
else:
return self.__class__(prefixstr)
def __sub__(self, other):
try:
tmp = -int(other)
except:
return NotImplemented
return self.__add__(tmp)
def addr(self, hostid):
tmp = int(hostid)
if (tmp == 1 or tmp == 0 or tmp == -1) and self.addrlen == self.prefixlen:
return IPAddr(self.af, self.prefix)
if tmp == 0 or \
tmp > (1 << (self.addrlen - self.prefixlen)) - 1 or \
(self.af == AF_INET and tmp == (1 << (self.addrlen - self.prefixlen)) - 1):
raise ValueError, "invalid hostid for prefix %s: %s" % (self, hostid)
addr = ""
for i in xrange(-1, -(self.addrlen >> 3) - 1, -1):
addr = chr(ord(self.prefix[i]) | (tmp & 0xff)) + addr
tmp >>= 8
if not tmp:
break
addr = self.prefix[:i] + addr
return IPAddr(self.af, addr)
def minaddr(self):
return self.addr(1)
def maxaddr(self):
if self.af == AF_INET:
return self.addr((1 << (self.addrlen - self.prefixlen)) - 2)
else:
return self.addr((1 << (self.addrlen - self.prefixlen)) - 1)
def numaddr(self):
return max(0, (1 << (self.addrlen - self.prefixlen)) - 2)
def prefixstr(self):
return "%s" % socket.inet_ntop(self.af, self.prefix)
def netmaskstr(self):
addrbits = self.addrlen - self.prefixlen
netmask = ((1L << self.prefixlen) - 1) << addrbits
netmaskbytes = struct.pack("!L", netmask)
return IPAddr(af=AF_INET, addr=netmaskbytes).__str__()
class IPv4Prefix(IPPrefix):
def __init__(self, prefixstr):
IPPrefix.__init__(self, AF_INET, prefixstr)
class IPv6Prefix(IPPrefix):
def __init__(self, prefixstr):
IPPrefix.__init__(self, AF_INET6, prefixstr)
def isIPAddress(af, addrstr):
try:
tmp = socket.inet_pton(af, addrstr)
return True
except:
return False
def isIPv4Address(addrstr):
return isIPAddress(AF_INET, addrstr)
def isIPv6Address(addrstr):
return isIPAddress(AF_INET6, addrstr)
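# Hedged usage sketch (not part of the original module); the MAC and prefix
# values are made up, and the code targets Python 2 like the classes above.
def _ipaddr_examples():
    mac = MacAddr.fromstring("00:16:3e:aa:bb:cc")
    # EUI-48 -> EUI-64: the U/L bit is toggled and ff:fe is inserted, giving
    # fe80::216:3eff:feaa:bbcc
    linklocal = str(mac.tolinklocal())
    prefix = IPv4Prefix("10.0.0.0/24")
    usable = (str(prefix.minaddr()), str(prefix.maxaddr())) # ('10.0.0.1', '10.0.0.254')
    netmask = prefix.netmaskstr() # '255.255.255.0'
    return linklocal, usable, netmask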
| bsd-2-clause | 5,247,344,145,343,764,000 | 29.369565 | 87 | 0.528132 | false |
ronkyo/mi-instrument | mi/idk/platform/switch_driver.py | 11 | 3321 | """
@file coi-services/mi/idk/platform/switch_driver.py
@author Bill French
@brief Main script class for running the switch_driver process
"""
from os.path import exists, join, isdir
from os import listdir
from mi.idk.metadata import Metadata
from mi.idk.comm_config import CommConfig
from mi.idk.config import Config
from mi.idk.exceptions import DriverDoesNotExist
from mi.core.log import get_logger ; log = get_logger()
import os
import re
from glob import glob
import subprocess
from mi.idk import prompt
import mi.idk.switch_driver
import mi.idk.platform.metadata
class SwitchDriver(mi.idk.switch_driver.SwitchDriver):
"""
Main class for running the switch driver process.
"""
def __init__(self, path=None, version=None):
self.driver_path = path
self.driver_version = version
def get_base_name(self):
return 'platform_%s_%s' % (self.driver_path.replace('/', '_'),
self.driver_version.replace('.', '_'))
def get_metadata(self):
self.metadata = mi.idk.platform.metadata.Metadata(self.driver_path)
return self.metadata
def fetch_metadata(self):
"""
@brief collect metadata from the user
"""
if not (self.driver_path):
self.driver_path = prompt.text( 'Driver Path' )
self.get_metadata()
self.driver_version = prompt.text('Driver Version', self.metadata.version)
def fetch_comm_config(self):
"""
@brief No comm config for dsa
"""
pass
@staticmethod
def list_drivers():
"""
@brief Print a list of all the different drivers and their versions
"""
drivers = SwitchDriver.get_drivers()
for driver in sorted(drivers.keys()):
for version in sorted(drivers[driver]):
print "%s %s" % (driver, version)
@staticmethod
def get_drivers():
"""
@brief Get a list of all the different drivers and their versions
"""
result = {}
driver_dir = join(Config().get("working_repo"), 'mi', 'platform', 'driver')
log.debug("Driver Dir: %s", driver_dir)
files = []
for dirname,_,_ in os.walk(driver_dir):
files.extend(glob(os.path.join(dirname,"metadata.yml")))
log.debug("Files: %s", files)
for f in files:
matcher = re.compile( "%s/(.*)/metadata.yml" % driver_dir )
match = matcher.match(f)
path = match.group(1)
result[path] = SwitchDriver.get_versions(path)
return result
@staticmethod
def get_versions(path):
"""
@brief Get all versions for this driver from the tags
@param path - the driver path
"""
# get all tags that start with this instrument
cmd = 'git tag -l ' + 'release_platform_' + path.replace('/', '_') + '*'
log.debug("git cmd: %s", cmd)
output = subprocess.check_output(cmd, shell=True)
version_list = ['master']
if len(output) > 0:
tag_regex = re.compile(r'release_platform_[a-z0-9_]+(\d+_\d+_\d+)')
tag_iter = tag_regex.finditer(output)
for tag_match in tag_iter:
version_list.append(tag_match.group(1))
return version_list
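# Hedged illustration (not part of the original file): tags named like
# release_platform_<path>_<major>_<minor>_<patch> yield underscore-separated
# versions via the regex above; the sample tag names are hypothetical.
def _example_version_parse():
    tag_regex = re.compile(r'release_platform_[a-z0-9_]+(\d+_\d+_\d+)')
    output = "release_platform_rsn_node_1_0_3\nrelease_platform_rsn_node_1_1_0\n"
    return ['master'] + [m.group(1) for m in tag_regex.finditer(output)] # ['master', '1_0_3', '1_1_0']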
| bsd-2-clause | -8,164,659,112,145,360,000 | 29.190909 | 83 | 0.595303 | false |
sysadmin75/ansible | test/lib/ansible_test/_internal/cloud/hcloud.py | 19 | 3099 | """Hetzner Cloud plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..util import (
display,
ConfigParser,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class HcloudCloudProvider(CloudProvider):
"""Hetzner Cloud provider plugin. Sets up cloud resources before
delegation.
"""
def __init__(self, args):
"""
:type args: TestConfig
"""
super(HcloudCloudProvider, self).__init__(args)
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = self._create_ansible_core_ci()
if aci.available:
return
super(HcloudCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(HcloudCloudProvider, self).setup()
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
"""Request Hetzner credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
token = response['hetzner']['token']
display.sensitive.add(token)
display.info('Hetzner Cloud Token: %s' % token, verbosity=1)
values = dict(
TOKEN=token,
)
display.sensitive.add(values['TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self):
"""
:rtype: AnsibleCoreCI
"""
return AnsibleCoreCI(self.args, 'hetzner', 'hetzner', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class HcloudCloudEnvironment(CloudEnvironment):
"""Hetzner Cloud cloud environment plugin. Updates integration test environment
after delegation.
"""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
env_vars = dict(
HCLOUD_TOKEN=parser.get('default', 'hcloud_api_token'),
)
display.sensitive.add(env_vars['HCLOUD_TOKEN'])
ansible_vars = dict(
hcloud_prefix=self.resource_prefix,
)
ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
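# Hedged illustration (not part of the original plugin): the INI file read by
# get_environment_config() above has this shape; the token value is made up.
EXAMPLE_HCLOUD_CONFIG = """\
[default]
hcloud_api_token = 0123456789abcdef
"""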
| gpl-3.0 | 2,734,247,036,087,165,400 | 25.715517 | 142 | 0.609229 | false |
motion2015/edx-platform | lms/djangoapps/shoppingcart/migrations/0007_auto__add_field_orderitem_service_fee.py | 114 | 10407 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OrderItem.service_fee'
db.add_column('shoppingcart_orderitem', 'service_fee',
self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=30, decimal_places=2),
keep_default=False)
# Adding index on 'OrderItem', fields ['status']
db.create_index('shoppingcart_orderitem', ['status'])
# Adding index on 'OrderItem', fields ['fulfilled_time']
db.create_index('shoppingcart_orderitem', ['fulfilled_time'])
# Adding index on 'OrderItem', fields ['refund_requested_time']
db.create_index('shoppingcart_orderitem', ['refund_requested_time'])
def backwards(self, orm):
# Removing index on 'OrderItem', fields ['refund_requested_time']
db.delete_index('shoppingcart_orderitem', ['refund_requested_time'])
# Removing index on 'OrderItem', fields ['fulfilled_time']
db.delete_index('shoppingcart_orderitem', ['fulfilled_time'])
# Removing index on 'OrderItem', fields ['status']
db.delete_index('shoppingcart_orderitem', ['status'])
# Deleting field 'OrderItem.service_fee'
db.delete_column('shoppingcart_orderitem', 'service_fee')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 | 1,544,607,153,648,904,200 | 72.288732 | 182 | 0.562506 | false |
boudinfl/pke | pke/supervised/neural_based/seq2seq.py | 1 | 1171 | # -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 11-11-2018
"""
Implementation of the Seq2Seq model for automatic keyphrase extraction.
"""
from __future__ import absolute_import
from __future__ import print_function
from pke.supervised.api import SupervisedLoadFile
class Seq2Seq(SupervisedLoadFile):
def __init__(self):
"""Redefining initializer for Seq2Seq."""
super(Seq2Seq, self).__init__()
self.sequence = []
"""Input sequence."""
self.vocabulary = ['<SOS>', '<EOS>', '<UNK>']
"""Vocabulary."""
def document_to_ix(self):
"""Convert the document to a sequence of ix."""
self.sequence.append(self.vocabulary.index('<SOS>'))
for i, sentence in enumerate(self.sentences):
for word in sentence.stems:
try:
self.sequence.append(self.vocabulary.index(word))
except ValueError:
self.sequence.append(self.vocabulary.index('<UNK>'))
self.sequence.append(self.vocabulary.index('<EOS>'))
def candidate_selection(self):
pass
def candidate_weighting(self):
pass
| gpl-3.0 | 2,304,112,351,493,401,900 | 25.613636 | 72 | 0.601196 | false |
ahmadio/edx-platform | common/djangoapps/enrollment/data.py | 39 | 9283 | """
Data Aggregation Layer of the Enrollment API. Collects all enrollment specific data into a single
source to be used throughout the API.
"""
import logging
from django.contrib.auth.models import User
from opaque_keys.edx.keys import CourseKey
from enrollment.errors import (
CourseNotFoundError, CourseEnrollmentClosedError, CourseEnrollmentFullError,
CourseEnrollmentExistsError, UserNotFoundError, InvalidEnrollmentAttribute
)
from enrollment.serializers import CourseEnrollmentSerializer, CourseField
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from student.models import (
CourseEnrollment, NonExistentCourseError, EnrollmentClosedError,
CourseFullError, AlreadyEnrolledError, CourseEnrollmentAttribute
)
log = logging.getLogger(__name__)
def get_course_enrollments(user_id):
"""Retrieve a list representing all aggregated data for a user's course enrollments.
Construct a representation of all course enrollment data for a specific user.
Args:
user_id (str): The name of the user to retrieve course enrollment information for.
Returns:
A serializable list of dictionaries of all aggregated enrollment data for a user.
"""
qset = CourseEnrollment.objects.filter(
user__username=user_id, is_active=True
).order_by('created')
return CourseEnrollmentSerializer(qset).data
def get_course_enrollment(username, course_id):
"""Retrieve an object representing all aggregated data for a user's course enrollment.
Get the course enrollment information for a specific user and course.
Args:
username (str): The name of the user to retrieve course enrollment information for.
course_id (str): The course to retrieve course enrollment information for.
Returns:
A serializable dictionary representing the course enrollment.
"""
course_key = CourseKey.from_string(course_id)
try:
enrollment = CourseEnrollment.objects.get(
user__username=username, course_id=course_key
)
return CourseEnrollmentSerializer(enrollment).data
except CourseEnrollment.DoesNotExist:
return None
def create_course_enrollment(username, course_id, mode, is_active):
"""Create a new course enrollment for the given user.
    Creates a new course enrollment for the specified username.
Args:
username (str): The name of the user to create a new course enrollment for.
course_id (str): The course to create the course enrollment for.
mode (str): (Optional) The mode for the new enrollment.
is_active (boolean): (Optional) Determines if the enrollment is active.
Returns:
A serializable dictionary representing the new course enrollment.
Raises:
CourseNotFoundError
CourseEnrollmentFullError
EnrollmentClosedError
CourseEnrollmentExistsError
"""
course_key = CourseKey.from_string(course_id)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
msg = u"Not user with username '{username}' found.".format(username=username)
log.warn(msg)
raise UserNotFoundError(msg)
try:
enrollment = CourseEnrollment.enroll(user, course_key, check_access=True)
return _update_enrollment(enrollment, is_active=is_active, mode=mode)
except NonExistentCourseError as err:
raise CourseNotFoundError(err.message)
except EnrollmentClosedError as err:
raise CourseEnrollmentClosedError(err.message)
except CourseFullError as err:
raise CourseEnrollmentFullError(err.message)
except AlreadyEnrolledError as err:
enrollment = get_course_enrollment(username, course_id)
raise CourseEnrollmentExistsError(err.message, enrollment)
def update_course_enrollment(username, course_id, mode=None, is_active=None):
"""Modify a course enrollment for a user.
Allows updates to a specific course enrollment.
Args:
username (str): The name of the user to retrieve course enrollment information for.
course_id (str): The course to retrieve course enrollment information for.
mode (str): (Optional) If specified, modify the mode for this enrollment.
is_active (boolean): (Optional) Determines if the enrollment is active.
Returns:
A serializable dictionary representing the modified course enrollment.
"""
course_key = CourseKey.from_string(course_id)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
msg = u"Not user with username '{username}' found.".format(username=username)
log.warn(msg)
raise UserNotFoundError(msg)
try:
enrollment = CourseEnrollment.objects.get(user=user, course_id=course_key)
return _update_enrollment(enrollment, is_active=is_active, mode=mode)
except CourseEnrollment.DoesNotExist:
return None
def add_or_update_enrollment_attr(user_id, course_id, attributes):
"""Set enrollment attributes for the enrollment of given user in the
course provided.
Args:
course_id (str): The Course to set enrollment attributes for.
user_id (str): The User to set enrollment attributes for.
attributes (list): Attributes to be set.
Example:
>>>add_or_update_enrollment_attr(
"Bob",
"course-v1-edX-DemoX-1T2015",
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
)
"""
course_key = CourseKey.from_string(course_id)
user = _get_user(user_id)
enrollment = CourseEnrollment.get_enrollment(user, course_key)
if not _invalid_attribute(attributes) and enrollment is not None:
CourseEnrollmentAttribute.add_enrollment_attr(enrollment, attributes)
def get_enrollment_attributes(user_id, course_id):
"""Retrieve enrollment attributes for given user for provided course.
Args:
user_id: The User to get enrollment attributes for
course_id (str): The Course to get enrollment attributes for.
Example:
>>>get_enrollment_attributes("Bob", "course-v1-edX-DemoX-1T2015")
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
Returns: list
"""
course_key = CourseKey.from_string(course_id)
user = _get_user(user_id)
enrollment = CourseEnrollment.get_enrollment(user, course_key)
return CourseEnrollmentAttribute.get_enrollment_attributes(enrollment)
def _get_user(user_id):
"""Retrieve user with provided user_id
Args:
user_id(str): username of the user for which object is to retrieve
Returns: obj
"""
try:
return User.objects.get(username=user_id)
except User.DoesNotExist:
msg = u"Not user with username '{username}' found.".format(username=user_id)
log.warn(msg)
raise UserNotFoundError(msg)
def _update_enrollment(enrollment, is_active=None, mode=None):
enrollment.update_enrollment(is_active=is_active, mode=mode)
enrollment.save()
return CourseEnrollmentSerializer(enrollment).data
def _invalid_attribute(attributes):
"""Validate enrollment attribute
Args:
attributes(dict): dict of attribute
Return:
list of invalid attributes
"""
invalid_attributes = []
for attribute in attributes:
if "namespace" not in attribute:
msg = u"'namespace' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("namespace")
raise InvalidEnrollmentAttribute(msg)
if "name" not in attribute:
msg = u"'name' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("name")
raise InvalidEnrollmentAttribute(msg)
if "value" not in attribute:
msg = u"'value' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("value")
raise InvalidEnrollmentAttribute(msg)
return invalid_attributes
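# Hedged illustration (not part of the original module): a well-formed
# enrollment attribute carries all three keys validated above; the values are
# the same sample ones used in the docstrings.
EXAMPLE_ENROLLMENT_ATTRIBUTE = {
    "namespace": "credit",
    "name": "provider_id",
    "value": "hogwarts",
}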
def get_course_enrollment_info(course_id, include_expired=False):
"""Returns all course enrollment information for the given course.
Based on the course id, return all related course information.
Args:
course_id (str): The course to retrieve enrollment information for.
include_expired (bool): Boolean denoting whether expired course modes
should be included in the returned JSON data.
Returns:
A serializable dictionary representing the course's enrollment information.
Raises:
CourseNotFoundError
"""
course_key = CourseKey.from_string(course_id)
try:
course = CourseOverview.get_from_id(course_key)
except CourseOverview.DoesNotExist:
msg = u"Requested enrollment information for unknown course {course}".format(course=course_id)
log.warning(msg)
raise CourseNotFoundError(msg)
else:
return CourseField().to_native(course, include_expired=include_expired)
| agpl-3.0 | 817,833,241,314,472,700 | 32.879562 | 102 | 0.682107 | false |
alexknvl/mbody | wire_loops.py | 1 | 2403 | from numpy import *
from ellip import ellipe, ellipk
from const import *
import unittest
def wire_loop_field(radius, x, y, z):
rr = radius
r1 = sqrt(x**2 + y**2)
if r1 == 0:
return array([0, 0, 2 * pi * rr ** 2 / pow(rr**2 + z**2, 1.5)]) * kMagneticConstant / (4 * kPi)
theta = arctan2(y, x)
alpha = sqrt(r1**2 + 2 * r1 * rr + rr**2 + z**2)
beta = sqrt(r1**2 - 2 * r1 * rr + rr**2 + z**2)
gamma = sqrt(x**2 + y**2 + z**2)
k1 = (4*r1*rr)/alpha**2
k2 = (-4*r1*rr)/beta**2
ek1 = ellipe(k1)
ek2 = ellipe(k2)
kk1 = ellipk(k1)
kk2 = ellipk(k2)
cth = cos(theta)
sth = sin(theta)
return array([-((cth*z*(alpha*kk1*r1**2 + beta*kk2*r1**2 - 2*alpha*kk1*r1*rr + 2*beta*kk2*r1*rr + alpha*kk1*rr**2 + beta*kk2*rr**2 - alpha*ek1*(gamma**2 + rr**2) - beta*ek2*(gamma**2 + rr**2) + alpha*kk1*z**2 + beta*kk2*z**2))/
(alpha**2*beta**2*r1)), -((sth*z*(alpha*kk1*r1**2 + beta*kk2*r1**2 - 2*alpha*kk1*r1*rr + 2*beta*kk2*r1*rr + alpha*kk1*rr**2 + beta*kk2*rr**2 - alpha*ek1*(gamma**2 + rr**2) - beta*ek2*(gamma**2 + rr**2) + alpha*kk1*z**2 +
beta*kk2*z**2))/(alpha**2*beta**2*r1)), (alpha*kk1*r1**2 + beta*kk2*r1**2 - 2*alpha*kk1*r1*rr + 2*beta*kk2*r1*rr + alpha*kk1*rr**2 + beta*kk2*rr**2 + alpha*ek1*(-gamma**2 + rr**2) + beta*ek2*(-gamma**2 + rr**2) + alpha*kk1*z**2 +
beta*kk2*z**2)/(alpha**2*beta**2)]) * kMagneticConstant / (4 * kPi)
class TestWireLoop(unittest.TestCase):
def test_continuity(self):
a = wire_loop_field(1, 1e-10, 1e-10, 1)
b = wire_loop_field(1, 0, 0, 1)
for i in xrange(3):
self.assertAlmostEqual(a[i], b[i])
a = wire_loop_field(1, 1e-10, 1e-10, 10)
b = wire_loop_field(1, 0, 0, 10)
for i in xrange(3):
self.assertAlmostEqual(a[i], b[i])
def test_field_in_the_centre(self):
a = wire_loop_field(1, 0, 0, 0)
b = array([0, 0, kMagneticConstant / 2])
for i in xrange(3):
self.assertAlmostEqual(a[i], b[i])
def test_at_some_distance(self):
a = wire_loop_field(10, 0, 0, 1) * 100
b = array([0, 0, 0.0000061901020332917456])
for i in xrange(3):
self.assertAlmostEqual(a[i], b[i])
a = wire_loop_field(10, 0, 0, -1) * 100
b = array([0, 0, 0.0000061901020332917456])
for i in xrange(3):
self.assertAlmostEqual(a[i], b[i])
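    # Hedged extra check (not in the original tests): far from the loop, the
    # on-axis field should approach the dipole form mu0 * a**2 / (2 * z**3).
    def test_far_field_dipole_limit(self):
        radius, z = 1.0, 100.0
        bz = wire_loop_field(radius, 0, 0, z)[2]
        dipole = kMagneticConstant * radius ** 2 / (2 * z ** 3)
        self.assertAlmostEqual(bz / dipole, 1.0, places=3)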
| mit | -2,936,108,847,077,385,700 | 37.758065 | 234 | 0.540158 | false |
MSFTOSSMgmt/WPSDSCLinux | Providers/Scripts/2.4x-2.5x/Scripts/nxOMSPerfCounter.py | 2 | 13033 | #!/usr/bin/env python
#============================================================================
# Copyright (C) Microsoft Corporation, All rights reserved.
#============================================================================
import os
import imp
import re
import codecs
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
# backwards compatibility with pre-multi-homing bundles
conf_path = '/etc/opt/microsoft/omsagent/conf/omsagent.conf'
omi_map_path = '/etc/opt/microsoft/omsagent/conf/omsagent.d/omi_mapping.json'
omi_map = None
workspace_specific = None
non_mh_heartbeat_cmd = '/opt/microsoft/omsagent/bin/omsadmin.sh -b'
oms_restart_cmd = 'sudo /opt/microsoft/omsagent/bin/service_control restart'
def init_paths(WorkspaceID):
"""
Initialize path values depending on workspace ID
"""
global conf_path
global omi_map_path
global workspace_specific
omsagent_dir = '/etc/opt/microsoft/omsagent/'
wspc_conf_dir = omsagent_dir + WorkspaceID + '/conf'
workspace_specific = os.path.isdir(wspc_conf_dir)
if workspace_specific:
LG().Log('INFO', 'Configuration is in a workspace-specific path; ' \
'resource is updating workspace ' + WorkspaceID)
conf_path = wspc_conf_dir + '/omsagent.conf'
omi_map_path = wspc_conf_dir + '/omsagent.d/omi_mapping.json'
def init_omi_map():
"""
Initialize OMI value mapping
"""
global omi_map
txt = codecs.open(omi_map_path, 'r', 'utf8').read()
omi_map = eval(txt)
def init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
"""
Initialize global values
"""
init_paths(WorkspaceID)
init_omi_map()
if WorkspaceID is not None:
WorkspaceID = WorkspaceID.encode('ascii', 'ignore')
else:
WorkspaceID = ''
if PerfCounterObject is not None:
for perf in PerfCounterObject:
new_perfs = []
if len(perf['PerformanceCounter'].value):
for perf_counter in perf['PerformanceCounter'].value:
new_perfs.append(perf_counter.encode('ascii', 'ignore'))
perf['PerformanceCounter'] = new_perfs
if perf['InstanceName'].value is None:
perf['InstanceName'] = ''
else:
perf['InstanceName'] = perf[
'InstanceName'].value.encode('ascii', 'ignore')
if perf['ObjectName'].value is None:
perf['ObjectName'] = ''
else:
perf['ObjectName'] = perf[
'ObjectName'].value.encode('ascii', 'ignore')
if perf['AllInstances'].value is None:
perf['AllInstances'] = False
else:
if perf['AllInstances'].value.value == 1:
perf['AllInstances'] = True
else:
perf['AllInstances'] = False
perf['IntervalSeconds'] = perf['IntervalSeconds'].value.value
def Set_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
return Set(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
def Test_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
return Test(HeartbeatIntervalSeconds, PerfCounterObject)
def Get_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
arg_names = list(locals().keys())
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
retval = 0
NewHeartbeatIntervalSeconds, NewPerf = Get(
HeartbeatIntervalSeconds, PerfCounterObject)
for perf in NewPerf:
if len(perf['PerformanceCounter']):
perf['PerformanceCounter'] = protocol.MI_StringA(
perf['PerformanceCounter'])
perf['ObjectName'] = protocol.MI_String(perf['ObjectName'])
perf['InstanceName'] = protocol.MI_String(perf['InstanceName'])
perf['AllInstances'] = protocol.MI_Boolean(perf['AllInstances'])
perf['IntervalSeconds'] = protocol.MI_Uint16(perf['IntervalSeconds'])
PerfCounterObject = protocol.MI_InstanceA(NewPerf)
HeartbeatIntervalSeconds = protocol.MI_Uint16(NewHeartbeatIntervalSeconds)
WorkspaceID = protocol.MI_String(WorkspaceID)
Name = protocol.MI_String(Name)
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return retval, retd
def Set(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
if Test(HeartbeatIntervalSeconds, PerfCounterObject) == [0]:
return [0]
if UpdateOMSAgentConf(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
return [0]
else:
return [-1]
def Test(HeartbeatIntervalSeconds, PerfCounterObject):
prune_perfs(PerfCounterObject)
NewHeartbeatIntervalSeconds, NewPerfs = ReadOMSAgentConf(
HeartbeatIntervalSeconds, PerfCounterObject)
if NewHeartbeatIntervalSeconds != HeartbeatIntervalSeconds:
return [-1]
PerfCounterObject.sort()
for perf in PerfCounterObject:
perf['PerformanceCounter'].sort()
perf['AllInstances'] = True
NewPerfs.sort()
for perf in NewPerfs:
perf['PerformanceCounter'].sort()
if PerfCounterObject != NewPerfs:
return [-1]
# Check if the omi_mapping_path has been specified yet
if not CheckForOMIMappingPathInConf():
return [-1]
return [0]
def Get(HeartbeatIntervalSeconds, PerfCounterObject):
NewHeartbeatIntervalSeconds, NewPerf = ReadOMSAgentConf(
HeartbeatIntervalSeconds, PerfCounterObject)
return NewHeartbeatIntervalSeconds, NewPerf
def TranslatePerfs(object_name, perfs):
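    """
    Map the requested counter names for object_name onto the entries found in
    omi_mapping.json, returning a dict of {ObjectName: [matching counter names]}.
    """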
d = {}
for p in perfs:
for cname in omi_map:
for prop in cname['CimProperties']:
if (p == prop['CounterName'] or p == prop['CimPropertyName']) and cname['ObjectName'] == object_name:
if cname['ObjectName'] not in d.keys():
d[cname['ObjectName']] = [p]
else:
d[cname['ObjectName']].append(p)
return d
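# Hedged illustration (not part of the original resource): omi_mapping.json is
# a list of entries shaped like the one below, which is the structure that
# TranslatePerfs() above walks; the names here are made up.
EXAMPLE_OMI_MAPPING_ENTRY = {
    'ObjectName': 'Processor',
    'CimProperties': [
        {'CounterName': '% Processor Time', 'CimPropertyName': 'PercentProcessorTime'},
    ],
}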
def ReadOMSAgentConf(HeartbeatIntervalSeconds, PerfCounterObject):
"""
Read OMSAgent conf file and extract the current settings for
HeartbeatIntervalSeconds and perf objects
"""
txt = get_conf_path_text()
if not txt:
return None, []
heartbeat_srch_str = r'<source>.*?tag heartbeat.*?run_interval ([0-9]+[a-z])\n</source>\n'
heartbeat_srch = re.compile(heartbeat_srch_str, re.M | re.S)
m = heartbeat_srch.search(txt)
if m is not None:
interval = int(m.group(1)[:-1])
if m.group(1)[-1:] == 'm':
interval *= 60
else:
interval = None
new_heartbeat = interval
new_perfobj = []
sources = search_for_perf_sections(txt)
inst = ''
interval = 0
for source in sources:
s_perf = []
if len(source[2]):
s_perf = source[2].strip('(').strip(')').split('|')
object_name = source[0]
interval = int(source[3][:-1])
if source[3][-1:] == 'm':
interval *= 60
inst = source[1]
inst = inst.replace('.*', '*')
new_perfobj.append({'PerformanceCounter': s_perf, 'InstanceName': inst,
'IntervalSeconds': interval, 'AllInstances': True, 'ObjectName': object_name})
return new_heartbeat, new_perfobj
def UpdateOMSAgentConf(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
"""
Write the new values given by parameters to the OMSAgent conf file
"""
txt = get_conf_path_text()
if not txt:
LG().Log('INFO', 'Will create new configuration file at ' + conf_path + '.')
heartbeat_srch_str = r'<source>.*?tag heartbeat.*?</source>\n'
heartbeat_srch = re.compile(heartbeat_srch_str, re.M | re.S)
heartbeat_cmd = non_mh_heartbeat_cmd
if workspace_specific:
heartbeat_cmd = 'echo'
heartbeat_src = '<source>\n type exec\n tag heartbeat.output\n command ' + heartbeat_cmd + ' > /dev/null\n format tsv\n keys severity,message\n run_interval ' + \
str(HeartbeatIntervalSeconds) + 's\n</source>\n'
txt = heartbeat_srch.sub(heartbeat_src, txt)
d = {}
perf_src_srch_str = r'\n<source>\n type oms_omi.*?</source>\n'
perf_src_srch = re.compile(perf_src_srch_str, re.M | re.S)
for source in perf_src_srch.findall(txt):
txt = txt.replace(source, '')
new_source = ''
for perf in PerfCounterObject:
d = TranslatePerfs(perf['ObjectName'], perf['PerformanceCounter'])
for k in d.keys():
names = '(' + reduce(lambda x, y: x + '|' + y, d[k]) + ')'
instances = re.sub(r'([><]|>|<)', '', perf['InstanceName'])
instances = re.sub(r'([*])', '.*', instances)
# omi_map_path will be set to the appropriate value whether or not we are multi-homed
new_source += '\n<source>\n type oms_omi\n object_name "' + k + '"\n instance_regex "' + instances + \
'"\n counter_name_regex "' + names + '"\n interval ' + \
str(perf['IntervalSeconds']) + 's\n omi_mapping_path ' + omi_map_path + '\n</source>\n'
m = heartbeat_srch.search(txt)
if m is not None:
i = m.end(0) + 1
txt = txt[:i] + new_source + txt[i:]
else:
txt = new_source
try:
codecs.open(conf_path, 'w', 'utf8').write(txt)
LG().Log(
'INFO', 'Created omsagent configuration at ' + conf_path + '.')
except:
LG().Log(
'ERROR', 'Unable to create omsagent configuration at ' + conf_path + '.')
return False
global oms_restart_cmd
process_to_restart = 'omsagent'
if workspace_specific:
oms_restart_cmd += ' ' + WorkspaceID
process_to_restart += '-' + WorkspaceID
if os.system(oms_restart_cmd) == 0:
LG().Log('INFO', 'Successfully restarted ' + process_to_restart + '.')
else:
LG().Log('ERROR', 'Error restarting ' + process_to_restart + '.')
return False
return True
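# Hedged illustration (not part of the original resource): UpdateOMSAgentConf()
# above emits oms_omi source sections of this shape; the counter values are
# made up and the mapping path is the non-multi-homed default.
EXAMPLE_PERF_SOURCE = '''
<source>
  type oms_omi
  object_name "Processor"
  instance_regex ".*"
  counter_name_regex "(% Processor Time)"
  interval 30s
  omi_mapping_path /etc/opt/microsoft/omsagent/conf/omsagent.d/omi_mapping.json
</source>
'''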
def CheckForOMIMappingPathInConf():
"""
Return true if the omi_mapping_path has been specified in all perf
sections in conf_path
"""
conf_path_txt = get_conf_path_text()
sources = search_for_perf_sections(conf_path_txt)
for source in sources:
try:
if 'omi_mapping_path' not in source[4]:
return False
except:
return False
return True
def get_conf_path_text():
"""
Get current text in the conf_path file
"""
txt = ''
if os.path.exists(conf_path):
try:
txt = codecs.open(conf_path, 'r', 'utf8').read().encode('ascii',
'ignore')
LG().Log('INFO', 'Read omsagent configuration ' + conf_path + '.')
except:
LG().Log('ERROR', 'Unable to read omsagent configuration ' + conf_path + '.')
else:
LG().Log('ERROR', 'No omsagent configuration file present.')
return txt
def search_for_perf_sections(txt):
"""
Returns all matches in txt for performance counter configuration
sections
"""
perf_src_srch_str = r'\n<source>\n type oms_omi.*?object_name "(.*?)".*?instance_regex "(.*?)".*?counter_name_regex "(.*?)".*?interval ([0-9]+[a-z])(.*?)</source>\n'
# Since this search uses re.S, newlines and omi_mapping_path will be
# matched
perf_src_srch = re.compile(perf_src_srch_str, re.M | re.S)
return perf_src_srch.findall(txt)
def prune_perfs(PerfCounterObject):
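    """
    Drop any requested counters (and, if emptied, whole entries) that have no
    match in omi_mapping.json, logging what is ignored.
    """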
l = len(PerfCounterObject)
i = 0
while i < l:
d = TranslatePerfs(PerfCounterObject[i]['ObjectName'], PerfCounterObject[i]['PerformanceCounter'])
if PerfCounterObject[i]['ObjectName'] in d.keys():
for p in PerfCounterObject[i]['PerformanceCounter']:
if p not in d[PerfCounterObject[i]['ObjectName']]:
LG().Log('INFO', 'No match for PerformanceCounter \'' \
+ p + '\' in ' \
+ repr(PerfCounterObject[i]['ObjectName']) + ' in omi_mapping.json, ignoring.')
PerfCounterObject[i]['PerformanceCounter'].remove(p)
if len(PerfCounterObject[i]['PerformanceCounter']) == 0:
PerfCounterObject.pop(i)
l -= 1
i -= 1
else:
LG().Log('INFO', 'No matches for ObjectName ' \
+ repr(PerfCounterObject[i]['ObjectName']) + ' and PerformanceCounter ' \
+ repr(PerfCounterObject[i]['PerformanceCounter']) + ' in omi_mapping.json, ignoring.')
PerfCounterObject.pop(i)
l -= 1
i -= 1
i += 1
| mit | -2,551,377,977,865,066,500 | 36.559078 | 172 | 0.596869 | false |
pombredanne/MOG | nova/openstack/common/rpc/__init__.py | 13 | 11696 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A remote procedure call (rpc) abstraction.
For some wrappers that add message versioning to rpc, see:
rpc.dispatcher
rpc.proxy
"""
import inspect
from oslo.config import cfg
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import importutils
from nova.openstack.common import local
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
rpc_opts = [
cfg.StrOpt('rpc_backend',
default='%s.impl_kombu' % __package__,
help="The messaging module to use, defaults to kombu."),
cfg.IntOpt('rpc_thread_pool_size',
default=64,
help='Size of RPC thread pool'),
cfg.IntOpt('rpc_conn_pool_size',
default=30,
help='Size of RPC connection pool'),
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
cfg.IntOpt('rpc_cast_timeout',
default=30,
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['nova.exception',
'cinder.exception',
'exceptions',
],
                help='Modules of exceptions that are permitted to be recreated '
'upon receiving exception data from an rpc call.'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
cfg.StrOpt('control_exchange',
default='openstack',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
def set_defaults(control_exchange):
cfg.set_defaults(rpc_opts,
control_exchange=control_exchange)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of openstack.common.rpc.common.Connection
"""
return _get_impl().create_connection(CONF, new=new)
def _check_for_lock():
if not CONF.debug:
return None
if ((hasattr(local.strong_store, 'locks_held')
and local.strong_store.locks_held)):
stack = ' :: '.join([frame[3] for frame in inspect.stack()])
LOG.warn(_('A RPC is being made while holding a lock. The locks '
'currently held are %(locks)s. This is probably a bug. '
'Please report it. Include the following: [%(stack)s].'),
{'locks': local.strong_store.locks_held,
'stack': stack})
return True
return False
def call(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:param check_for_lock: if True, a warning is emitted if a RPC call is made
with a lock held.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
if check_for_lock:
_check_for_lock()
return _get_impl().call(CONF, context, topic, msg, timeout)
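def _example_call(context):
    """Hedged usage sketch (not part of the original module): invoke a remote
    method over the 'compute' topic. The method and argument names are made up.
    """
    msg = {'method': 'get_host_uptime', 'args': {}}
    return call(context, 'compute', msg, timeout=30)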
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as they come in via
an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:param check_for_lock: if True, a warning is emitted if a RPC call is made
with a lock held.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
if check_for_lock:
_check_for_lock()
return _get_impl().multicall(CONF, context, topic, msg, timeout)
def notify(context, topic, msg, envelope=False):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:param envelope: Set to True to enable message envelope for notifications.
:returns: None
"""
return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
def cleanup():
"""Clean up resoruces in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(CONF, context, server_params, topic,
msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
"""Get a queue name for a given topic + host.
This function only works if this naming convention is followed on the
consumer side, as well. For example, in nova, every instance of the
nova-foo service calls create_consumer() for two topics:
foo
foo.<host>
Messages sent to the 'foo' topic are distributed to exactly one instance of
the nova-foo service. The services are chosen in a round-robin fashion.
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host) if host else topic
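def _queue_name_examples():
    """Hedged illustration (not part of the original module) of the naming
    convention documented above; queue_get_for ignores its context argument,
    so None is passed here.
    """
    assert queue_get_for(None, 'compute', 'host1') == 'compute.host1'
    assert queue_get_for(None, 'compute', None) == 'compute'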
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
try:
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older oslo.config.
impl = CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL
| apache-2.0 | -8,517,661,493,546,626,000 | 37.222222 | 79 | 0.621409 | false |
noisymime/zookeepr | zkpylons/controllers/location.py | 3 | 6620 | import logging
import vobject
from pylons import request, response, session, tmpl_context as c
from zkpylons.lib.helpers import redirect_to
from pylons.decorators import validate
from pylons.decorators.rest import dispatch_on
from formencode import validators, htmlfill, ForEach, Invalid
from formencode.variabledecode import NestedVariables
from zkpylons.lib.base import BaseController, render
from zkpylons.lib.ssl_requirement import enforce_ssl
from zkpylons.lib.validators import BaseSchema
import zkpylons.lib.helpers as h
from authkit.authorize.pylons_adaptors import authorize
from authkit.permissions import ValidAuthKitUser
from zkpylons.lib.mail import email
from zkpylons.model import meta
from zkpylons.model.location import Location
from zkpylons.config.lca_info import lca_info
log = logging.getLogger(__name__)
class LocationSchema(BaseSchema):
display_name = validators.String(not_empty=True)
display_order = validators.Int()
capacity = validators.Int()
class NewLocationSchema(BaseSchema):
location = LocationSchema()
pre_validators = [NestedVariables]
class EditLocationSchema(BaseSchema):
location = LocationSchema()
pre_validators = [NestedVariables]
class LocationController(BaseController):
@enforce_ssl(required_all=True)
def __before__(self, **kwargs):
c.can_edit = True
@authorize(h.auth.has_organiser_role)
@dispatch_on(POST="_new")
def new(self):
return render('/location/new.mako')
@authorize(h.auth.has_organiser_role)
@validate(schema=NewLocationSchema(), form='new', post_only=True, on_get=True, variable_decode=True)
def _new(self):
results = self.form_result['location']
c.location = Location(**results)
meta.Session.add(c.location)
meta.Session.commit()
h.flash("Location created")
redirect_to(action='index', id=None)
@authorize(h.auth.has_organiser_role)
def view(self, id):
c.location = Location.find_by_id(id)
return render('/location/view.mako')
@authorize(h.auth.has_organiser_role)
def index(self):
c.location_collection = Location.find_all()
return render('/location/list.mako')
@authorize(h.auth.has_organiser_role)
@dispatch_on(POST="_edit")
def edit(self, id):
c.location = Location.find_by_id(id)
defaults = h.object_to_defaults(c.location, 'location')
form = render('/location/edit.mako')
return htmlfill.render(form, defaults)
@authorize(h.auth.has_organiser_role)
@validate(schema=EditLocationSchema(), form='edit', post_only=True, on_get=True, variable_decode=True)
def _edit(self, id):
location = Location.find_by_id(id)
for key in self.form_result['location']:
setattr(location, key, self.form_result['location'][key])
# update the objects with the validated form data
meta.Session.commit()
h.flash("The Location has been updated successfully.")
redirect_to(action='index', id=None)
@authorize(h.auth.has_organiser_role)
@dispatch_on(POST="_delete")
def delete(self, id):
"""Delete the location
GET will return a form asking for approval.
POST requests will delete the item.
"""
c.location = Location.find_by_id(id)
return render('/location/confirm_delete.mako')
@authorize(h.auth.has_organiser_role)
@validate(schema=None, form='delete', post_only=True, on_get=True, variable_decode=True)
def _delete(self, id):
c.location = Location.find_by_id(id)
meta.Session.delete(c.location)
meta.Session.commit()
h.flash("Location has been deleted.")
redirect_to('index')
def ical(self, id):
c.schedule_collection = Location.find_by_id(id).schedule
ical = vobject.iCalendar()
for schedule in c.schedule_collection:
if not schedule.time_slot.heading:
event = ical.add('vevent')
event.add('uid').value = str(schedule.id) + '@' + h.lca_info['event_host']
# Created
event.add('created').value = schedule.creation_timestamp.replace(tzinfo=h.lca_info['time_zone'])
# Last Modified
event.add('dtstamp').value = schedule.last_modification_timestamp.replace(tzinfo=h.lca_info['time_zone'])
event.add('last-modified').value = schedule.last_modification_timestamp.replace(tzinfo=h.lca_info['time_zone'])
# Start and End Time
event.add('dtstart').value = schedule.time_slot.start_time.replace(tzinfo=h.lca_info['time_zone'])
event.add('dtend').value = schedule.time_slot.end_time.replace(tzinfo=h.lca_info['time_zone'])
# Title and Author (need to add Author here)
event.add('summary').value = schedule.event.computed_title() + '. ' + h.list_to_string(schedule.event.computed_speakers())
# Abstract, if we have one
event.add('description').value = schedule.event.computed_abstract()
# Add a URL
if schedule.event.proposal:
event.add('url').value = h.url_for(qualified=True, controller='schedule', action='view_talk', id=schedule.event.proposal.id)
elif not (schedule.event.url is None or schedule.event.url == ''):
if schedule.event.url.startswith('https://') or schedule.event.url.startswith('http://'):
event.add('url').value = h.url_for(str(schedule.event.url))
else:
event.add('url').value = h.url_for(str(schedule.event.url), qualified=True)
concurrent_schedules = schedule.event.schedule_by_time_slot(schedule.time_slot)
for concurrent_schedule in concurrent_schedules:
if concurrent_schedule != schedule:
if concurrent_schedule in c.schedule_collection:
c.schedule_collection.remove(concurrent_schedule)
locations = [concurrent_schedule.location.display_name for concurrent_schedule in concurrent_schedules]
event.add('location').value = h.list_to_string(locations)
response.charset = 'utf8'
response.headers['content-type'] = 'text/calendar; charset=utf8'
response.headers.add('content-transfer-encoding', 'binary')
response.headers.add('Pragma', 'cache')
response.headers.add('Cache-Control', 'max-age=3600,public')
return ical.serialize()
| gpl-2.0 | -7,040,212,445,718,217,000 | 39.365854 | 144 | 0.649245 | false |
PlanetHunt/planet-hunt | app/startup/init_app.py | 1 | 3214 | # Copyright 2014 SolidBuilds.com. All rights reserved
#
# Authors: Ling Thio <[email protected]>
import logging
from logging.handlers import SMTPHandler
from flask_mail import Mail
from flask_user import UserManager, SQLAlchemyAdapter
def init_app(app, db, extra_config_settings={}):
"""
    Initialize the Flask application
"""
# Initialize app config settings
app.config.from_object('app.startup.settings') # Read config from 'app/startup/settings.py' file
app.config.update(extra_config_settings) # Overwrite with 'extra_config_settings' parameter
if app.testing:
app.config['WTF_CSRF_ENABLED'] = False # Disable CSRF checks while testing
# Setup Flask-Mail
mail = Mail(app)
# Setup an error-logger to send emails to app.config.ADMINS
init_error_logger_with_email_handler(app)
# Setup Flask-User to handle user account related forms
from app.users.models import UserAuth, User
from app.users.forms import MyRegisterForm
from app.users.views import user_profile_page
db_adapter = SQLAlchemyAdapter(db, User, # Setup the SQLAlchemy DB Adapter
UserAuthClass=UserAuth) # using separated UserAuth/User data models
user_manager = UserManager(db_adapter, app, # Init Flask-User and bind to app
register_form=MyRegisterForm, # using a custom register form with UserProfile fields
user_profile_view_function = user_profile_page,
)
# Load all models.py files to register db.Models with SQLAlchemy
from app.users import models
# Load all views.py files to register @app.routes() with Flask
from app.pages import views
from app.users import views
from app.images import views
from app.likes import views
return app
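# Illustrative usage sketch only -- the project wires this up in its own factory
# module, and the 'app'/'db' objects below are assumptions, not part of this file:
#
#     from flask import Flask
#     from flask_sqlalchemy import SQLAlchemy
#
#     app = Flask(__name__)
#     db = SQLAlchemy(app)
#     app = init_app(app, db, extra_config_settings={'TESTING': True})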
def init_error_logger_with_email_handler(app):
"""
Initialize a logger to send emails on error-level messages.
Unhandled exceptions will now send an email message to app.config.ADMINS.
"""
if app.debug: return # Do not send error emails while developing
# Retrieve email settings from app.config
host = app.config['MAIL_SERVER']
port = app.config['MAIL_PORT']
from_addr = app.config['MAIL_DEFAULT_SENDER']
username = app.config['MAIL_USERNAME']
password = app.config['MAIL_PASSWORD']
secure = () if app.config.get('MAIL_USE_TLS') else None
# Retrieve app settings from app.config
to_addr_list = app.config['ADMINS']
subject = app.config.get('APP_SYSTEM_ERROR_SUBJECT_LINE', 'System Error')
# Setup an SMTP mail handler for error-level messages
mail_handler = SMTPHandler(
mailhost=(host, port), # Mail host and port
fromaddr=from_addr, # From address
toaddrs=to_addr_list, # To address
subject=subject, # Subject line
credentials=(username, password), # Credentials
secure=secure,
)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
# Log errors using: app.logger.error('Some error message')
| bsd-2-clause | 8,879,478,152,464,151,000 | 38.679012 | 110 | 0.658681 | false |
robbi/pyload | module/plugins/hoster/SenditCloud.py | 7 | 1339 | # -*- coding: utf-8 -*-
from ..internal.SimpleHoster import SimpleHoster
class SenditCloud(SimpleHoster):
__name__ = "SenditCloud"
__type__ = "hoster"
__version__ = "0.01"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?sendit\.cloud/\w+'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Sendit.cloud hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
NAME_PATTERN = r'<header class="widget-header ">.+>\s*(.+?)\s*</header>'
SIZE_PATTERN = r'<b>Download</b> <font color="#FFFFFF">\((?P<S>[\d.,]+) (?P<U>[\w_^]+)\)<'
OFFLINE_PATTERN = r'The file you are trying to download is no longer available'
def setup(self):
self.multiDL = True
self.resume_download = False
def handle_free(self, pyfile):
url, inputs = self.parse_html_form('name="F1"')
if inputs is not None:
self.download(pyfile.url, post=inputs) | gpl-3.0 | 3,190,155,604,643,007,500 | 37.285714 | 95 | 0.566094 | false |
QuicklyRainbow/FieldGuideAU | Flask_App/utils/models/data_service.py | 1 | 7774 | __author__ = 'lachlan'
import logging
class ServiceLayerError(Exception):
pass
class DataService(object):
def __init__(self, db_session):
self.db = db_session
self.tablename = None
self.bad_keys = ['id'] # Fields that should not be created or updated using uploaded data. They need to be created server side.
self.valid_fields = [] # Fields that can be updated using uploaded data
self.relationship_fields = {} # Fields that describe relationships with other objects (i.e. other database tables)
self.has_version_management = False
self.logger = logging.getLogger(__name__)
self.key = ''
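    # Illustrative sketch of a concrete service subclass (model and field names
    # below are hypothetical; real subclasses live elsewhere in the project):
    #
    #     class BirdService(DataService):
    #         def __init__(self, db_session):
    #             super(BirdService, self).__init__(db_session)
    #             self.tablename = Bird              # SQLAlchemy model class
    #             self.key = 'bird'                  # enables 'bird_name' lookups
    #             self.valid_fields = ['bird_name', 'description']
    #             self.relationship_fields = {
    #                 'sightings': {'service': SightingService, 'many': True},
    #             }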
def create(self, **fields):
fields = self.create_server_side_fields(**fields)
try:
direct_fields = {}
for key in self.valid_fields:
if key in fields.keys():
direct_fields[key] = fields.pop(key)
record = self.tablename(**direct_fields)
for key in self.relationship_fields.keys():
if key in fields.keys():
service_layer = self.relationship_fields[key]['service'](self.db)
if self.relationship_fields[key]['many']:
for item_id in fields.pop(key):
item_obj = service_layer.get_one(int(item_id))
getattr(record, key).append(item_obj)
else:
item_id = fields.pop(key)
item_obj = service_layer.get_one(int(item_id))
setattr(record, key, item_obj)
if len(fields.keys()) is 0:
self.db.add(record)
self.db.commit()
return self.get_one(record.id)
else:
raise ServiceLayerError("Surplus keys detected while creating record")
except Exception as e:
self.logger.error("Error occurred while creating record with error msg: %s" % (str(e)))
self.db.rollback()
raise ServiceLayerError("Error occurred while creating record with error msg: %s" % (str(e)))
def update(self, id, **fields):
if self.exists(id):
try:
for key in self.bad_keys:
if key in fields.keys():
del fields[key]
direct_fields = {}
for key in self.valid_fields:
if key in fields.keys():
direct_fields[key] = fields.pop(key)
if len(direct_fields.keys()) > 0:
self.db.query(self.tablename).filter(self.tablename.id == id).update(direct_fields)
record = self.get_one(id)
for key in self.relationship_fields.keys():
if key in fields.keys():
service_layer = self.relationship_fields[key]['service'](self.db)
if self.relationship_fields[key]['many']:
setattr(record, key, [])
for item_id in fields.pop(key):
item_obj = service_layer.get_one(int(item_id))
getattr(record, key).append(item_obj)
else:
item_id = fields.pop(key)
item_obj = service_layer.get_one(int(item_id))
setattr(record, key, item_obj)
if len(fields.keys()) is 0:
self.db.add(record)
self.db.commit()
return self.get_one(record.id)
else:
raise ServiceLayerError("Surplus keys detected while updating record")
except Exception as e:
self.logger.error("Error occurred while updating record with error msg: %s" % (str(e)))
self.db.rollback()
raise ServiceLayerError("Error occurred while updating record with error msg: %s" % (str(e)))
else:
            raise ServiceLayerError("Record with id %s does not exist" % id)
def get_one(self, id):
try:
if self.exists(id):
return self.db.query(self.tablename).get(id)
else:
return None
except Exception as e:
self.logger.error("Error occurred while retrieving individual record with error msg: %s" % (str(e)))
self.db.rollback()
raise ServiceLayerError("Error occurred while retrieving individual record with error msg: %s" % (str(e)))
def get_many(self):
try:
req = self.db.query(self.tablename).all()
if req == []:
return None
else:
return req
except Exception as e:
self.logger.error("Error occurred while retrieving multiple records with error msg: %s" % (str(e)))
self.db.rollback()
raise ServiceLayerError("Error occurred while retrieving multiple records with error msg: %s" % (str(e)))
def delete(self, id):
try:
if self.exists(id):
record = self.get_one(id)
self.db.delete(record)
self.db.commit()
return record
else:
return None
except Exception as e:
self.logger.error("Error occurred while deleting record with error msg: %s" % (str(e)))
self.db.rollback()
raise ServiceLayerError("Error occurred while deleting record with error msg: %s" % (str(e)))
def exists(self, id):
if self.db.query(self.tablename).get(id) == None:
return False
else:
return True
def get_by_name(self, name):
key = '{0}_name'.format(self.key)
try:
req = self.db.query(self.tablename).filter(getattr(self.tablename, key) == name).first()
if req is None:
return None
else:
return req
except Exception as e:
self.logger.error("Error occurred while retrieving individual record by name: {0} with error msg: {1}".format(name, e))
self.db.rollback()
raise ServiceLayerError("Error occurred while retrieving individual record by name: {0} with error msg: {1}".format(name, e))
def get_id_by_name(self, name):
try:
req = self.get_by_name(name)
if req is None:
return None
else:
return req.id
except Exception as e:
self.logger.error("Error occurred while retrieving individual record id by name: {0} with error msg: {1}".format(name, e))
self.db.rollback()
raise ServiceLayerError("Error occurred while retrieving individual record id by name: {0} with error msg: {1}".format(name, e))
def update_or_create(self, **fields):
key = '{0}_name'.format(self.key)
if key in fields.keys():
try:
id = self.get_id_by_name(fields[key])
if id is None:
return self.create(**fields)
else:
return self.update(id, **fields)
except Exception as e:
self.logger.error("Error occurred in update or create with data: {0} and error msg: {1}".format(fields, e))
self.db.rollback()
raise ServiceLayerError("Error occurred in update or create with data: {0} and error msg: {1}".format(fields, e))
else:
return None
def create_server_side_fields(self, **fields):
return fields
def update_server_side_fields(self, **fields):
return fields | mit | 1,720,140,140,062,181,400 | 41.255435 | 140 | 0.530486 | false |
turbulenz/turbulenz_tools | turbulenz_tools/tools/xml2json.py | 1 | 3268 | #!/usr/bin/env python
# Copyright (c) 2010-2013 Turbulenz Limited
import logging
from re import sub
from optparse import OptionParser, OptionGroup, TitledHelpFormatter
from turbulenz_tools.utils.xml_json import xml2json
from turbulenz_tools.tools.stdtool import simple_options
__version__ = '1.0.0'
__dependencies__ = ['turbulenz_tools.utils.xml_json']
LOG = logging.getLogger(__name__)
def _parser():
usage = "usage: %prog [options] -i source.xml -o output.json"
description = "Convert XML assets into a structured JSON asset."
parser = OptionParser(description=description, usage=usage, formatter=TitledHelpFormatter())
parser.add_option("--version", action="store_true", dest="output_version", default=False,
help="output version number")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="verbose output")
parser.add_option("-s", "--silent", action="store_true", dest="silent", default=False, help="silent running")
parser.add_option("-m", "--metrics", action="store_true", dest="metrics", default=False,
help="output asset metrics")
parser.add_option("-i", "--input", action="store", dest="input", help="input XML file to process")
parser.add_option("-o", "--output", action="store", dest="output", help="output JSON file to process")
group = OptionGroup(parser, "Asset Generation Options")
group.add_option("-j", "--json-indent", action="store", dest="json_indent", type="int", default=0, metavar="SIZE",
help="json output pretty printing indent size, defaults to 0")
group.add_option("-n", "--namespace", action="store_true", dest="namespace", default=False,
help="maintain XML xmlns namespace in JSON asset keys.")
group.add_option("-c", "--convert-types", action="store_true", dest="convert_types", default=False,
help="attempt to convert values to ints, floats and lists.")
parser.add_option_group(group)
return parser
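# Example invocation (file names are hypothetical), using the options above:
#
#     python xml2json.py -i asset.dae -o asset.json --json-indent 4 --convert-types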
def main():
(options, args_, parser_) = simple_options(_parser, __version__, __dependencies__)
try:
with open(options.input) as xml_file:
xml_string = xml_file.read()
# At the moment there doesn't seem to be an obvious way to extract the xmlns from the asset.
# For now, we'll attempt to just remove it before transforming it into a Python object.
# <COLLADA xmlns="http://www.collada.org/2005/11/COLLADASchema" version="1.4.1">
# ==>
# <COLLADA version="1.4.1">
if options.namespace is False:
xml_string = sub(' xmlns="[^"]*"', '', xml_string)
json_string = xml2json(xml_string, indent=options.json_indent, convert_types=options.convert_types)
if options.output:
with open(options.output, 'w') as target:
target.write(json_string)
target.write('\n')
else:
print json_string
except IOError as e:
LOG.error(e)
return e.errno
except Exception as e:
LOG.critical('Unexpected exception: %s', e)
return 1
if __name__ == "__main__":
exit(main())
| mit | 4,993,451,341,559,350,000 | 42 | 118 | 0.624847 | false |
kercos/PickMeUp | route.py | 1 | 5323 | # -*- coding: utf-8 -*-
from google.appengine.ext import ndb
from utility import convertToUtfIfNeeded
class Route(ndb.Model):
#percorso = ndb.StringProperty() # id
percorso_info = ndb.PickleProperty()
# list of route info:
# for each route ->
# {route_intermediates_fermate: <list>,
# route_duration: <num> (seconds),
# route_distance: <num> (meters)}
fermate_intermedie = ndb.StringProperty(repeated=True) # set of fermate intermedie
percorsi_passeggeri_compatibili = ndb.StringProperty(repeated=True) # set of percorsi compatibili
average_distance = ndb.ComputedProperty(lambda self: self.getAverageDistance())
average_duration = ndb.ComputedProperty(lambda self: self.getAverageDuration())
def getPercorso(self):
return self.key.id()
def hasDetails(self):
return self.percorso_info is not None
def getPercorsiPasseggeriCompatibili(self):
return [convertToUtfIfNeeded(x) for x in self.percorsi_passeggeri_compatibili]
def getNumberPercorsiPasseggeriCompatibili(self):
return len(self.percorsi_passeggeri_compatibili)
def getFermateIntermedie(self):
return [convertToUtfIfNeeded(x) for x in self.fermate_intermedie]
def getAverageDistance(self):
from utility import format_distance
assert self.percorso_info
distances = [r_info['route_distance'] for r_info in self.percorso_info]
avg_km = sum(distances) / float(len(distances)) / 1000
return format_distance(avg_km)
def getAverageDuration(self):
import date_time_util as dtu
assert self.percorso_info
durations = [r_info['route_duration'] for r_info in self.percorso_info]
avg = sum(durations) / float(len(durations))
return dtu.convertSecondsInHourMinString(avg)
def getFermateIntermedieRoutes(self):
assert self.percorso_info
return [r_info['route_intermediates_fermate'] for r_info in self.percorso_info]
def populateWithDetails(self, put=True):
import routing_util
import itertools
self.percorso_info = routing_util.getRoutingDetails(self.getPercorso())
# a list of route info: for each route -> {route_intermediates_fermate, route_duration, route_distance}
fermate_intermedie_set = set()
percorsi_compatibili_set = set()
if self.percorso_info:
for r_info in self.percorso_info:
fermate = r_info['route_intermediates_fermate']
fermate_intermedie_set.update(fermate)
fermate_pairs = tuple(itertools.combinations(fermate, 2))
for pair in fermate_pairs:
percorso = routing_util.encodePercorso(*pair)
percorsi_compatibili_set.add(percorso)
self.fermate_intermedie = list(fermate_intermedie_set)
self.percorsi_passeggeri_compatibili = list(percorsi_compatibili_set)
if put:
self.put()
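    # Illustrative example (stop names are made up): for intermediate stops
    # ['A', 'B', 'C'] the pairwise combinations are
    #
    #     tuple(itertools.combinations(['A', 'B', 'C'], 2))
    #     # -> (('A', 'B'), ('A', 'C'), ('B', 'C'))
    #
    # and each pair is encoded into a compatible passenger percorso via
    # routing_util.encodePercorso(*pair).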
def getDetails(self):
from utility import format_distance
import date_time_util as dtu
msg = []
msg.append('{} tragitto/i trovati per viaggio\n*{}*:\n'.
format(len(self.percorso_info), self.getPercorso()))
for n, r_info in enumerate(self.percorso_info, 1):
msg.append('*{}.*'.format(n))
distance = format_distance(float(r_info['route_distance']) / 1000)
duration = dtu.convertSecondsInHourMinString(r_info['route_duration'])
fermate_intermedie_str = ', '.join(r_info['route_intermediates_fermate'])
msg.append(' ∙ Fermate intermedie: {}'.format(fermate_intermedie_str))
msg.append(' ∙ Distanza: {}'.format(distance))
msg.append(' ∙ Durata: {}'.format(duration))
num_percorsi_compatibili = len(self.getPercorsiPasseggeriCompatibili())
msg.append('\n{} percorso/i passeggeri compatibilie.'.format(num_percorsi_compatibili))
#percorsi_compatibili_str = ', '.join(self.getPercorsiPasseggeriCompatibili())
#msg.append('\n{} percorso/i passeggeri compatibilie: {}'.format(
# num_percorsi_compatibili, percorsi_compatibili_str))
return '\n'.join(msg)
def addRoute(percorso):
r = Route(
id=percorso,
)
#r.put() always after populatePercorsoWithDetails
return r
def getRouteAddIfNotPresent(percorso):
r = Route.get_by_id(percorso)
if r is None:
r = Route(
id=percorso,
)
#r.put() always after populatePercorsoWithDetails
return r
def getPercorsiCompatibili(percorso_passeggero):
qry_routes = Route.query(
Route.percorsi_passeggeri_compatibili == percorso_passeggero
)
percorsi_compatibili = [r.getPercorso() for r in qry_routes.fetch()]
return percorsi_compatibili
def populateRoutesWithDetails():
more, cursor = True, None
while more:
records, cursor, more = Route.query().fetch_page(100, start_cursor=cursor)
print 'Updating {} records'.format(len(records))
for n, ent in enumerate(records, 1):
print '{}) {}'.format(n, ent.getPercorso().encode('utf-8'))
ent.populateWithDetails(put=False)
create_futures = ndb.put_multi_async(records)
ndb.Future.wait_all(create_futures)
| apache-2.0 | -2,114,500,600,788,252,000 | 39.9 | 111 | 0.657326 | false |
chunywang/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_media-src_corss-origin_video_blocked_int.py | 30 | 3099 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "media-src http://www.w3.org; script-src 'self' 'unsafe-inline'"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Zhang, Zhiqiang <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_media-src_cross-origin_video_blocked_int</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#media-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="media-src http://www.w3.org; script-src 'self' 'unsafe-inline'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<video id="m"></video>
<script>
var t = async_test(document.title);
var m = document.getElementById("m");
m.src = "support/khronos/red-green.theora.ogv";
window.setTimeout(function() {
t.step(function() {
assert_true(m.currentSrc == "",
"video.currentSrc should be empty after setting src attribute");
});
t.done();
}, 0);
</script>
</body>
</html> """
| bsd-3-clause | 7,233,694,666,239,378,000 | 42.647887 | 98 | 0.68635 | false |
kanak87/oldboy_rep | yong_celeb_recognize/image.py | 1 | 1069 | import cv2
from matplotlib import pyplot
import numpy as np
def read_sample(filenames):
images = []
for filename in filenames:
image = cv2.imread(filename)
image = cv2.resize(image, (96, 96))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image_array = []
for y in range(0, 96, 1):
for x in range(0, 96, 1):
image_array.append((image[y][x] / 255.))
image_array = np.array(image_array)
image_array = image_array.astype(np.float32)
images.append(image_array)
return np.vstack(images)
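# Illustrative call (file names are assumptions): each image becomes one row of
# 96 * 96 = 9216 grayscale values scaled to [0, 1]:
#
#     X = read_sample(['faces/a.jpg', 'faces/b.jpg'])
#     # X.shape == (2, 9216), dtype float32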
def plot_sample(x, y, axis):
img = x.reshape(96, 96)
axis.imshow(img, cmap='gray')
axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)
def draw_result(X, y):
fig = pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(
left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(X.shape[0]):
ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
plot_sample(X[i], y[i], ax)
pyplot.show() | mit | 6,226,666,312,204,681,000 | 24.47619 | 72 | 0.573433 | false |
daineseh/kodi-plugin.video.ted-talks-chinese | youtube_dl/extractor/tnaflix.py | 10 | 11477 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_duration,
str_to_int,
xpath_text,
)
class TNAFlixNetworkBaseIE(InfoExtractor):
# May be overridden in descendants if necessary
_CONFIG_REGEX = [
r'flashvars\.config\s*=\s*escape\("([^"]+)"',
r'<input[^>]+name="config\d?" value="([^"]+)"',
]
_TITLE_REGEX = r'<input[^>]+name="title" value="([^"]+)"'
_DESCRIPTION_REGEX = r'<input[^>]+name="description" value="([^"]+)"'
_UPLOADER_REGEX = r'<input[^>]+name="username" value="([^"]+)"'
_VIEW_COUNT_REGEX = None
_COMMENT_COUNT_REGEX = None
_AVERAGE_RATING_REGEX = None
_CATEGORIES_REGEX = r'<li[^>]*>\s*<span[^>]+class="infoTitle"[^>]*>Categories:</span>\s*<span[^>]+class="listView"[^>]*>(.+?)</span>\s*</li>'
def _extract_thumbnails(self, flix_xml):
def get_child(elem, names):
for name in names:
child = elem.find(name)
if child is not None:
return child
timeline = get_child(flix_xml, ['timeline', 'rolloverBarImage'])
if timeline is None:
return
pattern_el = get_child(timeline, ['imagePattern', 'pattern'])
if pattern_el is None or not pattern_el.text:
return
first_el = get_child(timeline, ['imageFirst', 'first'])
last_el = get_child(timeline, ['imageLast', 'last'])
if first_el is None or last_el is None:
return
first_text = first_el.text
last_text = last_el.text
if not first_text.isdigit() or not last_text.isdigit():
return
first = int(first_text)
last = int(last_text)
if first > last:
return
width = int_or_none(xpath_text(timeline, './imageWidth', 'thumbnail width'))
height = int_or_none(xpath_text(timeline, './imageHeight', 'thumbnail height'))
return [{
'url': self._proto_relative_url(pattern_el.text.replace('#', compat_str(i)), 'http:'),
'width': width,
'height': height,
} for i in range(first, last + 1)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id') if 'display_id' in mobj.groupdict() else video_id
webpage = self._download_webpage(url, display_id)
cfg_url = self._proto_relative_url(self._html_search_regex(
self._CONFIG_REGEX, webpage, 'flashvars.config', default=None), 'http:')
if not cfg_url:
inputs = self._hidden_inputs(webpage)
cfg_url = 'https://cdn-fck.tnaflix.com/tnaflix/%s.fid?key=%s' % (inputs['vkey'], inputs['nkey'])
cfg_xml = self._download_xml(
cfg_url, display_id, 'Downloading metadata',
transform_source=fix_xml_ampersands)
formats = []
def extract_video_url(vl):
return re.sub('speed=\d+', 'speed=', vl.text)
video_link = cfg_xml.find('./videoLink')
if video_link is not None:
formats.append({
'url': extract_video_url(video_link),
'ext': xpath_text(cfg_xml, './videoConfig/type', 'type', default='flv'),
})
for item in cfg_xml.findall('./quality/item'):
video_link = item.find('./videoLink')
if video_link is None:
continue
res = item.find('res')
format_id = None if res is None else res.text
height = int_or_none(self._search_regex(
r'^(\d+)[pP]', format_id, 'height', default=None))
formats.append({
'url': self._proto_relative_url(extract_video_url(video_link), 'http:'),
'format_id': format_id,
'height': height,
})
self._sort_formats(formats)
thumbnail = self._proto_relative_url(
xpath_text(cfg_xml, './startThumb', 'thumbnail'), 'http:')
thumbnails = self._extract_thumbnails(cfg_xml)
title = self._html_search_regex(
self._TITLE_REGEX, webpage, 'title') if self._TITLE_REGEX else self._og_search_title(webpage)
age_limit = self._rta_search(webpage) or 18
duration = parse_duration(self._html_search_meta(
'duration', webpage, 'duration', default=None))
def extract_field(pattern, name):
return self._html_search_regex(pattern, webpage, name, default=None) if pattern else None
description = extract_field(self._DESCRIPTION_REGEX, 'description')
uploader = extract_field(self._UPLOADER_REGEX, 'uploader')
view_count = str_to_int(extract_field(self._VIEW_COUNT_REGEX, 'view count'))
comment_count = str_to_int(extract_field(self._COMMENT_COUNT_REGEX, 'comment count'))
average_rating = float_or_none(extract_field(self._AVERAGE_RATING_REGEX, 'average rating'))
categories_str = extract_field(self._CATEGORIES_REGEX, 'categories')
categories = [c.strip() for c in categories_str.split(',')] if categories_str is not None else []
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'thumbnails': thumbnails,
'duration': duration,
'age_limit': age_limit,
'uploader': uploader,
'view_count': view_count,
'comment_count': comment_count,
'average_rating': average_rating,
'categories': categories,
'formats': formats,
}
class TNAFlixNetworkEmbedIE(TNAFlixNetworkBaseIE):
_VALID_URL = r'https?://player\.(?:tna|emp)flix\.com/video/(?P<id>\d+)'
_TITLE_REGEX = r'<title>([^<]+)</title>'
_TESTS = [{
'url': 'https://player.tnaflix.com/video/6538',
'info_dict': {
'id': '6538',
'display_id': '6538',
'ext': 'mp4',
'title': 'Educational xxx video',
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://player.empflix.com/video/33051',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return [url for _, url in re.findall(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.(?:tna|emp)flix\.com/video/\d+)\1',
webpage)]
class TNAFlixIE(TNAFlixNetworkBaseIE):
_VALID_URL = r'https?://(?:www\.)?tnaflix\.com/[^/]+/(?P<display_id>[^/]+)/video(?P<id>\d+)'
_TITLE_REGEX = r'<title>(.+?) - TNAFlix Porn Videos</title>'
_DESCRIPTION_REGEX = r'<meta[^>]+name="description"[^>]+content="([^"]+)"'
_UPLOADER_REGEX = r'<i>\s*Verified Member\s*</i>\s*<h1>(.+?)</h1>'
_CATEGORIES_REGEX = r'(?s)<span[^>]*>Categories:</span>(.+?)</div>'
_TESTS = [{
# anonymous uploader, no categories
'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878',
'md5': '7e569419fe6d69543d01e6be22f5f7c4',
'info_dict': {
'id': '553878',
'display_id': 'Carmella-Decesare-striptease',
'ext': 'mp4',
'title': 'Carmella Decesare - striptease',
'thumbnail': 're:https?://.*\.jpg$',
'duration': 91,
'age_limit': 18,
'categories': ['Porn Stars'],
}
}, {
# non-anonymous uploader, categories
'url': 'https://www.tnaflix.com/teen-porn/Educational-xxx-video/video6538',
'md5': 'fcba2636572895aba116171a899a5658',
'info_dict': {
'id': '6538',
'display_id': 'Educational-xxx-video',
'ext': 'flv',
'title': 'Educational xxx video',
'description': 'md5:b4fab8f88a8621c8fabd361a173fe5b8',
'thumbnail': 're:https?://.*\.jpg$',
'duration': 164,
'age_limit': 18,
'uploader': 'bobwhite39',
'categories': ['Amateur Porn', 'Squirting Videos', 'Teen Girls 18+'],
}
}, {
'url': 'https://www.tnaflix.com/amateur-porn/bunzHD-Ms.Donk/video358632',
'only_matching': True,
}]
class EMPFlixIE(TNAFlixNetworkBaseIE):
_VALID_URL = r'https?://(?:www\.)?empflix\.com/videos/(?P<display_id>.+?)-(?P<id>[0-9]+)\.html'
_UPLOADER_REGEX = r'<span[^>]+class="infoTitle"[^>]*>Uploaded By:</span>(.+?)</li>'
_TESTS = [{
'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
'md5': 'b1bc15b6412d33902d6e5952035fcabc',
'info_dict': {
'id': '33051',
'display_id': 'Amateur-Finger-Fuck',
'ext': 'mp4',
'title': 'Amateur Finger Fuck',
'description': 'Amateur solo finger fucking.',
'thumbnail': 're:https?://.*\.jpg$',
'duration': 83,
'age_limit': 18,
'uploader': 'cwbike',
'categories': ['Amateur', 'Anal', 'Fisting', 'Home made', 'Solo'],
}
}, {
'url': 'http://www.empflix.com/videos/[AROMA][ARMD-718]-Aoi-Yoshino-Sawa-25826.html',
'only_matching': True,
}]
class MovieFapIE(TNAFlixNetworkBaseIE):
_VALID_URL = r'https?://(?:www\.)?moviefap\.com/videos/(?P<id>[0-9a-f]+)/(?P<display_id>[^/]+)\.html'
_VIEW_COUNT_REGEX = r'<br>Views\s*<strong>([\d,.]+)</strong>'
_COMMENT_COUNT_REGEX = r'<span[^>]+id="comCount"[^>]*>([\d,.]+)</span>'
_AVERAGE_RATING_REGEX = r'Current Rating\s*<br>\s*<strong>([\d.]+)</strong>'
_CATEGORIES_REGEX = r'(?s)<div[^>]+id="vid_info"[^>]*>\s*<div[^>]*>.+?</div>(.*?)<br>'
_TESTS = [{
# normal, multi-format video
'url': 'http://www.moviefap.com/videos/be9867c9416c19f54a4a/experienced-milf-amazing-handjob.html',
'md5': '26624b4e2523051b550067d547615906',
'info_dict': {
'id': 'be9867c9416c19f54a4a',
'display_id': 'experienced-milf-amazing-handjob',
'ext': 'mp4',
'title': 'Experienced MILF Amazing Handjob',
'description': 'Experienced MILF giving an Amazing Handjob',
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
'uploader': 'darvinfred06',
'view_count': int,
'comment_count': int,
'average_rating': float,
'categories': ['Amateur', 'Masturbation', 'Mature', 'Flashing'],
}
}, {
# quirky single-format case where the extension is given as fid, but the video is really an flv
'url': 'http://www.moviefap.com/videos/e5da0d3edce5404418f5/jeune-couple-russe.html',
'md5': 'fa56683e291fc80635907168a743c9ad',
'info_dict': {
'id': 'e5da0d3edce5404418f5',
'display_id': 'jeune-couple-russe',
'ext': 'flv',
'title': 'Jeune Couple Russe',
'description': 'Amateur',
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
'uploader': 'whiskeyjar',
'view_count': int,
'comment_count': int,
'average_rating': float,
'categories': ['Amateur', 'Teen'],
}
}]
| gpl-2.0 | -8,878,899,810,516,388,000 | 36.877888 | 145 | 0.541431 | false |
alhashash/odoo | addons/hr_holidays/report/holidays_summary_report.py | 7 | 7334 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
class HrHolidaySummaryReport(osv.AbstractModel):
_name = 'report.hr_holidays.report_holidayssummary'
def _get_header_info(self, start_date, holiday_type):
return {
'start_date': datetime.strptime(start_date, DEFAULT_SERVER_DATE_FORMAT).strftime('%Y-%m-%d'),
'end_date': (datetime.strptime(start_date, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(days=59)).strftime('%Y-%m-%d'),
'holiday_type': 'Confirmed and Approved' if holiday_type == 'both' else holiday_type
}
def _get_day(self, start_date):
res = []
start_date = datetime.strptime(start_date, DEFAULT_SERVER_DATE_FORMAT)
for x in range(0, 60):
color = '#ababab' if start_date.strftime('%a') == 'Sat' or start_date.strftime('%a') == 'Sun' else ''
res.append({'day_str': start_date.strftime('%a'), 'day': start_date.day , 'color': color})
start_date = start_date + relativedelta(days=1)
return res
def _get_months(self, start_date):
        # Gets the month names (and day counts) between the two dates.
res = []
start_date = datetime.strptime(start_date, DEFAULT_SERVER_DATE_FORMAT)
end_date = start_date + relativedelta(days=59)
while start_date <= end_date:
last_date = start_date + relativedelta(day=1, months=+1, days=-1)
if last_date > end_date:
last_date = end_date
month_days = (last_date - start_date).days + 1
res.append({'month_name': start_date.strftime('%B'), 'days': month_days})
start_date += relativedelta(day=1, months=+1)
return res
def _get_leaves_summary(self, cr, uid, ids, start_date, empid, holiday_type, context=None):
res = []
count = 0
start_date = datetime.strptime(start_date, DEFAULT_SERVER_DATE_FORMAT)
end_date = start_date + relativedelta(days=59)
for index in range(0, 60):
current = start_date + timedelta(index)
res.append({'day': current.day, 'color': ''})
if current.strftime('%a') == 'Sat' or current.strftime('%a') == 'Sun':
res[index]['color'] = '#ababab'
# count and get leave summary details.
holidays_obj = self.pool['hr.holidays']
holiday_type = ['confirm','validate'] if holiday_type == 'both' else ['confirm'] if holiday_type == 'Confirmed' else ['validate']
holidays_ids = holidays_obj.search(cr, uid, [('employee_id', '=', empid), ('state', 'in', holiday_type), ('type', '=', 'remove'), ('date_from', '<=', str(end_date)), ('date_to', '>=', str(start_date))], context=context)
for holiday in holidays_obj.browse(cr, uid, holidays_ids, context=context):
date_from = datetime.strptime(holiday.date_from, DEFAULT_SERVER_DATETIME_FORMAT)
date_to = datetime.strptime(holiday.date_to, DEFAULT_SERVER_DATETIME_FORMAT)
for index in range(0, ((date_to - date_from).days + 1)):
if date_from >= start_date and date_from <= end_date:
res[(date_from-start_date).days]['color'] = holiday.holiday_status_id.color_name
count+=1
date_from += timedelta(1)
self.sum = count
return res
def _get_data_from_report(self, cr, uid, ids, data, context=None):
res = []
emp_obj = self.pool['hr.employee']
department_obj = self.pool['hr.department']
if 'depts' in data:
for department in department_obj.browse(cr, uid, data['depts'], context=context):
res.append({'dept' : department.name, 'data': [], 'color': self._get_day(data['date_from'])})
employee_ids = emp_obj.search(cr, uid, [('department_id', '=', department.id)], context=context)
employees = emp_obj.browse(cr, uid, employee_ids, context=context)
for emp in employees:
res[len(res)-1]['data'].append({
'emp': emp.name,
'display': self._get_leaves_summary(cr, uid, ids, data['date_from'], emp.id, data['holiday_type'], context=context),
'sum': self.sum
})
elif 'emp' in data:
employees = emp_obj.browse(cr, uid, data['emp'], context=context)
res.append({'data':[]})
for emp in employees:
res[0]['data'].append({
'emp': emp.name,
'display': self._get_leaves_summary(cr, uid, ids, data['date_from'], emp.id, data['holiday_type'], context=context),
'sum': self.sum
})
return res
def _get_holidays_status(self, cr, uid, ids, context=None):
res = []
holiday_obj = self.pool['hr.holidays.status']
holiday_ids = holiday_obj.search(cr, uid, [], context=context)
holiday_datas = holiday_obj.browse(cr, uid, holiday_ids, context=context)
for holiday in holiday_datas:
res.append({'color': holiday.color_name, 'name': holiday.name})
return res
def render_html(self, cr, uid, ids, data=None, context=None):
report_obj = self.pool['report']
holidays_report = report_obj._get_report_from_name(cr, uid, 'hr_holidays.report_holidayssummary')
selected_records = self.pool['hr.holidays'].browse(cr, uid, ids, context=context)
docargs = {
'doc_ids': ids,
'doc_model': holidays_report.model,
'docs': selected_records,
'get_header_info': self._get_header_info(data['form']['date_from'], data['form']['holiday_type']),
'get_day': self._get_day(data['form']['date_from']),
'get_months': self._get_months(data['form']['date_from']),
'get_data_from_report': self._get_data_from_report(cr, uid, ids, data['form'], context=context),
'get_holidays_status': self._get_holidays_status(cr, uid, ids, context=context),
}
return report_obj.render(cr, uid, ids, 'hr_holidays.report_holidayssummary', docargs, context=context) | agpl-3.0 | -5,709,277,890,058,954,000 | 53.333333 | 227 | 0.585901 | false |
selahssea/ggrc-core | src/ggrc/converters/handlers/custom_attribute.py | 4 | 7690 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Handlers used for custom attribute columns."""
from dateutil.parser import parse
from sqlalchemy import and_
from ggrc import models
from ggrc.converters import errors
from ggrc.converters.handlers import handlers
_types = models.CustomAttributeDefinition.ValidTypes
class CustomAttributeColumHandler(handlers.TextColumnHandler):
"""Custom attribute column handler
  This is a handler for all types of custom attribute columns. It works with
  any custom attribute definition and with the mandatory flag on or off.
"""
_type_handlers = {
_types.TEXT: lambda self: self.get_text_value(),
_types.DATE: lambda self: self.get_date_value(),
_types.DROPDOWN: lambda self: self.get_dropdown_value(),
_types.CHECKBOX: lambda self: self.get_checkbox_value(),
_types.RICH_TEXT: lambda self: self.get_rich_text_value(),
_types.MAP: lambda self: self.get_person_value(),
}
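  # Dispatch example (illustrative cell values, not taken from real data):
  #
  #     attribute_type "Date"       -> get_date_value():     "2017-05-30" -> ISO date string
  #     attribute_type "Checkbox"   -> get_checkbox_value(): "yes"/"true" -> True
  #     attribute_type "Map:Person" -> get_person_value():   email -> Person instance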
def set_obj_attr(self):
"""Set object attribute method should do nothing for custom attributes.
CA values set in insert_object() method.
"""
if self.value is None:
return
cav = self._get_or_create_ca()
cav.attribute_value = self.value
if isinstance(cav.attribute_value, models.mixins.Identifiable):
obj = cav.attribute_value
cav.attribute_value = obj.__class__.__name__
cav.attribute_object_id = obj.id
def parse_item(self):
"""Parse raw value from csv file
Returns:
      The parsed value for the column's custom attribute definition type.
"""
self.definition = self.get_ca_definition()
if self.definition is None:
self.add_warning(errors.INVALID_ATTRIBUTE_WARNING,
column_name=self.display_name)
return None
type_ = self.definition.attribute_type.split(":")[0]
value_handler = self._type_handlers[type_]
return value_handler(self)
def get_value(self):
"""Return the value of the custom attrbute field.
Returns:
      Text representation of the custom attribute value if it exists, otherwise
None.
"""
definition = self.get_ca_definition()
if not definition:
return ""
for value in self.row_converter.obj.custom_attribute_values:
if value.custom_attribute_id == definition.id:
if value.custom_attribute.attribute_type.startswith("Map:"):
if value.attribute_object_id:
obj = value.attribute_object
return getattr(obj, "email", getattr(obj, "slug", None))
elif value.custom_attribute.attribute_type == _types.CHECKBOX:
attr_val = value.attribute_value if value.attribute_value else u"0"
attr_val = int(attr_val)
return str(bool(attr_val)).upper()
else:
return value.attribute_value
return None
def _get_or_create_ca(self):
"""Get a CA value object for the current definition.
This function returns a custom attribute value object that already existed
or creates a new one.
Returns:
custom attribute value object.
"""
ca_definition = self.get_ca_definition()
if not self.row_converter.obj or not ca_definition:
return None
for ca_value in self.row_converter.obj.custom_attribute_values:
if ca_value.custom_attribute_id == ca_definition.id:
return ca_value
ca_value = models.CustomAttributeValue(
custom_attribute=ca_definition,
attributable=self.row_converter.obj,
)
return ca_value
def insert_object(self):
"""Add custom attribute objects to db session."""
def get_date_value(self):
"""Get date value from input string date."""
if not self.mandatory and self.raw_value == "":
return None # ignore empty fields
value = None
try:
value = parse(self.raw_value).strftime(
models.CustomAttributeValue.DATE_FORMAT_ISO,
)
except (TypeError, ValueError):
self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
if self.mandatory and value is None:
self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
return value
def get_checkbox_value(self):
"""Get boolean value for checkbox fields."""
if not self.mandatory and self.raw_value == "":
return None # ignore empty fields
value = self.raw_value.lower() in ("yes", "true")
if self.raw_value.lower() not in ("yes", "true", "no", "false"):
self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
value = None
if self.mandatory and value is None:
self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
return value
def get_dropdown_value(self):
"""Get valid value of the dropdown field."""
choices_list = self.definition.multi_choice_options.split(",")
valid_choices = [val.strip() for val in choices_list]
choice_map = {choice.lower(): choice for choice in valid_choices}
value = choice_map.get(self.raw_value.lower())
if value is None and self.raw_value != "":
self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
if self.mandatory and value is None:
self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
return value
def get_text_value(self):
if not self.mandatory and self.raw_value == "":
return None # ignore empty fields
value = self.clean_whitespaces(self.raw_value)
if self.mandatory and not value:
self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
return value
def get_rich_text_value(self):
if not self.mandatory and self.raw_value == "":
return None # ignore empty fields
if self.mandatory and not self.raw_value:
self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
return self.raw_value
def get_person_value(self):
"""Fetch a person based on the email text in column.
Returns:
Person model instance
"""
if not self.mandatory and self.raw_value == "":
return None # ignore empty fields
if self.mandatory and not self.raw_value:
self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
return
value = models.Person.query.filter_by(email=self.raw_value).first()
if self.mandatory and not value:
self.add_error(errors.WRONG_VALUE, column_name=self.display_name)
return value
def get_ca_definition(self):
"""Get custom attribute definition."""
cache = self.row_converter.block_converter.get_ca_definitions_cache()
return cache.get((None, self.display_name))
class ObjectCaColumnHandler(CustomAttributeColumHandler):
"""Handler for object level custom attributes."""
def set_value(self):
pass
def set_obj_attr(self):
"""Parse item and set the current value.
This is a hack to get set_value on this handler called after all other
values have already been set.
"""
if self.dry_run:
return
self.value = self.parse_item()
super(ObjectCaColumnHandler, self).set_obj_attr()
def get_ca_definition(self):
"""Get custom attribute definition for a specific object."""
if self.row_converter.obj.id is None:
return None
cad = models.CustomAttributeDefinition
cache = self.row_converter.block_converter.get_ca_definitions_cache()
definition = cache.get((self.row_converter.obj.id, self.display_name))
if not definition:
definition = cad.query.filter(and_(
cad.definition_id == self.row_converter.obj.id,
cad.title == self.display_name
)).first()
return definition
| apache-2.0 | 8,865,814,761,532,145,000 | 33.954545 | 79 | 0.680104 | false |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/gnmt-tpuv3-8/code/gnmt/model/t2t/tensor2tensor/models/research/universal_transformer_util.py | 3 | 66281 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Universal Transformer.
The Universal Transformer is based on the popular encoder-decoder architecture.
However, as opposed to a fixed stack of distinct layers (as is usually the case
for most popular neural sequence models), the Universal Transformer is
recurrent "in depth", and repeatedly applies the same series of functions with
the same parameters to all elements of the sequence in parallel, revising their
representations with every step. The encoder and decoder have the same
recurrent structure, but the decoder additionally consumes the final encoder
representations for each position. Like the Transformer, the Universal
Transformer is autoregressive. Trained using teacher-forcing, at generation
time it produces its output one position at a time, with the decoder consuming
the previously produced output positions.
Given an input sequence of length m, we start with a matrix whose rows are the
d-dimensional embeddings of the symbols at each position of the sequence.
The Universal Transformer then iteratively computes representation of the input
at each step by applying the multiheaded dot-product self-attention mechanism,
followed by a recurrent transition function. We also add residual connections
around each of these function blocks and apply dropout and layer normalization.
The recurrent transition function in fact controls how steps communicate with
each other in depth. For instance, the recurrent transition, can be a simple
identity function which passes the output of a step as the input to next step.
Or it can be an LSTM (filliped vertically) next to the transformer which
controls how state of the model changes in depth.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer
from tensor2tensor.utils import expert_utils
import tensorflow as tf
def universal_transformer_encoder(encoder_input,
encoder_self_attention_bias,
hparams,
name="encoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True):
"""Universal Transformer encoder function.
  Prepares all the arguments and the inputs and passes them to
  universal_transformer_layer to encode the encoder_input.
Args:
encoder_input: a Tensor
encoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This must either be
passed in, which we do for "packed" datasets, or inferred from
encoder_self_attention_bias. The knowledge about padding is used
      for pad_remover (efficiency) and to mask out padding in convolutional
layers.
save_weights_to: an optional dictionary to capture attention weights
for vizualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
    y: a Tensor as the output of the encoder
extra_output: which can be used to pass extra information to the body
"""
x = encoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
with tf.variable_scope(name):
if nonpadding is not None:
padding = 1.0 - nonpadding
else:
padding = common_attention.attention_bias_to_padding(
encoder_self_attention_bias)
nonpadding = 1.0 - padding
pad_remover = None
if hparams.use_pad_remover and not common_layers.is_xla_compiled():
pad_remover = expert_utils.PadRemover(padding)
ffn_unit = functools.partial(
transformer_encoder_ffn_unit,
hparams=hparams,
nonpadding_mask=nonpadding,
pad_remover=pad_remover)
attention_unit = functools.partial(
transformer_encoder_attention_unit,
hparams=hparams,
encoder_self_attention_bias=encoder_self_attention_bias,
attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary)
x, extra_output = universal_transformer_layer(
x, hparams, ffn_unit, attention_unit, pad_remover=pad_remover)
return common_layers.layer_preprocess(x, hparams), extra_output
def universal_transformer_decoder(decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
name="decoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True):
"""Universal Transformer decoder function.
  Prepares all the arguments and the inputs and passes them to
  universal_transformer_layer to decode the decoder_input.
Args:
decoder_input: a Tensor
encoder_output: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This is used
      to mask out padding in convolutional layers. We generally only
need this mask for "packed" datasets, because for ordinary datasets,
no padding is ever followed by nonpadding.
save_weights_to: an optional dictionary to capture attention weights
for vizualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
y: the output Tensors
extra_output: which can be used to pass extra information to the body
"""
x = decoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
with tf.variable_scope(name):
ffn_unit = functools.partial(
transformer_decoder_ffn_unit,
hparams=hparams,
nonpadding_mask=nonpadding)
attention_unit = functools.partial(
transformer_decoder_attention_unit,
hparams=hparams,
encoder_output=encoder_output,
decoder_self_attention_bias=decoder_self_attention_bias,
encoder_decoder_attention_bias=encoder_decoder_attention_bias,
attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary)
x, extra_output = universal_transformer_layer(
x, hparams, ffn_unit, attention_unit)
return common_layers.layer_preprocess(x, hparams), extra_output
def universal_transformer_layer(x,
hparams,
ffn_unit,
attention_unit,
pad_remover=None):
"""Core function applying the universal transformer layer.
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
the output tensor, extra output (can be memory, ponder time, etc.)
Raises:
ValueError: Unknown recurrence type
"""
def add_vanilla_transformer_layer(x, num_layers):
"""Passes the input through num_layers of vanilla transformer layers.
Args:
x: input
num_layers: number of layers
Returns:
output of vanilla_transformer_layer
"""
if hparams.add_position_timing_signal:
# In case of add_position_timing_signal=true, we set hparams.pos=None
# and add position timing signal at the beginning of each step, so for
# the vanilla transformer, we need to add timing signal here.
x = common_attention.add_timing_signal_1d(x)
for layer in range(num_layers):
with tf.variable_scope("layer_%d" % layer):
x = ffn_unit(attention_unit(x))
return x
with tf.variable_scope("universal_transformer_%s" % hparams.recurrence_type):
if hparams.mix_with_transformer == "before_ut":
x = add_vanilla_transformer_layer(x, hparams.num_mixedin_layers)
if hparams.recurrence_type == "act":
return universal_transformer_act(x, hparams, ffn_unit, attention_unit)
else: # for all the other recurrency types with fixed number of steps
ut_function, initializer = get_ut_layer(x, hparams, ffn_unit,
attention_unit, pad_remover)
output, _, extra_output = tf.foldl(
ut_function, tf.range(hparams.num_rec_steps), initializer=initializer)
# Right now, this is only possible when the transition function is an lstm
if (hparams.recurrence_type == "lstm" and
hparams.get("use_memory_as_final_state", False)):
output = extra_output
if hparams.mix_with_transformer == "after_ut":
output = add_vanilla_transformer_layer(output, hparams.num_mixedin_layers)
return output, extra_output
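# For reference, the tf.foldl recurrence above is roughly equivalent to the
# following Python loop (illustration only; the real computation stays in the
# TensorFlow graph):
#
#     state, inputs, memory = initializer
#     for step in range(hparams.num_rec_steps):
#         state, inputs, memory = ut_function((state, inputs, memory), step)
#     output, extra_output = state, memory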
def get_ut_layer(x,
hparams,
ffn_unit,
attention_unit,
pad_remover=None):
"""Provides the function that is used in universal transforemr steps.
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
ut_function and the ut_initializer
Raises:
ValueError: Unknown recurrence type
"""
if hparams.recurrence_type == "basic":
ut_initializer = (x, x, x) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_basic,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit)
elif hparams.recurrence_type == "highway":
ut_initializer = (x, x, x) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_highway,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "skip":
ut_initializer = (x, x, x) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_skip,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "dwa":
# memory contains the original input + all the states
memory_size = hparams.num_rec_steps + 1
# prepare initializer:
memory_empty = tf.zeros([memory_size] + common_layers.shape_list(x))
# filling the first slot with the original input
memory = fill_memory_slot(memory_empty, x, 0)
ut_initializer = (x, x, memory) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_depthwise_attention,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit)
elif hparams.recurrence_type == "gru":
ut_initializer = (x, x, x) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_with_gru_as_transition_function,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "lstm":
memory = tf.zeros(common_layers.shape_list(x))
ut_initializer = (x, x, memory) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_with_lstm_as_transition_function,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
else:
raise ValueError("Unknown recurrence type: %s" % hparams.recurrence_type)
return ut_function, ut_initializer
def transformer_encoder_ffn_unit(x,
hparams,
nonpadding_mask=None,
pad_remover=None):
"""Applies a feed-forward function which is parametrised for encoding.
Args:
x: input
hparams: model hyper-parameters
nonpadding_mask: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This is used
to mask out padding in convolutional layers. We generally only
need this mask for "packed" datasets, because for ordinary datasets,
no padding is ever followed by nonpadding.
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
the output tensor
"""
with tf.variable_scope("ffn"):
if hparams.transformer_ffn_type == "fc":
y = transformer.transformer_ffn_layer(
common_layers.layer_preprocess(x, hparams),
hparams,
pad_remover,
conv_padding="SAME",
nonpadding_mask=nonpadding_mask)
if hparams.transformer_ffn_type == "sepconv":
assert nonpadding_mask is not None, (
"The nonpadding_mask should be provided, otherwise the model uses "
"the leaked padding information to estimate the length!")
y = common_layers.sepconv_relu_sepconv(
common_layers.layer_preprocess(x, hparams),
filter_size=hparams.filter_size,
output_size=hparams.hidden_size,
first_kernel_size=(3, 1),
second_kernel_size=(5, 1),
padding="SAME",
nonpadding_mask=nonpadding_mask,
dropout=hparams.relu_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
return x
def transformer_encoder_attention_unit(x,
hparams,
encoder_self_attention_bias,
attention_dropout_broadcast_dims,
save_weights_to=None,
make_image_summary=True):
"""Applies multihead attention function which is parametrised for encoding.
Args:
x: input
hparams: model hyper-parameters
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
attention_dropout_broadcast_dims: For noise broadcasting in the dropout
layers to save memory during training
save_weights_to: an optional dictionary to capture attention weights for
visualization; the weights tensor will be appended there under a string
key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
the output tensor
"""
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
save_weights_to=save_weights_to,
max_relative_position=hparams.max_relative_position,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims)
x = common_layers.layer_postprocess(x, y, hparams)
return x
def transformer_decoder_ffn_unit(x,
hparams,
nonpadding_mask=None):
"""Applies a feed-forward function which is parametrised for decoding.
Args:
x: input
hparams: model hyper-parameters
nonpadding_mask: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This is used
to mask out padding in convolutional layers. We generally only
need this mask for "packed" datasets, because for ordinary datasets,
no padding is ever followed by nonpadding.
Returns:
the output tensor
"""
with tf.variable_scope("ffn"):
if hparams.transformer_ffn_type == "fc":
y = transformer.transformer_ffn_layer(
common_layers.layer_preprocess(x, hparams),
hparams,
conv_padding="LEFT",
nonpadding_mask=nonpadding_mask)
if hparams.transformer_ffn_type == "sepconv":
y = common_layers.sepconv_relu_sepconv(
common_layers.layer_preprocess(x, hparams),
filter_size=hparams.filter_size,
output_size=hparams.hidden_size,
first_kernel_size=(3, 1),
second_kernel_size=(5, 1),
padding="LEFT",
nonpadding_mask=nonpadding_mask,
dropout=hparams.relu_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
return x
def transformer_decoder_attention_unit(x,
hparams,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
attention_dropout_broadcast_dims,
save_weights_to=None,
make_image_summary=True):
"""Applies multihead attention function which is parametrised for decoding.
Args:
x: input (decoder input)
hparams: model hyper-parameters
encoder_output: Encoder representation. [batch_size, input_length,
hidden_dim]
decoder_self_attention_bias: Bias and mask weights for decoder
self-attention. [batch_size, decoder_length]
encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
attention. [batch_size, input_length]
attention_dropout_broadcast_dims: For noise broadcasting in the dropout
layers to save memory during training
save_weights_to: an optional dictionary to capture attention weights for
visualization; the weights tensor will be appended there under a string
key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
The output tensor
"""
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
save_weights_to=save_weights_to,
max_relative_position=hparams.max_relative_position,
cache=None,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims)
x = common_layers.layer_postprocess(x, y, hparams)
if encoder_output is not None:
with tf.variable_scope("encdec_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
encoder_output,
encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims)
x = common_layers.layer_postprocess(x, y, hparams)
return x
def universal_transformer_basic(layer_inputs,
step, hparams,
ffn_unit,
attention_unit):
"""Basic Universal Transformer.
This model is essentially a vanilla transformer in which the weights are
shared between layers. For some tasks, this simple idea brings a
generalization that is not achievable by adjusting the model size or
dropout parameters of the vanilla transformer.
Args:
layer_inputs:
- state: state
step: indicates number of steps taken so far
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
layer_output:
new_state: new state
"""
state, inputs, memory = tf.unstack(layer_inputs, num=None, axis=0,
name="unstack")
state = step_preprocess(state, step, hparams)
new_state = ffn_unit(attention_unit(state))
return new_state, inputs, memory
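# A minimal sketch of the recurrence implemented above (assuming the usual
# layer pre/postprocessing wrapped inside ffn_unit and attention_unit): with a
# single shared set of weights theta,
#
#   s_0     = embedded inputs
#   s_{t+1} = FFN_theta(ATT_theta(step_preprocess(s_t, t)))
#
# so increasing hparams.num_rec_steps adds depth without adding parameters,
# unlike stacking layers in the vanilla transformer.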
def universal_transformer_highway(layer_inputs,
step, hparams,
ffn_unit,
attention_unit,
pad_remover=None):
"""Universal Transformer with highway connection.
It transforms the state using a block containing self-attention and a
transition function and wraps the whole block with a highway connection.
(the new state is a combination of the state and the transformed state
based on carry/transform gates.)
Interesting observation:
Controlling the carry/transform gate with the original inputs usually works
better (i.e. hparams.gates_inputs="i")
Args:
layer_inputs:
- state: state
- inputs: the original embedded inputs (= inputs to the first step)
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: the original embedded inputs (= inputs to the first step)
"""
state, inputs, memory = layer_inputs
state = step_preprocess(state, step, hparams)
transformed_state = ffn_unit(attention_unit(state))
transformed_state.get_shape().assert_is_compatible_with(state.get_shape())
gate_inputs = []
if "s" in hparams.gates_inputs:
gate_inputs.append(state)
if "t" in hparams.gates_inputs:
gate_inputs.append(transformed_state)
if "i" in hparams.gates_inputs:
gate_inputs.append(inputs)
gate_ffn_layer = hparams.gate_ffn_layer
transform_gate = _ffn_layer_multi_inputs(
gate_inputs,
hparams,
ffn_layer_type=gate_ffn_layer,
name="transform",
bias_initializer=tf.constant_initializer(hparams.transform_bias_init),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=True,
postprocess=True)
if hparams.couple_carry_transform_gates:
carry_gate = tf.subtract(1.0, transform_gate, name="carry")
else:
carry_gate = _ffn_layer_multi_inputs(
gate_inputs,
hparams,
ffn_layer_type=gate_ffn_layer,
name="carry",
bias_initializer=tf.constant_initializer(-hparams.transform_bias_init),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=True,
postprocess=True)
new_state = state * carry_gate + transformed_state * transform_gate
tf.contrib.summary.scalar("highway_transform_gate_layer",
tf.reduce_mean(transform_gate))
tf.contrib.summary.scalar("highway_carry_gate_layer",
tf.reduce_mean(carry_gate))
return new_state, inputs, memory
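# Sketch of the gating above, assuming hparams.couple_carry_transform_gates is
# True and hparams.gates_inputs == "i" (gates driven by the original embedded
# inputs i):
#
#   T = sigmoid(W_T . i + b_T)   # transform gate, b_T init = transform_bias_init
#   C = 1 - T                    # coupled carry gate
#   s_{t+1} = s'_t * C + FFN(ATT(s'_t)) * T,   s'_t = step_preprocess(s_t, t)
#
# With uncoupled gates, C gets its own projection whose bias is initialised to
# -transform_bias_init.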
def universal_transformer_skip(layer_inputs,
step,
hparams,
ffn_unit,
attention_unit,
pad_remover=None):
"""Universal Transformer with highway connection.
It transforms the state using attention and ffn and wrap this transformation
with a skip-all connection. (the new state is a combination of the state and
the inputs (original inputs) based on cary/transform gates.)
Observation:
Controlling the cary/transform gate with the original inputs works usually
better (i.e. hparams.gates_inputs="i")
Args:
layer_inputs:
- state: state
- inputs: the original embedded inputs (= inputs to the first step)
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: the original embedded inputs (= inputs to the first step)
"""
state, inputs, memory = layer_inputs
state = step_preprocess(state, step, hparams)
transformed_state = ffn_unit(attention_unit(state))
inputs.get_shape().assert_is_compatible_with(state.get_shape())
gate_inputs = []
if "s" in hparams.gates_inputs:
gate_inputs.append(state)
if "t" in hparams.gates_inputs:
gate_inputs.append(transformed_state)
if "i" in hparams.gates_inputs:
gate_inputs.append(inputs)
gate_ffn_layer = hparams.gate_ffn_layer
transform_gate = _ffn_layer_multi_inputs(
gate_inputs,
hparams,
ffn_layer_type=gate_ffn_layer,
name="transform",
bias_initializer=tf.constant_initializer(hparams.transform_bias_init),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=True,
postprocess=True)
if hparams.couple_carry_transform_gates:
carry_gate = tf.subtract(1.0, transform_gate, name="carry")
else:
carry_gate = _ffn_layer_multi_inputs(
gate_inputs,
hparams,
ffn_layer_type=gate_ffn_layer,
name="carry",
bias_initializer=tf.constant_initializer(-hparams.transform_bias_init),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=True,
postprocess=True)
tf.contrib.summary.scalar("skip_transform_gate_layer",
tf.reduce_mean(transform_gate))
tf.contrib.summary.scalar("skip_carry_gate_layer", tf.reduce_mean(carry_gate))
new_state = inputs * carry_gate + transformed_state * transform_gate
return new_state, inputs, memory
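# Note the difference from the highway variant above: here the carry path goes
# back to the original embedded `inputs` rather than to the previous state,
#
#   s_{t+1} = inputs * C + FFN(ATT(s'_t)) * T,   s'_t = step_preprocess(s_t, t)
#
# which is why the shape assertion compares `inputs` against `state`.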
def universal_transformer_depthwise_attention(layer_inputs,
step, hparams,
ffn_unit,
attention_unit):
"""universal_transformer with depth-wise attention.
It uses an attention mechanism (flipped vertically)
over all the states from previous steps to generate the new_state.
Args:
layer_inputs:
- state: state
- memory: contains states from all the previous steps.
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
layer_output:
new_state: new state
memory: contains states from all the previous steps.
"""
_, inputs, memory = layer_inputs
all_states = memory
# add depth signal
if hparams.depth_embedding:
all_states = add_depth_embedding(all_states)
# get the states up to the current step (non-zero part of the memory)
states_so_far = all_states[:step, :, :, :]
states_so_far_weights = tf.nn.softmax(
common_layers.dense(
states_so_far, (hparams.hidden_size if hparams.dwa_elements else 1),
activation=None,
use_bias=True),
axis=-1)
# prepare the state tensor that will be transformed
state_to_be_transformed = tf.reduce_sum(
(states_so_far * states_so_far_weights), axis=0)
state_to_be_transformed = step_preprocess(state_to_be_transformed, step,
hparams)
new_state = ffn_unit(attention_unit(state_to_be_transformed))
# add the new state to the memory
memory = fill_memory_slot(memory, new_state, step + 1)
return new_state, inputs, memory
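# Sketch of the depth-wise attention above: `memory` keeps one slot per step
# (slot 0 is filled with the original input; the state produced at step t is
# written into slot t + 1 by fill_memory_slot). The states gathered so far are
# combined through a dense projection followed by a softmax, the weighted sum
# is run through step_preprocess and FFN(ATT(.)), and the result becomes both
# the new state and the next memory slot.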
def universal_transformer_with_gru_as_transition_function(
layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
"""Universal Transformer which uses a gru as transition function.
It's kind of like having a gru, flipped vertically, next to the Universal
Transformer that controls the flow of the information in depth,
over different steps of the Universal Transformer.
Args:
layer_inputs:
- state: state
- inputs: not used here
- memory: not used here
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: not used
memory: not used
"""
state, unused_inputs, unused_memory = tf.unstack(
layer_inputs, num=None, axis=0, name="unstack")
# state (ut_state): output of the gru in the previous step
# Multi_head_attention:
assert not hparams.add_step_timing_signal # Let gru count for us!
mh_attention_input = step_preprocess(state, step, hparams)
transition_function_input = attention_unit(mh_attention_input)
# Transition Function:
if hparams.add_ffn_unit_to_the_transition_function:
transition_function_input = ffn_unit(transition_function_input)
transition_function_input = common_layers.layer_preprocess(
transition_function_input, hparams)
with tf.variable_scope("gru"):
# gru update gate: z_t = sigmoid(W_z.x_t + U_z.h_{t-1})
transition_function_update_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="update",
bias_initializer=tf.constant_initializer(1.0),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("gru_update_gate",
tf.reduce_mean(transition_function_update_gate))
# gru reset gate: r_t = sigmoid(W_r.x_t + U_r.h_{t-1})
transition_function_reset_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="reset",
bias_initializer=tf.constant_initializer(1.0),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("gru_reset_gate",
tf.reduce_mean(transition_function_reset_gate))
reset_state = transition_function_reset_gate * state
# gru candidate activation: h'_t = tanh(W.x_t + U.(r_t * h_{t-1}))
transition_function_candidate = _ffn_layer_multi_inputs(
[transition_function_input, reset_state],
hparams,
name="candidate",
bias_initializer=tf.zeros_initializer(),
activation=tf.tanh,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
transition_function_output = (
(1 - transition_function_update_gate) * transition_function_input +
transition_function_update_gate * transition_function_candidate)
transition_function_output = common_layers.layer_preprocess(
transition_function_output, hparams)
return transition_function_output, unused_inputs, unused_memory
def universal_transformer_with_lstm_as_transition_function(
layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
"""Universal Transformer which uses a lstm as transition function.
It's kind of like having an lstm, flipped vertically, next to the Universal
Transformer that controls the flow of the information in depth,
over different steps of the Universal Transformer.
Args:
layer_inputs:
- state: state
- inputs: the original embedded inputs (= inputs to the first step)
- memory: memory used in lstm.
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: the original embedded inputs (= inputs to the first step)
memory: contains information of state from all the previous steps.
"""
state, unused_inputs, memory = tf.unstack(
layer_inputs, num=None, axis=0, name="unstack")
# NOTE:
# state (ut_state): output of the lstm in the previous step
# inputs (ut_input): original input --> we don't use it here
# memory: lstm memory
# Multi_head_attention:
assert not hparams.add_step_timing_signal # Let lstm count for us!
mh_attention_input = step_preprocess(state, step, hparams)
transition_function_input = attention_unit(mh_attention_input)
# Transition Function:
if hparams.add_ffn_unit_to_the_transition_function:
transition_function_input = ffn_unit(transition_function_input)
transition_function_input = common_layers.layer_preprocess(
transition_function_input, hparams)
with tf.variable_scope("lstm"):
# lstm input gate: i_t = sigmoid(W_i.x_t + U_i.h_{t-1})
transition_function_input_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="input",
bias_initializer=tf.zeros_initializer(),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("lstm_input_gate",
tf.reduce_mean(transition_function_input_gate))
# lstm forget gate: f_t = sigmoid(W_f.x_t + U_f.h_{t-1})
transition_function_forget_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="forget",
bias_initializer=tf.zeros_initializer(),
activation=None,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
forget_bias_tensor = tf.constant(hparams.lstm_forget_bias)
transition_function_forget_gate = tf.sigmoid(
transition_function_forget_gate + forget_bias_tensor)
tf.contrib.summary.scalar("lstm_forget_gate",
tf.reduce_mean(transition_function_forget_gate))
# lstm output gate: o_t = sigmoid(W_o.x_t + U_o.h_{t-1})
transition_function_output_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="output",
bias_initializer=tf.zeros_initializer(),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("lstm_output_gate",
tf.reduce_mean(transition_function_output_gate))
# lstm input modulation
transition_function_input_modulation = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="input_modulation",
bias_initializer=tf.zeros_initializer(),
activation=tf.tanh,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
transition_function_memory = (
memory * transition_function_forget_gate +
transition_function_input_gate * transition_function_input_modulation)
transition_function_output = (
tf.tanh(transition_function_memory) * transition_function_output_gate)
transition_function_output = common_layers.layer_preprocess(
transition_function_output, hparams)
return transition_function_output, unused_inputs, transition_function_memory
def universal_transformer_act(x, hparams, ffn_unit, attention_unit):
"""ACT based models.
Implementations of all act models are based on craffel@'s cl/160711592.
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
the output tensor, (ponder_times, remainders)
Raises:
ValueError: Unknown act type
"""
# TODO(dehghani): Enable pad_remover for the act computations.
if hparams.act_type == "basic":
return universal_transformer_act_basic(
x, hparams, ffn_unit, attention_unit)
elif hparams.act_type == "accumulated":
return universal_transformer_act_accumulated(
x, hparams, ffn_unit, attention_unit)
elif hparams.act_type == "global":
return universal_transformer_act_global(
x, hparams, ffn_unit, attention_unit)
elif hparams.act_type == "random":
return universal_transformer_act_random(
x, hparams, ffn_unit, attention_unit)
else:
raise ValueError("Unknown act type: %s" % hparams.act_type)
def universal_transformer_act_basic(x, hparams, ffn_unit, attention_unit):
"""Basic universal_transformer with ACT based on remainder-distribution ACT.
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
the output tensor, (ponder_times, remainders)
"""
state = x
act_max_steps = hparams.act_max_steps
threshold = 1.0 - hparams.act_epsilon
batch_size = tf.shape(state)[0]
length = tf.shape(state)[1]
# Halting probabilities (p_t^n in the paper)
halting_probability = tf.zeros(
(
batch_size,
length,
), name="halting_probability")
# Remainders (R(t) in the paper)
remainders = tf.zeros(
(
batch_size,
length,
), name="remainder")
# Number of updates performed (N(t) in the paper)
n_updates = tf.zeros(
(
batch_size,
length,
), name="n_updates")
# Previous cell states (s_t in the paper)
previous_state = tf.zeros_like(state, name="previous_state")
step = tf.constant(0, dtype=tf.int32)
def ut_function(state, step, halting_probability, remainders, n_updates,
previous_state):
"""implements act (position-wise halting).
Args:
state: 3-D Tensor: [batch_size, length, channel]
step: indicates number of steps taken so far
halting_probability: halting probability
remainders: act remainders
n_updates: act n_updates
previous_state: previous state
Returns:
transformed_state: transformed state
step: step+1
halting_probability: halting probability
remainders: act remainders
n_updates: act n_updates
new_state: new state
"""
state_shape = state.get_shape()
state = step_preprocess(state, step, hparams)
with tf.variable_scope("sigmoid_activation_for_pondering"):
p = common_layers.dense(
state,
1,
activation=tf.nn.sigmoid,
use_bias=True,
bias_initializer=tf.constant_initializer(
hparams.act_halting_bias_init))
p = tf.squeeze(p, axis=-1)
# Mask for inputs which have not halted yet
still_running = tf.cast(tf.less(halting_probability, 1.0), tf.float32)
# Mask of inputs which halted at this step
new_halted = tf.cast(
tf.greater(halting_probability + p * still_running, threshold),
tf.float32) * still_running
# Mask of inputs which haven't halted, and didn't halt this step
still_running = tf.cast(
tf.less_equal(halting_probability + p * still_running, threshold),
tf.float32) * still_running
# Add the halting probability for this step to the halting
# probabilities for those input which haven't halted yet
halting_probability += p * still_running
# Compute remainders for the inputs which halted at this step
remainders += new_halted * (1 - halting_probability)
# Add the remainders to those inputs which halted at this step
halting_probability += new_halted * remainders
# Increment n_updates for all inputs which are still running
n_updates += still_running + new_halted
# Compute the weight to be applied to the new state and output
# 0 when the input has already halted
# p when the input hasn't halted yet
# the remainders when it halted this step
update_weights = tf.expand_dims(p * still_running + new_halted * remainders,
-1)
# apply transformation on the state
transformed_state = ffn_unit(attention_unit(state))
# update running part in the weighted state and keep the rest
new_state = ((transformed_state * update_weights) +
(previous_state * (1 - update_weights)))
# remind TensorFlow of everything's shape
transformed_state.set_shape(state_shape)
for x in [halting_probability, remainders, n_updates]:
x.set_shape([
state_shape[0],
state_shape[1],
])
new_state.set_shape(state_shape)
step += 1
return (transformed_state, step, halting_probability, remainders, n_updates,
new_state)
# While loop stops when this predicate is FALSE.
# Ie all (probability < 1-eps AND counter < N) are false.
def should_continue(u0, u1, halting_probability, u2, n_updates, u3):
del u0, u1, u2, u3
return tf.reduce_any(
tf.logical_and(
tf.less(halting_probability, threshold),
tf.less(n_updates, act_max_steps)))
# Do while loop iterations until predicate above is false.
(_, _, _, remainder, n_updates, new_state) = tf.while_loop(
should_continue, ut_function,
(state, step, halting_probability, remainders, n_updates, previous_state))
ponder_times = n_updates
remainders = remainder
tf.contrib.summary.scalar("ponder_times", tf.reduce_mean(ponder_times))
return new_state, (ponder_times, remainders)
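# Worked example of the halting bookkeeping above (hypothetical numbers): for
# one position with threshold ~ 0.99, suppose the pondering network emits
# p = 0.6 at step 0 and p = 0.7 at step 1.
#   step 0: still_running = 1, new_halted = 0, halting_probability -> 0.6,
#           update weight = p = 0.6
#   step 1: 0.6 + 0.7 exceeds the threshold, so new_halted = 1,
#           remainder = 1 - 0.6 = 0.4, halting_probability -> 1.0,
#           update weight = 0.4, n_updates = 2, and the position is frozen
#           in every later step.
# new_state is a running interpolation: each step mixes in the freshly
# transformed state with weight update_weights and keeps the previous mixture
# with weight 1 - update_weights.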
def universal_transformer_act_accumulated(x, hparams, ffn_unit, attention_unit):
"""The UTAct layer where the final state is the accumulation of all states.
(similar to the main ACT paper: --> check the issue of differentiability)
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
the output tensor, (ponder_times, remainders)
"""
state = x
act_max_steps = hparams.act_max_steps
threshold = 1.0 - hparams.act_epsilon
batch_size = tf.shape(state)[0]
length = tf.shape(state)[1]
# Halting probabilities (p_t^n in the paper)
halting_probability = tf.zeros(
(
batch_size,
length,
), name="halting_probability")
# Remainders (R(t) in the paper)
remainders = tf.zeros(
(
batch_size,
length,
), name="remainder")
# Number of updates performed (N(t) in the paper)
n_updates = tf.zeros(
(
batch_size,
length,
), name="n_updates")
# Accumulated cell states (s_t in the paper)
accumulated_state = tf.zeros_like(state, name="previous_state")
step = tf.constant(0, dtype=tf.int32)
def ut_function(state, step, halting_probability, remainders, n_updates,
accumulated_state):
"""Position-wise act.
Args:
state: 3-D Tensor: [batch_size, length, channel]
step: indicates number of steps taken so far
halting_probability: halting probability
remainders: act remainders
n_updates: act n_updates
accumulated_state: accumulated state
Returns:
transformed_state: transformed state
step: step+1
halting_probability: halting probability
remainders: act remainders
n_updates: act n_updates
accumulated_state: accumulated state
"""
state_shape = state.get_shape()
state = step_preprocess(state, step, hparams)
with tf.variable_scope("sigmoid_activation_for_pondering"):
p = common_layers.dense(
state,
1,
activation=tf.nn.sigmoid,
use_bias=True,
bias_initializer=tf.constant_initializer(
hparams.act_halting_bias_init))
p = tf.squeeze(p, axis=-1)
# Mask for inputs which have not halted yet
still_running = tf.cast(tf.less(halting_probability, 1.0), tf.float32)
# Mask of inputs which halted at this step
new_halted = tf.cast(
tf.greater(halting_probability + p * still_running, threshold),
tf.float32) * still_running
# Mask of inputs which haven't halted, and didn't halt this step
still_running = tf.cast(
tf.less_equal(halting_probability + p * still_running, threshold),
tf.float32) * still_running
# Add the halting probability for this step to the halting
# probabilities for those input which haven't halted yet
halting_probability += p * still_running
# Compute remainders for the inputs which halted at this step
remainders += new_halted * (1 - halting_probability)
# Add the remainders to those inputs which halted at this step
halting_probability += new_halted * remainders
# Increment n_updates for all inputs which are still running
n_updates += still_running + new_halted
# Compute the weight to be applied to the new state and output
# 0 when the input has already halted
# p when the input hasn't halted yet
# the remainders when it halted this step
update_weights = tf.expand_dims(p * still_running + new_halted * remainders,
-1)
# apply transformation on the state
transformed_state = ffn_unit(attention_unit(state))
# Add in the weighted state
accumulated_state = (transformed_state * update_weights) + accumulated_state
# Remind TensorFlow of everything's shape
state.set_shape(state_shape)
for x in [halting_probability, remainders, n_updates]:
x.set_shape([
state_shape[0],
state_shape[1],
])
accumulated_state.set_shape(state_shape)
step += 1
return (transformed_state, step, halting_probability, remainders, n_updates,
accumulated_state)
# While loop stops when this predicate is FALSE.
# Ie all (probability < 1-eps AND counter < N) are false.
def should_continue(u0, u1, halting_probability, u2, n_updates, u3):
del u0, u1, u2, u3
return tf.reduce_any(
tf.logical_and(
tf.less(halting_probability, threshold),
tf.less(n_updates, act_max_steps)))
# Do while loop iterations until predicate above is false.
(_, _, _, remainder, n_updates, accumulated_state) = tf.while_loop(
should_continue, ut_function, (state, step, halting_probability,
remainders, n_updates, accumulated_state))
ponder_times = n_updates
remainders = remainder
tf.contrib.summary.scalar("ponder_times", tf.reduce_mean(ponder_times))
return accumulated_state, (ponder_times, remainders)
def universal_transformer_act_global(x, hparams, ffn_unit, attention_unit):
"""The UTAct with global halting probability (not position-wise).
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
the output tensor, (ponder_times, remainders)
"""
state = x
act_max_steps = hparams.act_max_steps
threshold = 1.0 - hparams.act_epsilon
batch_size = tf.shape(state)[0]
state_shape = state.get_shape()
# Halting probabilities (p_t^n in the paper)
halting_probability = tf.zeros((batch_size,), name="halting_probability")
# Remainders (R(t) in the paper)
remainders = tf.zeros((batch_size,), name="remainder")
# Number of updates performed (N(t) in the paper)
n_updates = tf.zeros((batch_size,), name="n_updates")
# Previous cell states (s_t in the paper)
previous_state = tf.zeros_like(state, name="previous_state")
step = tf.constant(0, dtype=tf.int32)
def ut_function(state, step, halting_probability, remainders, n_updates,
previous_state):
"""implements act (global halting).
Args:
state: 3-D Tensor: [batch_size, length, channel]
step: indicates number of steps taken so far
halting_probability: halting probability
remainders: act remainders
n_updates: act n_updates
previous_state: previous state
Returns:
transformed_state: transformed state
step: step+1
halting_probability: halting probability
remainders: act remainders
n_updates: act n_updates
new_state: new state
"""
state = step_preprocess(state, step, hparams)
with tf.variable_scope("sigmoid_activation_for_pondering"):
p = common_layers.dense(
state,
1,
activation=tf.nn.sigmoid,
use_bias=True,
bias_initializer=tf.constant_initializer(
hparams.act_halting_bias_init))
# average over all positions (as a global halting prob)
p = tf.reduce_mean(p, axis=1)
p = tf.squeeze(p)
# Mask for inputs which have not halted yet
still_running = tf.cast(tf.less(halting_probability, 1.0), tf.float32)
# Mask of inputs which halted at this step
new_halted = tf.cast(
tf.greater(halting_probability + p * still_running, threshold),
tf.float32) * still_running
# Mask of inputs which haven't halted, and didn't halt this step
still_running = tf.cast(
tf.less_equal(halting_probability + p * still_running, threshold),
tf.float32) * still_running
# Add the halting probability for this step to the halting
# probabilities for those input which haven't halted yet
halting_probability += p * still_running
# Compute remainders for the inputs which halted at this step
remainders += new_halted * (1 - halting_probability)
# Add the remainders to those inputs which halted at this step
halting_probability += new_halted * remainders
# Increment n_updates for all inputs which are still running
n_updates += still_running + new_halted
# Compute the weight to be applied to the new state and output
# 0 when the input has already halted
# p when the input hasn't halted yet
# the remainders when it halted this step
update_weights = tf.expand_dims(
tf.expand_dims(p * still_running + new_halted * remainders, -1), -1)
# apply transformation on the state
transformed_state = ffn_unit(attention_unit(state))
# Add in the weighted state
new_state = ((transformed_state * update_weights) +
(previous_state * (1 - update_weights)))
# Remind TensorFlow of everything's shape
state.set_shape(state_shape)
for x in [halting_probability, remainders, n_updates]:
x.set_shape([
state_shape[0],
])
new_state.set_shape(state_shape)
step += 1
return [
transformed_state, step, halting_probability, remainders, n_updates,
new_state
]
# While loop stops when this predicate is FALSE.
# Ie all (probability < 1-eps AND counter < N) are false.
def should_continue(u0, u1, halting_probability, u2, n_updates, u3):
del u0, u1, u2, u3
return tf.reduce_any(
tf.logical_and(
tf.less(halting_probability, threshold),
tf.less(n_updates, act_max_steps)))
# Do while loop iterations until predicate above is false.
(_, _, _, remainder, n_updates, new_state) = tf.while_loop(
should_continue, ut_function,
(state, step, halting_probability, remainders, n_updates, previous_state))
ponder_times = n_updates
remainders = remainder
tf.contrib.summary.scalar("ponder_times", tf.reduce_mean(ponder_times))
return new_state, (ponder_times, remainders)
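# Unlike the position-wise variants, the halting quantities here have shape
# [batch_size] only: p is averaged over positions, so a whole example halts
# (or keeps pondering) as a unit, and update_weights is expanded twice so it
# broadcasts over both length and channels.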
def universal_transformer_act_random(x, hparams, ffn_unit, attention_unit):
"""universal_transformer with ACT with random halting probability.
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
the output tensor, (ponder_times, remainders)
"""
state = x
act_max_steps = hparams.act_max_steps
threshold = 1.0 - hparams.act_epsilon
batch_size = tf.shape(state)[0]
length = tf.shape(state)[1]
# Halting probabilities (p_t^n in the paper)
halting_probability = tf.zeros(
(
batch_size,
length,
), name="halting_probability")
# Remainders (R(t) in the paper)
remainders = tf.zeros(
(
batch_size,
length,
), name="remainder")
# Number of updates performed (N(t) in the paper)
n_updates = tf.zeros(
(
batch_size,
length,
), name="n_updates")
# Previous cell states (s_t in the paper)
previous_state = tf.zeros_like(state, name="previous_state")
step = tf.constant(0, dtype=tf.int32)
def ut_function(state, step, halting_probability, remainders, n_updates,
previous_state):
"""Implements act (position-wise halting).
Args:
state: 3-D Tensor: [batch_size, length, channel]
step: indicates number of steps taken so far
halting_probability: halting probability
remainders: act remainders
n_updates: act n_updates
previous_state: previous state
Returns:
transformed_state: transformed state
step: step+1
halting_probability: halting probability
remainders: act remainders
n_updates: act n_updates
new_state: new state
"""
state_shape = state.get_shape()
state = step_preprocess(state, step, hparams)
# random as halting probability
p = tf.random_uniform(shape=common_layers.shape_list(halting_probability))
# Mask for inputs which have not halted yet
still_running = tf.cast(tf.less(halting_probability, 1.0), tf.float32)
# Mask of inputs which halted at this step
new_halted = tf.cast(
tf.greater(halting_probability + p * still_running, threshold),
tf.float32) * still_running
# Mask of inputs which haven't halted, and didn't halt this step
still_running = tf.cast(
tf.less_equal(halting_probability + p * still_running, threshold),
tf.float32) * still_running
# Add the halting probability for this step to the halting
# probabilities for those input which haven't halted yet
halting_probability += p * still_running
# Compute remainders for the inputs which halted at this step
remainders += new_halted * (1 - halting_probability)
# Add the remainders to those inputs which halted at this step
halting_probability += new_halted * remainders
# Increment n_updates for all inputs which are still running
n_updates += still_running + new_halted
# Compute the weight to be applied to the new state and output
# 0 when the input has already halted
# p when the input hasn't halted yet
# the remainders when it halted this step
update_weights = tf.expand_dims(p * still_running + new_halted * remainders,
-1)
# apply transformation on the state
transformed_state = ffn_unit(attention_unit(state))
# update running part in the weighted state and keep the rest
new_state = ((transformed_state * update_weights) +
(previous_state * (1 - update_weights)))
# remind TensorFlow of everything's shape
transformed_state.set_shape(state_shape)
for x in [halting_probability, remainders, n_updates]:
x.set_shape([
state_shape[0],
state_shape[1],
])
new_state.set_shape(state_shape)
step += 1
return [
transformed_state, step, halting_probability, remainders, n_updates,
new_state
]
# While loop stops when this predicate is FALSE.
# Ie all (probability < 1-eps AND counter < N) are false.
def should_continue(u0, u1, halting_probability, u2, n_updates, u3):
del u0, u1, u2, u3
return tf.reduce_any(
tf.logical_and(
tf.less(halting_probability, threshold),
tf.less(n_updates, act_max_steps)))
# Do while loop iterations until predicate above is false.
(_, _, _, remainder, n_updates, new_state) = tf.while_loop(
should_continue, ut_function,
(state, step, halting_probability, remainders, n_updates, previous_state))
ponder_times = n_updates
remainders = remainder
tf.contrib.summary.scalar("ponder_times", tf.reduce_mean(ponder_times))
return new_state, (ponder_times, remainders)
def _ffn_layer_multi_inputs(inputs_list,
hparams,
ffn_layer_type="dense",
name="ffn",
kernel_initializer=None,
bias_initializer=None,
activation=None,
pad_remover=None,
preprocess=False,
postprocess=False):
"""Implements a Feed-forward layer with multiple inputs, pad-removing, etc.
Args:
inputs_list: list of input tensors
hparams: hyper-parameters
ffn_layer_type: dense / dense_dropconnect / dense_relu_dense
name: name
kernel_initializer: kernel initializer
bias_initializer: bias initializer
activation: activation function
pad_remover: pad remover
preprocess: if preprocess the input
postprocess: if postprocess the output
Returns:
a tensor
Raises:
ValueError: Unknown ffn_layer type.
"""
# need at least one input
num_inputs = len(inputs_list)
assert num_inputs > 0
if preprocess and num_inputs == 1:
inputs_list[0] = common_layers.layer_preprocess(inputs_list[0], hparams)
if postprocess:
original_inputs = inputs_list[0]
# the output size is the hidden size of the main inputs
main_input = inputs_list[0]
original_shape = common_layers.shape_list(main_input)
assert hparams.hidden_size == common_layers.shape_list(main_input)[-1]
# all the inputs must have the same shape as the main input
for inputs in inputs_list:
main_input.get_shape().assert_is_compatible_with(inputs.get_shape())
def remove_pads(x):
original_shape = common_layers.shape_list(x)
# Collapse `x` across examples, and remove padding positions.
x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0))
x = tf.expand_dims(pad_remover.remove(x), axis=0)
return x
if pad_remover:
for i, inputs in enumerate(inputs_list):
inputs_list[i] = remove_pads(inputs)
ffn_inputs = (
inputs_list[0]
if len(inputs_list) == 1 else tf.concat(inputs_list, axis=-1))
if ffn_layer_type == "dense":
output = common_layers.dense(
ffn_inputs,
hparams.hidden_size,
name=name,
activation=activation,
use_bias=True,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
elif ffn_layer_type == "dense_dropconnect":
output = common_layers.dense_dropconnect(
ffn_inputs,
hparams.hidden_size,
name=name,
dropconnect_dropout=hparams.dropconnect_dropout,
output_activation=activation)
postprocess = False # no dropout on the output unit
elif ffn_layer_type == "dense_relu_dense":
output = common_layers.dense_relu_dense(
ffn_inputs,
hparams.filter_size,
hparams.hidden_size,
name=name,
dropout=hparams.relu_dropout,
output_activation=activation,
)
else:
raise ValueError("Unknown ffn_layer type: %s" % ffn_layer_type)
if pad_remover:
# Restore `output` to the original shape of `x`, including padding.
output = tf.reshape(
pad_remover.restore(tf.squeeze(output, axis=0)), original_shape)
if postprocess:
if num_inputs == 1:
output = common_layers.layer_postprocess(original_inputs, output, hparams)
else:  # only dropout (no residual)
hp = copy.copy(hparams)
hp.layer_postprocess_sequence = hp.layer_postprocess_sequence.replace(
"a", "")
output = common_layers.layer_postprocess(original_inputs, output, hp)
return output
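# Minimal usage sketch (hypothetical tensors, dense ffn_layer_type), mirroring
# how the gate computations above call this helper:
#
#   gate = _ffn_layer_multi_inputs(
#       [transformed_state, state], hparams,
#       ffn_layer_type="dense", name="transform",
#       bias_initializer=tf.constant_initializer(1.0),
#       activation=tf.sigmoid, pad_remover=pad_remover,
#       preprocess=True, postprocess=True)
#
# Multiple inputs are concatenated on the channel axis (after optional pad
# removal), projected to hparams.hidden_size, and postprocessed against the
# first input in the list; preprocessing is only applied when a single input
# is passed.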
def fill_memory_slot(memory, value, index):
"""Fills the memory slot at a particular index with the given value.
Args:
memory: a 4-d tensor [memory_size, batch, length, channel] containing
the state of all steps
value: a 3-d tensor [batch, length, channel] as the state
index: integer in [0, memory_size)
Returns:
filled memory
"""
mask = tf.to_float(
tf.one_hot(index,
tf.shape(memory)[0])[:, None, None, None])
fill_memory = (1 - mask) * memory + mask * value[None, ...]
return fill_memory
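# Example (hypothetical shapes): for memory of shape [memory_size, batch,
# length, channel], fill_memory_slot(memory, value, 0) returns a tensor equal
# to memory except that slot 0 now holds `value`; the one-hot mask leaves all
# other slots untouched, which is what the dwa recurrence relies on.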
def add_depth_embedding(x):
"""Add n-dimensional embedding as the depth embedding (timing signal).
Adds embeddings to represent the position of the step in the recurrent
tower.
Args:
x: a tensor with shape [max_step, batch, length, depth]
Returns:
a Tensor the same shape as x.
"""
x_shape = common_layers.shape_list(x)
depth = x_shape[-1]
num_steps = x_shape[0]
shape = [num_steps, 1, 1, depth]
depth_embedding = (
tf.get_variable(
"depth_embedding",
shape,
initializer=tf.random_normal_initializer(0, depth**-0.5)) * (depth**
0.5))
x += depth_embedding
return x
def step_preprocess(x, step, hparams):
"""Preprocess the input at the beginning of each step.
Args:
x: input tensor
step: step
hparams: model hyper-parameters
Returns:
preprocessed input.
"""
original_channel_size = common_layers.shape_list(x)[-1]
if hparams.add_position_timing_signal:
x = add_position_timing_signal(x, step, hparams)
if hparams.add_step_timing_signal:
x = add_step_timing_signal(x, step, hparams)
if ((hparams.add_position_timing_signal or hparams.add_step_timing_signal)
and hparams.add_or_concat_timing_signal == "concat"):
# linear projection to the original dimension of x
x = common_layers.dense(
x, original_channel_size, activation=None, use_bias=False)
if hparams.add_sru:
x = common_layers.sru(x)
return x
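# Sketch of the per-step preprocessing above (hypothetical hparams): with
# add_position_timing_signal=True, add_step_timing_signal=True and
# add_or_concat_timing_signal="concat", both signals are concatenated onto x
# and a bias-free dense layer projects the result back to the original channel
# size, so the recurrent state keeps a constant width across steps.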
def add_position_timing_signal(x, step, hparams):
"""Add n-dimensional embedding as the position (horizontal) timing signal.
Args:
x: a tensor with shape [batch, length, depth]
step: step
hparams: model hyper parameters
Returns:
a Tensor with the same shape as x.
"""
if not hparams.position_start_index:
index = 0
elif hparams.position_start_index == "random":
# Shift all positions randomly
# TODO(dehghani): What would be reasonable for max number of shift?
index = tf.random_uniform(
[], maxval=common_layers.shape_list(x)[1], dtype=tf.int32)
elif hparams.position_start_index == "step":
# Shift positions based on the step
num_steps = (
hparams.act_max_steps
if hparams.recurrence_type == "act" else hparams.num_rec_steps)
index = tf.cast(
common_layers.shape_list(x)[1] * step / num_steps, dtype=tf.int32)
# No need for the timing signal in the encoder/decoder input preparation
assert hparams.pos is None
length = common_layers.shape_list(x)[1]
channels = common_layers.shape_list(x)[2]
signal = common_attention.get_timing_signal_1d(
length, channels, start_index=index)
if hparams.add_or_concat_timing_signal == "add":
x_with_timing = x + signal
elif hparams.add_or_concat_timing_signal == "concat":
batch_size = common_layers.shape_list(x)[0]
signal_tiled = tf.tile(signal, [batch_size, 1, 1])
x_with_timing = tf.concat((x, signal_tiled), axis=-1)
return x_with_timing
def add_step_timing_signal(x, step, hparams):
"""Add n-dimensional embedding as the step (vertical) timing signal.
Args:
x: a tensor with shape [batch, length, depth]
step: step
hparams: model hyper parameters
Returns:
a Tensor with the same shape as x.
"""
num_steps = (
hparams.act_max_steps
if hparams.recurrence_type == "act" else hparams.num_rec_steps)
channels = common_layers.shape_list(x)[-1]
if hparams.step_timing_signal_type == "learned":
signal = common_attention.get_layer_timing_signal_learned_1d(
channels, step, num_steps)
elif hparams.step_timing_signal_type == "sinusoid":
signal = common_attention.get_layer_timing_signal_sinusoid_1d(
channels, step, num_steps)
if hparams.add_or_concat_timing_signal == "add":
x_with_timing = x + signal
elif hparams.add_or_concat_timing_signal == "concat":
batch_size = common_layers.shape_list(x)[0]
length = common_layers.shape_list(x)[1]
signal_tiled = tf.tile(signal, [batch_size, length, 1])
x_with_timing = tf.concat((x, signal_tiled), axis=-1)
return x_with_timing
| apache-2.0 | 4,190,197,228,734,371,300 | 33.378112 | 80 | 0.658831 | false |
SEMAFORInformatik/femagtools | femagtools/gmsh.py | 1 | 3903 | # -*- coding: utf-8 -*-
"""
femagtools.gmsh
~~~~~~~~~~~~~~~~
Handle gmsh models
"""
import numpy as np
import logging
class Gmsh(object):
def __init__(self, mshfile):
import meshio
self.m = meshio.read(mshfile)
self.r = np.linalg.norm(self.m.points, axis=1)
self.phi = [np.arctan2(p[1], p[0]) for p in self.m.points]
def get_section_angles(self):
return np.min(self.phi), np.max(self.phi)
def get_section_radius(self):
return np.min(self.r), np.max(self.r)
def get_subregions(self):
return self.m.field_data.keys()
def get_points(self, srname):
"""return x,y coordinates of all points in subregion srname"""
srid = self.m.field_data[srname][0]
trids = [i for i, k in enumerate(
self.m.cell_data['gmsh:physical'][0]) if k == srid]
quids = [i for i, k in enumerate(
self.m.cell_data['gmsh:physical'][1]) if k == srid]
p = np.unique(np.concatenate((self.m.cells[0].data[trids].flatten(),
self.m.cells[1].data[quids].flatten())))
return self.m.points[p]
def get_location(self, srname):
"""return x,y coordinates of first element (triangle, quad) in subregion srname"""
srid = self.m.field_data[srname][0]
eids = [i for i, k in enumerate(self.m.cell_data['gmsh:physical'][0]) if k == srid]
if eids:
return (np.sum(self.m.points[self.m.cells[0].data[eids[0]]], axis=0)/3)[:2]
eids = [i for i, k in enumerate(self.m.cell_data['gmsh:physical'][1]) if k == srid]
if eids:
return (np.sum(self.m.points[self.m.cells[1].data[eids[0]]], axis=0)/4)[:2]
raise ValueError("subregion '{}' not found".format(srname))
def get_corners(self, srname):
"""return corner points in counterclockwise order"""
p = self.get_points(srname)
corner = []
# 1: lower left
for p1 in p:
x = []
for p2 in p:
if p1[0] > p2[0]:
x.append(p2)
break
if not x:
corner.append(p1[:2])
# 2: lower right
for p1 in p:
x = []
for p2 in p:
if p1[1] > p2[1]:
x.append(p2)
break
if not x:
corner.append(p1[:2])
# 3: upper right
for p1 in p:
x = []
for p2 in p:
if p1[0] < p2[0]:
x.append(p2)
break
if not x:
corner.append(p1[:2])
# 4: upper left
for p1 in p:
x = []
for p2 in p:
if p1[1] < p2[1]:
x.append(p2)
break
if not x:
corner.append(p1[:2])
return corner
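# Reading of the loops above: each pass keeps the point that no other point
# beats in one direction (minimal x, minimal y, maximal x, maximal y), i.e.
# the leftmost, lowest, rightmost and topmost points of the subregion, which
# for a roughly rectangular section are its corners in counterclockwise order.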
def get_axis_angle(self, srname):
"""returns angle of axis in degrees"""
corners = self.get_corners(srname)
logging.debug("Corners %s of '%s'", corners, srname)
l = corners[3], corners[2]
alfa = np.arctan2(l[0][1] - l[1][1],
l[0][0] - l[1][0])/np.pi*180
logging.debug("Line l %s angle %s", l, alfa)
return alfa
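# Minimal usage sketch (hypothetical mesh file name):
#
#   g = Gmsh('motor.msh')
#   rmin, rmax = g.get_section_radius()
#   amin, amax = g.get_section_angles()
#   for sr in g.get_subregions():
#       print(sr, g.get_location(sr))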
if __name__ == "__main__":
import femagtools.isa7
import logging
import os
import sys
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = sys.stdin.readline().strip()
isa = femagtools.isa7.read(filename)
basename = os.path.splitext(os.path.basename(filename))[0]
with open(basename + '.msh', 'w') as f:
f.write('\n'.join(isa.msh()))
| bsd-2-clause | 7,853,747,746,885,079,000 | 31.256198 | 95 | 0.487061 | false |
broxtronix/distributed | distributed/tests/test_nanny.py | 1 | 3622 | from __future__ import print_function, division, absolute_import
from datetime import datetime
import os
import sys
from time import time
import pytest
from toolz import valmap
from tornado.tcpclient import TCPClient
from tornado.iostream import StreamClosedError
from tornado import gen
from distributed import Nanny, rpc, Scheduler
from distributed.core import connect, read, write, dumps, loads
from distributed.utils import ignoring
from distributed.utils_test import gen_cluster
from distributed.nanny import isalive
@gen_cluster(ncores=[])
def test_nanny(s):
n = Nanny(s.ip, s.port, ncores=2, ip='127.0.0.1', loop=s.loop)
yield n._start(0)
nn = rpc(ip=n.ip, port=n.port)
assert isalive(n.process) # alive
assert s.ncores[n.worker_address] == 2
assert s.worker_info[n.worker_address]['services']['nanny'] > 1024
yield nn.kill()
assert not n.process
assert n.worker_address not in s.ncores
assert n.worker_address not in s.worker_info
yield nn.kill()
assert n.worker_address not in s.ncores
assert n.worker_address not in s.worker_info
assert not n.process
yield nn.instantiate()
assert isalive(n.process)
assert s.ncores[n.worker_address] == 2
assert s.worker_info[n.worker_address]['services']['nanny'] > 1024
yield nn.terminate()
assert not n.process
yield n._close()
@gen_cluster(ncores=[], timeout=20)
def test_nanny_process_failure(s):
n = Nanny(s.ip, s.port, ncores=2, ip='127.0.0.1', loop=s.loop)
yield n._start()
nn = rpc(ip=n.ip, port=n.port)
first_dir = n.worker_dir
assert os.path.exists(first_dir)
original_process = n.process
ww = rpc(ip=n.ip, port=n.worker_port)
yield ww.update_data(data=valmap(dumps, {'x': 1, 'y': 2}))
with ignoring(StreamClosedError):
yield ww.compute(function=dumps(sys.exit),
args=dumps((0,)),
key='z')
start = time()
while n.process is original_process: # wait while process dies
yield gen.sleep(0.01)
assert time() - start < 5
start = time()
while not isalive(n.process): # wait while process comes back
yield gen.sleep(0.01)
assert time() - start < 5
start = time()
while n.worker_address not in s.ncores or n.worker_dir is None:
yield gen.sleep(0.01)
assert time() - start < 5
second_dir = n.worker_dir
yield n._close()
assert not os.path.exists(second_dir)
assert not os.path.exists(first_dir)
assert first_dir != n.worker_dir
nn.close_streams()
s.stop()
@gen_cluster(ncores=[])
def test_monitor_resources(s):
pytest.importorskip('psutil')
n = Nanny(s.ip, s.port, ncores=2, ip='127.0.0.1', loop=s.loop)
yield n._start()
nn = rpc(ip=n.ip, port=n.port)
assert isalive(n.process)
d = n.resource_collect()
assert {'cpu_percent', 'memory_percent'}.issubset(d)
assert 'timestamp' in d
stream = yield connect(ip=n.ip, port=n.port)
yield write(stream, {'op': 'monitor_resources', 'interval': 0.01})
for i in range(3):
msg = yield read(stream)
assert isinstance(msg, dict)
assert {'cpu_percent', 'memory_percent'}.issubset(msg)
stream.close()
yield n._close()
s.stop()
@gen_cluster(ncores=[])
def test_run(s):
pytest.importorskip('psutil')
n = Nanny(s.ip, s.port, ncores=2, ip='127.0.0.1', loop=s.loop)
yield n._start()
nn = rpc(n.address)
response = yield nn.run(function=dumps(lambda: 1))
assert response['status'] == 'OK'
assert loads(response['result']) == 1
| bsd-3-clause | -1,880,225,421,499,286,800 | 26.648855 | 70 | 0.6455 | false |
mavit/ansible | lib/ansible/modules/network/cnos/cnos_backup.py | 10 | 11252 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Backup Config to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_backup
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Backup the current running or startup configuration to a
remote server on devices running Lenovo CNOS
description:
- This module allows you to work with switch configurations. It provides a
way to back up the running or startup configurations of a switch to a
remote server. This is achieved by periodically saving a copy of the
startup or running configuration of the network device to a remote server
using FTP, SFTP, TFTP, or SCP. The first step is to create a directory from
where the remote server can be reached. The next step is to provide the
full file path of the location where the configuration will be backed up.
Authentication details required by the remote server must be provided as
well. This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory where the
playbook is run. For more information about this module from Lenovo and
customizing its usage for your use cases, please visit
U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_backup.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
configType:
description:
- This specifies what type of configuration will be backed up. The
choices are the running or startup configurations. There is no
default value, so it will result in an error if the input is
incorrect.
required: Yes
default: Null
choices: [running-config, startup-config]
protocol:
description:
- This refers to the protocol used by the network device to
interact with the remote server to where to upload the backup
configuration. The choices are FTP, SFTP, TFTP, or SCP. Any other
protocols will result in error. If this parameter is
not specified, there is no default value to be used.
required: Yes
default: Null
choices: [SFTP, SCP, FTP, TFTP]
serverip:
description:
- This specifies the IP Address of the remote server to where the
configuration will be backed up.
required: Yes
default: Null
rcpath:
description:
- This specifies the full file path where the configuration file
will be copied on the remote server. In case the relative path is
used as the variable value, the root folder for the user of the
server needs to be specified.
required: Yes
default: Null
serverusername:
description:
- Specify the username for the server relating to the protocol
used.
required: Yes
default: Null
serverpassword:
description:
- Specify the password for the server relating to the protocol
used.
required: Yes
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_backup.
These are written in the main.yml file of the tasks directory.
---
- name: Test Running Config Backup
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Startup Config Backup
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Running Config Backup -TFTP
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Startup Config Backup - TFTP
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Config file tranferred to server"
'''
import sys
import time
import socket
import array
import json
import re
import os
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except ImportError:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
# Utility method to back up the running config or startup config
# This method supports only SCP or SFTP or FTP or TFTP
# Tuning of timeout parameter is pending
def doConfigBackUp(module, prompt, answer):
host = module.params['host']
server = module.params['serverip']
username = module.params['serverusername']
password = module.params['serverpassword']
protocol = module.params['protocol'].lower()
rcPath = module.params['rcpath']
configType = module.params['configType']
confPath = rcPath + host + '_' + configType + '.txt'
retVal = ''
# config backup command happens here
command = "copy " + configType + " " + protocol + " " + protocol + "://"
command = command + username + "@" + server + "/" + confPath
command = command + " vrf management\n"
cnos.debugOutput(command + "\n")
# cnos.checkForFirstTimeAccess(module, command, 'yes/no', 'yes')
cmd = []
if(protocol == "scp"):
scp_cmd1 = [{'command': command, 'prompt': 'timeout:', 'answer': '0'}]
scp_cmd2 = [{'command': '\n', 'prompt': 'Password:',
'answer': password}]
cmd.extend(scp_cmd1)
cmd.extend(scp_cmd2)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
elif(protocol == "sftp"):
sftp_cmd = [{'command': command, 'prompt': 'Password:',
'answer': password}]
cmd.extend(sftp_cmd)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
elif(protocol == "ftp"):
ftp_cmd = [{'command': command, 'prompt': 'Password:',
'answer': password}]
cmd.extend(ftp_cmd)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
elif(protocol == "tftp"):
command = "copy " + configType + " " + protocol + " " + protocol
command = command + "://" + server + "/" + confPath
command = command + + " vrf management\n"
# cnos.debugOutput(command)
tftp_cmd = [{'command': command, 'prompt': None, 'answer': None}]
cmd.extend(tftp_cmd)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
else:
return "Error-110"
return retVal
# EOM
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
configType=dict(required=True),
protocol=dict(required=True),
serverip=dict(required=True),
rcpath=dict(required=True),
serverusername=dict(required=False),
serverpassword=dict(required=False, no_log=True),),
supports_check_mode=False)
outputfile = module.params['outputfile']
protocol = module.params['protocol'].lower()
output = ''
if(protocol == "tftp" or protocol == "ftp" or
protocol == "sftp" or protocol == "scp"):
transfer_status = doConfigBackUp(module, None, None)
else:
transfer_status = "Invalid Protocol option"
output = output + "\n Config Back Up status \n" + transfer_status
# Save it into the file
path = outputfile.rsplit('/', 1)
# cnos.debugOutput(path[0])
if not os.path.exists(path[0]):
os.makedirs(path[0])
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
        module.exit_json(changed=True, msg="Config file transferred to server")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,848,159,835,767,778,600 | 37.40273 | 114 | 0.648507 | false |
GastonLab/ddb-toil_test | test.py | 1 | 1936 | #!/usr/bin/env python
from toil.job import Job
import os
def globalFileStoreJobFn(job):
job.fileStore.logToMaster("The following example exercises all the"
" methods provided by the"
" toil.fileStore.FileStore class")
scratchFile = job.fileStore.getLocalTempFile() # Create a local
# temporary file.
with open(scratchFile, 'w') as fH: # Write something in the
# scratch file.
fH.write("What a tangled web we weave")
# Write a copy of the file into the file-store;
# fileID is the key that can be used to retrieve the file.
fileID = job.fileStore.writeGlobalFile(scratchFile) #This write
# is asynchronous by default
# Write another file using a stream; fileID2 is the
# key for this second file.
with job.fileStore.writeGlobalFileStream(cleanup=True) as (fH, fileID2):
fH.write("Out brief candle")
# Now read the first file; scratchFile2 is a local copy of the file
# that is read only by default.
scratchFile2 = job.fileStore.readGlobalFile(fileID)
    # Read the first file again, this time to a desired location: scratchFile3.
scratchFile3 = os.path.join(job.fileStore.getLocalTempDir(), "foo.txt")
job.fileStore.readGlobalFile(fileID, userPath=scratchFile3)
    # Read the second file using a stream.
with job.fileStore.readGlobalFileStream(fileID2) as fH:
print fH.read() #This prints "Out brief candle"
# Delete the first file from the global file-store.
job.fileStore.deleteGlobalFile(fileID)
# It is unnecessary to delete the file keyed by fileID2
# because we used the cleanup flag, which removes the file after this
# job and all its successors have run (if the file still exists)
if __name__ == "__main__":
options = Job.Runner.getDefaultOptions("./toilWorkflowRun")
Job.Runner.startToil(Job.wrapJobFn(globalFileStoreJobFn), options) | mit | -7,528,098,965,882,290,000 | 36.980392 | 76 | 0.692665 | false |
Wilee999/panda3d | direct/src/showbase/LeakDetectors.py | 11 | 12662 | # objects that report different types of leaks to the ContainerLeakDetector
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from direct.showbase.Job import Job
import __builtin__, gc
class LeakDetector:
def __init__(self):
# put this object just under __builtins__ where the
# ContainerLeakDetector will find it quickly
if not hasattr(__builtin__, "leakDetectors"):
__builtin__.leakDetectors = {}
self._leakDetectorsKey = self.getLeakDetectorKey()
if __dev__:
assert self._leakDetectorsKey not in leakDetectors
leakDetectors[self._leakDetectorsKey] = self
def destroy(self):
del leakDetectors[self._leakDetectorsKey]
def getLeakDetectorKey(self):
# this string will be shown to the end user and should ideally contain enough information to
# point to what is leaking
return '%s-%s' % (self.__class__.__name__, id(self))
class ObjectTypeLeakDetector(LeakDetector):
def __init__(self, otld, objType, generation):
self._otld = otld
self._objType = objType
self._generation = generation
LeakDetector.__init__(self)
def destroy(self):
self._otld = None
LeakDetector.destroy(self)
def getLeakDetectorKey(self):
return '%s-%s' % (self._objType, self.__class__.__name__)
def __len__(self):
num = self._otld._getNumObjsOfType(self._objType, self._generation)
self._generation = self._otld._getGeneration()
return num
class ObjectTypesLeakDetector(LeakDetector):
# are we accumulating any particular Python object type?
def __init__(self):
LeakDetector.__init__(self)
self._type2ld = {}
self._type2count = {}
self._generation = 0
self._thisLdGen = 0
def destroy(self):
for ld in self._type2ld.itervalues():
ld.destroy()
LeakDetector.destroy(self)
def _recalc(self):
objs = gc.get_objects()
self._type2count = {}
for obj in objs:
objType = safeTypeName(obj)
if objType not in self._type2ld:
self._type2ld[objType] = ObjectTypeLeakDetector(self, objType, self._generation)
self._type2count.setdefault(objType, 0)
self._type2count[objType] += 1
self._generation += 1
def _getGeneration(self):
return self._generation
def _getNumObjsOfType(self, objType, otherGen):
if self._generation == otherGen:
self._recalc()
return self._type2count.get(objType, 0)
def __len__(self):
if self._generation == self._thisLdGen:
self._recalc()
self._thisLdGen = self._generation
return len(self._type2count)
class GarbageLeakDetector(LeakDetector):
# are we accumulating Python garbage?
def __len__(self):
# do a garbage collection
oldFlags = gc.get_debug()
gc.set_debug(0)
gc.collect()
numGarbage = len(gc.garbage)
del gc.garbage[:]
gc.set_debug(oldFlags)
return numGarbage
class SceneGraphLeakDetector(LeakDetector):
# is a scene graph leaking nodes?
def __init__(self, render):
LeakDetector.__init__(self)
self._render = render
if config.GetBool('leak-scene-graph', 0):
self._leakTaskName = 'leakNodes-%s' % serialNum()
self._leakNode()
def destroy(self):
if hasattr(self, '_leakTaskName'):
taskMgr.remove(self._leakTaskName)
del self._render
LeakDetector.destroy(self)
def __len__(self):
try:
# this will be available when the build server finishes
return self._render.countNumDescendants()
except:
return self._render.getNumDescendants()
def __repr__(self):
return 'SceneGraphLeakDetector(%s)' % self._render
def _leakNode(self, task=None):
self._render.attachNewNode('leakNode-%s' % serialNum())
taskMgr.doMethodLater(10, self._leakNode, self._leakTaskName)
class CppMemoryUsage(LeakDetector):
def __len__(self):
haveMemoryUsage = True
try:
MemoryUsage
except:
haveMemoryUsage = False
if haveMemoryUsage:
return int(MemoryUsage.getCurrentCppSize())
else:
return 0
class TaskLeakDetectorBase:
def _getTaskNamePattern(self, taskName):
# get a generic string pattern from a task name by removing numeric characters
for i in xrange(10):
taskName = taskName.replace('%s' % i, '')
return taskName
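    # illustrative example (task name assumed): _getTaskNamePattern('examine-12345')
    # returns 'examine-', so all 'examine-<doId>' tasks collapse to one pattern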
class _TaskNamePatternLeakDetector(LeakDetector, TaskLeakDetectorBase):
# tracks the number of each individual task type
# e.g. are we leaking 'examine-<doId>' tasks
def __init__(self, taskNamePattern):
self._taskNamePattern = taskNamePattern
LeakDetector.__init__(self)
def __len__(self):
# count the number of tasks that match our task name pattern
numTasks = 0
for task in taskMgr.getTasks():
if self._getTaskNamePattern(task.name) == self._taskNamePattern:
numTasks += 1
for task in taskMgr.getDoLaters():
if self._getTaskNamePattern(task.name) == self._taskNamePattern:
numTasks += 1
return numTasks
def getLeakDetectorKey(self):
return '%s-%s' % (self._taskNamePattern, self.__class__.__name__)
class TaskLeakDetector(LeakDetector, TaskLeakDetectorBase):
# tracks the number task 'types' and creates leak detectors for each task type
def __init__(self):
LeakDetector.__init__(self)
self._taskName2collector = {}
def destroy(self):
for taskName, collector in self._taskName2collector.iteritems():
collector.destroy()
del self._taskName2collector
LeakDetector.destroy(self)
def _processTaskName(self, taskName):
# if this is a new task name pattern, create a leak detector for that pattern
namePattern = self._getTaskNamePattern(taskName)
if namePattern not in self._taskName2collector:
self._taskName2collector[namePattern] = _TaskNamePatternLeakDetector(namePattern)
def __len__(self):
self._taskName2collector = {}
# update our table of task leak detectors
for task in taskMgr.getTasks():
self._processTaskName(task.name)
for task in taskMgr.getDoLaters():
self._processTaskName(task.name)
# are we leaking task types?
return len(self._taskName2collector)
class MessageLeakDetectorBase:
def _getMessageNamePattern(self, msgName):
# get a generic string pattern from a message name by removing numeric characters
for i in xrange(10):
msgName = msgName.replace('%s' % i, '')
return msgName
class _MessageTypeLeakDetector(LeakDetector, MessageLeakDetectorBase):
# tracks the number of objects that are listening to each message
def __init__(self, msgNamePattern):
self._msgNamePattern = msgNamePattern
self._msgNames = set()
LeakDetector.__init__(self)
def addMsgName(self, msgName):
# for efficiency, we keep the actual message names around
# for queries on the messenger
self._msgNames.add(msgName)
def __len__(self):
toRemove = set()
num = 0
for msgName in self._msgNames:
n = messenger._getNumListeners(msgName)
if n == 0:
toRemove.add(msgName)
else:
num += n
# remove message names that are no longer in the messenger
self._msgNames.difference_update(toRemove)
return num
def getLeakDetectorKey(self):
return '%s-%s' % (self._msgNamePattern, self.__class__.__name__)
class _MessageTypeLeakDetectorCreator(Job):
def __init__(self, creator):
Job.__init__(self, uniqueName(typeName(self)))
self._creator = creator
def destroy(self):
self._creator = None
Job.destroy(self)
def finished(self):
Job.finished(self)
def run(self):
for msgName in messenger._getEvents():
yield None
namePattern = self._creator._getMessageNamePattern(msgName)
if namePattern not in self._creator._msgName2detector:
self._creator._msgName2detector[namePattern] = _MessageTypeLeakDetector(namePattern)
self._creator._msgName2detector[namePattern].addMsgName(msgName)
yield Job.Done
class MessageTypesLeakDetector(LeakDetector, MessageLeakDetectorBase):
def __init__(self):
LeakDetector.__init__(self)
self._msgName2detector = {}
self._createJob = None
if config.GetBool('leak-message-types', 0):
self._leakers = []
self._leakTaskName = uniqueName('leak-message-types')
taskMgr.add(self._leak, self._leakTaskName)
def _leak(self, task):
self._leakers.append(DirectObject())
self._leakers[-1].accept('leak-msg', self._leak)
return task.cont
def destroy(self):
if hasattr(self, '_leakTaskName'):
taskMgr.remove(self._leakTaskName)
for leaker in self._leakers:
leaker.ignoreAll()
self._leakers = None
if self._createJob:
self._createJob.destroy()
self._createJob = None
for msgName, detector in self._msgName2detector.iteritems():
detector.destroy()
del self._msgName2detector
LeakDetector.destroy(self)
def __len__(self):
if self._createJob:
if self._createJob.isFinished():
self._createJob.destroy()
self._createJob = None
self._createJob = _MessageTypeLeakDetectorCreator(self)
jobMgr.add(self._createJob)
# are we leaking message types?
return len(self._msgName2detector)
class _MessageListenerTypeLeakDetector(LeakDetector):
# tracks the number of each object type that is listening for events
def __init__(self, typeName):
self._typeName = typeName
LeakDetector.__init__(self)
def __len__(self):
numObjs = 0
for obj in messenger._getObjects():
if typeName(obj) == self._typeName:
numObjs += 1
return numObjs
def getLeakDetectorKey(self):
return '%s-%s' % (self._typeName, self.__class__.__name__)
class _MessageListenerTypeLeakDetectorCreator(Job):
def __init__(self, creator):
Job.__init__(self, uniqueName(typeName(self)))
self._creator = creator
def destroy(self):
self._creator = None
Job.destroy(self)
def finished(self):
Job.finished(self)
def run(self):
for obj in messenger._getObjects():
yield None
tName = typeName(obj)
if tName not in self._creator._typeName2detector:
self._creator._typeName2detector[tName] = (
_MessageListenerTypeLeakDetector(tName))
yield Job.Done
class MessageListenerTypesLeakDetector(LeakDetector):
def __init__(self):
LeakDetector.__init__(self)
self._typeName2detector = {}
self._createJob = None
if config.GetBool('leak-message-listeners', 0):
self._leakers = []
self._leakTaskName = uniqueName('leak-message-listeners')
taskMgr.add(self._leak, self._leakTaskName)
def _leak(self, task):
self._leakers.append(DirectObject())
self._leakers[-1].accept(uniqueName('leak-msg-listeners'), self._leak)
return task.cont
def destroy(self):
if hasattr(self, '_leakTaskName'):
taskMgr.remove(self._leakTaskName)
for leaker in self._leakers:
leaker.ignoreAll()
self._leakers = None
if self._createJob:
self._createJob.destroy()
self._createJob = None
for typeName, detector in self._typeName2detector.iteritems():
detector.destroy()
del self._typeName2detector
LeakDetector.destroy(self)
def __len__(self):
if self._createJob:
if self._createJob.isFinished():
self._createJob.destroy()
self._createJob = None
self._createJob = _MessageListenerTypeLeakDetectorCreator(self)
jobMgr.add(self._createJob)
# are we leaking listener types?
return len(self._typeName2detector)
| bsd-3-clause | 6,030,039,357,654,520,000 | 34.368715 | 100 | 0.614674 | false |
Jokeren/neon | examples/fast-rcnn/inference.py | 1 | 6411 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Test a trained Fast-RCNN model to do object detection using PASCAL VOC dataset.
This test currently runs 1 image at a time.
Reference:
"Fast R-CNN"
http://arxiv.org/pdf/1504.08083v2.pdf
https://github.com/rbgirshick/fast-rcnn
Usage:
python examples/fast-rcnn/inference.py --model_file frcn_vgg.pkl
Notes:
1. For the VGG16 based Fast R-CNN model, testing is supported with a batch size
of 1 image. The testing consumes about 7 GB of memory.
2. During testing/inference, all the selective search ROIs will be used to go
through the network, so the inference time varies based on how many ROIs are in
each image. For PASCAL VOC 2007, the average number of SelectiveSearch ROIs is around
2000.
3. The dataset will cache the preprocessed file and re-use that if the same
configuration of the dataset is used again. The cached file by default is in
~/nervana/data/VOCDevkit/VOC<year>/train_< >.pkl or
~/nervana/data/VOCDevkit/VOC<year>/inference_< >.pkl
The mAP evaluation script is adapted from:
https://github.com/rbgirshick/py-faster-rcnn/commit/45e0da9a246fab5fd86e8c96dc351be7f145499f
"""
import sys
import os
import numpy as np
import heapq
from neon import logger as neon_logger
from neon.data.pascal_voc import PASCAL_VOC_CLASSES, PASCALVOCInference
from neon.util.argparser import NeonArgparser
from neon.util.compat import xrange
from util import create_frcn_model, run_voc_eval
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
assert args.model_file is not None, "need a model file to do Fast R-CNN testing"
# hyperparameters
args.batch_size = 1
n_mb = None
img_per_batch = args.batch_size
rois_per_img = 5403
# setup dataset
image_set = 'test'
year = '2007'
valid_set = PASCALVOCInference(image_set, year, path=args.data_dir,
n_mb=n_mb, rois_per_img=rois_per_img)
# setup models
model = create_frcn_model()
model.load_params(args.model_file)
model.initialize(dataset=valid_set)
# set up the detection params
num_images = valid_set.num_images if n_mb is None else n_mb
num_classes = valid_set.num_classes
image_index = valid_set.image_index
# heuristic: keep an average of 40 detections per class per image prior
# to NMS
max_per_set = 40 * num_images
# heuristic: keep at most 100 detections per class per image prior to NMS
max_per_image = 100
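# illustrative arithmetic (assuming the full VOC 2007 test set of 4952 images):
# max_per_set = 40 * 4952 = 198080 detections kept per class across the set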
# detection threshold for each class (this is adaptively set based on the
# max_per_set constraint)
thresh = -np.inf * np.ones(num_classes)
# top_scores will hold one minheap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(num_classes)]
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
NMS_THRESH = 0.3
neon_logger.display('total batches {}'.format(valid_set.nbatches))
last_strlen = 0
# iterate through minibatches of the dataset
for mb_idx, (x, db) in enumerate(valid_set):
# print testing progress
prt_str = "Finished: {} / {}".format(mb_idx, valid_set.nbatches)
sys.stdout.write('\r' + ' ' * last_strlen + '\r')
sys.stdout.write(prt_str)
last_strlen = len(prt_str)
sys.stdout.flush()
if hasattr(valid_set, 'actual_seq_len'):
model.set_seq_len(valid_set.actual_seq_len)
outputs = model.fprop(x, inference=True)
scores, boxes = valid_set.post_processing(outputs, db)
# Skip the background class, start processing from class 1
for cls in PASCAL_VOC_CLASSES[1:]:
# pick out scores and bboxes replated to this class
cls_ind = PASCAL_VOC_CLASSES.index(cls)
cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = scores[cls_ind]
# only keep that ones with high enough scores
# and use gt_class being 0 as the ss ROIs, not the gt ones
keep = np.where((cls_scores.reshape(-1, 1) >
thresh[cls_ind]) & (db['gt_classes'] == 0))[0]
if len(keep) == 0:
continue
# with these, do nonmaximum suppression
cls_boxes = cls_boxes[keep]
cls_scores = cls_scores[keep]
top_inds = np.argsort(-cls_scores)[:max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds]
# push new scores onto the minheap
for val in cls_scores:
heapq.heappush(top_scores[cls_ind], val)
        # if we've collected more than the max number of detections,
# then pop items off the minheap and update the class threshold
if len(top_scores[cls_ind]) > max_per_set:
while len(top_scores[cls_ind]) > max_per_set:
heapq.heappop(top_scores[cls_ind])
thresh[cls_ind] = top_scores[cls_ind][0]
all_boxes[cls_ind][mb_idx] = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(
np.float32, copy=False)
for j in xrange(1, num_classes):
for i in xrange(num_images):
if len(all_boxes[j][i]) > 0:
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
neon_logger.display('\nApplying NMS to all detections')
all_boxes = valid_set.apply_nms(all_boxes, NMS_THRESH)
neon_logger.display('Evaluating detections')
output_dir = 'frcn_output'
annopath, imagesetfile = valid_set.evaluation(
all_boxes, os.path.join(args.data_dir, output_dir))
run_voc_eval(annopath, imagesetfile, year, image_set, PASCAL_VOC_CLASSES,
os.path.join(args.data_dir, output_dir))
| apache-2.0 | -8,089,277,523,610,130,000 | 37.160714 | 94 | 0.667915 | false |
w1ll1am23/home-assistant | tests/components/ozw/test_climate.py | 14 | 11016 | """Test Z-Wave Multi-setpoint Climate entities."""
from homeassistant.components.climate import ATTR_TEMPERATURE
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_FAN_MODES,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODES,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_IDLE,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
)
from .common import setup_ozw
async def test_climate(hass, climate_data, sent_messages, climate_msg, caplog):
"""Test setting up config entry."""
receive_message = await setup_ozw(hass, fixture=climate_data)
# Test multi-setpoint thermostat (node 7 in dump)
# mode is heat, this should be single setpoint
state = hass.states.get("climate.ct32_thermostat_mode")
assert state is not None
assert state.state == HVAC_MODE_HEAT
assert state.attributes[ATTR_HVAC_MODES] == [
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
]
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 23.1
assert state.attributes[ATTR_TEMPERATURE] == 21.1
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) is None
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) is None
assert state.attributes[ATTR_FAN_MODE] == "Auto Low"
assert state.attributes[ATTR_FAN_MODES] == ["Auto Low", "On Low"]
# Test set target temperature
await hass.services.async_call(
"climate",
"set_temperature",
{"entity_id": "climate.ct32_thermostat_mode", "temperature": 26.1},
blocking=True,
)
assert len(sent_messages) == 1
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
# Celsius is converted to Fahrenheit here!
assert round(msg["payload"]["Value"], 2) == 78.98
assert msg["payload"]["ValueIDKey"] == 281475099443218
# Test hvac_mode with set_temperature
await hass.services.async_call(
"climate",
"set_temperature",
{
"entity_id": "climate.ct32_thermostat_mode",
"temperature": 24.1,
"hvac_mode": "cool",
},
blocking=True,
)
assert len(sent_messages) == 3 # 2 messages
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
# Celsius is converted to Fahrenheit here!
assert round(msg["payload"]["Value"], 2) == 75.38
assert msg["payload"]["ValueIDKey"] == 281475099443218
# Test set mode
await hass.services.async_call(
"climate",
"set_hvac_mode",
{"entity_id": "climate.ct32_thermostat_mode", "hvac_mode": HVAC_MODE_HEAT_COOL},
blocking=True,
)
assert len(sent_messages) == 4
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 3, "ValueIDKey": 122683412}
# Test set missing mode
await hass.services.async_call(
"climate",
"set_hvac_mode",
{"entity_id": "climate.ct32_thermostat_mode", "hvac_mode": "fan_only"},
blocking=True,
)
assert len(sent_messages) == 4
assert "Received an invalid hvac mode: fan_only" in caplog.text
# Test set fan mode
await hass.services.async_call(
"climate",
"set_fan_mode",
{"entity_id": "climate.ct32_thermostat_mode", "fan_mode": "On Low"},
blocking=True,
)
assert len(sent_messages) == 5
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 1, "ValueIDKey": 122748948}
# Test set invalid fan mode
await hass.services.async_call(
"climate",
"set_fan_mode",
{"entity_id": "climate.ct32_thermostat_mode", "fan_mode": "invalid fan mode"},
blocking=True,
)
assert len(sent_messages) == 5
assert "Received an invalid fan mode: invalid fan mode" in caplog.text
# Test incoming mode change to auto,
# resulting in multiple setpoints
receive_message(climate_msg)
await hass.async_block_till_done()
state = hass.states.get("climate.ct32_thermostat_mode")
assert state is not None
assert state.state == HVAC_MODE_HEAT_COOL
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes[ATTR_TARGET_TEMP_LOW] == 21.1
assert state.attributes[ATTR_TARGET_TEMP_HIGH] == 25.6
# Test setting high/low temp on multiple setpoints
await hass.services.async_call(
"climate",
"set_temperature",
{
"entity_id": "climate.ct32_thermostat_mode",
"target_temp_low": 20,
"target_temp_high": 25,
},
blocking=True,
)
assert len(sent_messages) == 7 # 2 messages !
msg = sent_messages[-2] # low setpoint
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert round(msg["payload"]["Value"], 2) == 68.0
assert msg["payload"]["ValueIDKey"] == 281475099443218
msg = sent_messages[-1] # high setpoint
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert round(msg["payload"]["Value"], 2) == 77.0
assert msg["payload"]["ValueIDKey"] == 562950076153874
# Test basic/single-setpoint thermostat (node 16 in dump)
state = hass.states.get("climate.komforthaus_spirit_z_wave_plus_mode")
assert state is not None
assert state.state == HVAC_MODE_HEAT
assert state.attributes[ATTR_HVAC_MODES] == [
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
]
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 17.3
assert round(state.attributes[ATTR_TEMPERATURE], 0) == 19
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) is None
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) is None
assert state.attributes[ATTR_PRESET_MODES] == [
"none",
"Heat Eco",
"Full Power",
"Manufacturer Specific",
]
# Test set target temperature
await hass.services.async_call(
"climate",
"set_temperature",
{
"entity_id": "climate.komforthaus_spirit_z_wave_plus_mode",
"temperature": 28.0,
},
blocking=True,
)
assert len(sent_messages) == 8
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {
"Value": 28.0,
"ValueIDKey": 281475250438162,
}
# Test set preset mode
await hass.services.async_call(
"climate",
"set_preset_mode",
{
"entity_id": "climate.komforthaus_spirit_z_wave_plus_mode",
"preset_mode": "Heat Eco",
},
blocking=True,
)
assert len(sent_messages) == 9
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {
"Value": 11,
"ValueIDKey": 273678356,
}
# Test set preset mode None
# This preset should set and return to current hvac mode
await hass.services.async_call(
"climate",
"set_preset_mode",
{
"entity_id": "climate.komforthaus_spirit_z_wave_plus_mode",
"preset_mode": "none",
},
blocking=True,
)
assert len(sent_messages) == 10
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {
"Value": 1,
"ValueIDKey": 273678356,
}
# Test set invalid preset mode
await hass.services.async_call(
"climate",
"set_preset_mode",
{
"entity_id": "climate.komforthaus_spirit_z_wave_plus_mode",
"preset_mode": "invalid preset mode",
},
blocking=True,
)
assert len(sent_messages) == 10
assert "Received an invalid preset mode: invalid preset mode" in caplog.text
# test thermostat device without a mode commandclass
state = hass.states.get("climate.danfoss_living_connect_z_v1_06_014g0013_heating_1")
assert state is not None
assert state.state == HVAC_MODE_HEAT
assert state.attributes[ATTR_HVAC_MODES] == [
HVAC_MODE_HEAT,
]
assert state.attributes.get(ATTR_CURRENT_TEMPERATURE) is None
assert round(state.attributes[ATTR_TEMPERATURE], 0) == 21
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) is None
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) is None
assert state.attributes.get(ATTR_PRESET_MODE) is None
assert state.attributes.get(ATTR_PRESET_MODES) is None
# Test set target temperature
await hass.services.async_call(
"climate",
"set_temperature",
{
"entity_id": "climate.danfoss_living_connect_z_v1_06_014g0013_heating_1",
"temperature": 28.0,
},
blocking=True,
)
assert len(sent_messages) == 11
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {
"Value": 28.0,
"ValueIDKey": 281475116220434,
}
await hass.services.async_call(
"climate",
"set_hvac_mode",
{
"entity_id": "climate.danfoss_living_connect_z_v1_06_014g0013_heating_1",
"hvac_mode": HVAC_MODE_HEAT,
},
blocking=True,
)
assert len(sent_messages) == 11
assert "does not support setting a mode" in caplog.text
# test thermostat device without a mode commandclass
state = hass.states.get("climate.secure_srt321_zwave_stat_tx_heating_1")
assert state is not None
assert state.state == HVAC_MODE_HEAT
assert state.attributes[ATTR_HVAC_MODES] == [
HVAC_MODE_HEAT,
]
assert state.attributes.get(ATTR_CURRENT_TEMPERATURE) == 29.0
assert round(state.attributes[ATTR_TEMPERATURE], 0) == 16
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) is None
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) is None
assert state.attributes.get(ATTR_PRESET_MODE) is None
assert state.attributes.get(ATTR_PRESET_MODES) is None
# Test set target temperature
await hass.services.async_call(
"climate",
"set_temperature",
{
"entity_id": "climate.secure_srt321_zwave_stat_tx_heating_1",
"temperature": 28.0,
},
blocking=True,
)
assert len(sent_messages) == 12
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {
"Value": 28.0,
"ValueIDKey": 281475267215378,
}
await hass.services.async_call(
"climate",
"set_hvac_mode",
{
"entity_id": "climate.secure_srt321_zwave_stat_tx_heating_1",
"hvac_mode": HVAC_MODE_HEAT,
},
blocking=True,
)
assert len(sent_messages) == 12
assert "does not support setting a mode" in caplog.text
| apache-2.0 | 1,312,410,803,001,183,000 | 32.688073 | 88 | 0.618646 | false |
HerlanAssis/Django-AulaOsvandoSantana | lib/python2.7/site-packages/django/forms/models.py | 10 | 55377 | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
import warnings
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text, smart_text
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory',
'BaseInlineFormSet', 'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
def save_instance(form, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into model instance ``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed and
just needs to be saved.
"""
if construct:
instance = construct_instance(form, instance, fields, exclude)
opts = instance._meta
if form.errors:
raise ValueError("The %s could not be %s because the data didn't"
" validate." % (opts.object_name, fail_message))
# Wrap up the saving of m2m data as a function.
def save_m2m():
cleaned_data = form.cleaned_data
# Note that for historical reasons we want to include also
# virtual_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.virtual_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(instance, cleaned_data[f.name])
if commit:
# If we are committing, save the instance and the m2m data immediately.
instance.save()
save_m2m()
else:
# We're not committing. Add a method to the form to allow deferred
# saving of m2m data.
form.save_m2m = save_m2m
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.virtual_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if isinstance(f, ManyToManyField):
# If the object doesn't have a primary key yet, just use an empty
# list for its m2m fields. Calling f.value_from_object will raise
# an exception.
if instance.pk is None:
data[f.name] = []
else:
# MultipleChoiceWidget needs a list of pks, not object instances.
qs = f.value_from_object(instance)
if qs._result_cache is not None:
data[f.name] = [item.pk for item in qs]
else:
data[f.name] = list(qs.values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(instance)
return data
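# Illustrative usage (model and field names assumed):
#   model_to_dict(article, fields=['title', 'slug'])
#   -> {'title': 'First post', 'slug': 'first-post'}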
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns a ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_virtual_fields = [f for f in opts.virtual_fields
if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_virtual_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
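# Illustrative usage (model and widget choices assumed):
#   fields_for_model(Author, fields=['name', 'birth_date'],
#                    widgets={'name': forms.Textarea})
#   returns an OrderedDict mapping 'name' and 'birth_date' to form fields.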
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback', None)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, six.string_types) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(opts.model, opts.fields, opts.exclude,
opts.widgets, formfield_callback,
opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in six.iteritems(fields) if not v]
missing_fields = (set(none_model_fields) -
set(new_class.declared_fields.keys()))
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
error_class, label_suffix, empty_permitted)
# Apply ``limit_choices_to`` to each field.
for field_name in self.fields:
formfield = self.fields[field_name]
if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to is not None:
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field, None)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
for field, messages in errors.error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
try:
self.instance = construct_instance(self, self.instance, opts.fields, exclude)
except ValidationError as e:
self._update_errors(e)
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(name)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance
``self.instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
"""
if self.instance.pk is None:
fail_message = 'created'
else:
fail_message = 'changed'
return save_instance(self, self.instance, self._meta.fields,
fail_message, commit, self._meta.exclude,
construct=False)
save.alters_data = True
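    # Illustrative usage of the commit=False idiom (form/model names assumed):
    #   form = ArticleForm(request.POST)
    #   article = form.save(commit=False)
    #   article.author = request.user
    #   article.save()
    #   form.save_m2m()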
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields. If omitted or '__all__',
all fields will be used.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
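# Illustrative usage (model name assumed):
#   AuthorForm = modelform_factory(Author, fields=['name', 'title'])
#   form = AuthorForm(request.POST, instance=author)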
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
self.initial_extra = kwargs.pop('initial', None)
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.rel is not None:
field = field.rel.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# get data for each field of each of unique_check
row_data = (form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None
and form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, "
"which must be unique.") % {
"field": get_text_list(unique_check, six.text_type(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext("Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s.") % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': six.text_type(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
if form in forms_to_delete:
# If the pk is None, it means that the object can't be
# deleted again. Possible reason for this is that the
# object was already deleted from the DB. Refs #14877.
if obj.pk is None:
continue
self.deleted_objects.append(obj)
if commit:
obj.delete()
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be False, but auto-created pk
# fields and AutoFields report editable as True, so check for those cases
# as well.
def pk_is_not_editable(pk):
return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.rel.to._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False):
"""
Returns a FormSet class for the given Django model class.
"""
meta = getattr(form, 'Meta', None)
if meta is None:
meta = type(str('Meta'), (object,), {})
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts, error_messages=error_messages)
FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_min=validate_min, validate_max=validate_max)
FormSet.model = model
return FormSet
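# Illustrative usage sketch -- not part of Django itself. ``Author`` and its
# field names are hypothetical stand-ins for a real application model.
def _example_modelformset_usage():  # never called; documentation sketch only
    from myapp.models import Author  # hypothetical model
    AuthorFormSet = modelformset_factory(Author, fields=('name', 'title'), extra=2)
    # Bind the formset to an explicit queryset; two empty extra forms are appended.
    formset = AuthorFormSet(queryset=Author.objects.all())
    return formset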
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.rel.to()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs, **kwargs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
# Remove the primary key from the form's data, we are only
# creating new instances
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
fk_value = getattr(self.instance, self.fk.rel.field_name)
fk_value = getattr(fk_value, 'pk', fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.rel.get_accessor_name(model=cls.model).replace('+', '')
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.rel.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
kwargs['to_field'] = self.fk.rel.field_name
# If we're adding a new object, ignore a parent's auto-generated key
# as it will be regenerated on the save request.
if self.instance._state.adding:
if kwargs.get('to_field') is not None:
to_field = self.instance._meta.get_field(kwargs['to_field'])
else:
to_field = self.instance._meta.pk
if to_field.has_default():
setattr(self.instance, to_field.attname, None)
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if form._meta.fields:
if isinstance(form._meta.fields, tuple):
form._meta.fields = list(form._meta.fields)
form._meta.fields.append(self.fk.name)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.rel.to != parent_model and
fk.rel.to not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s.%s'."
% (fk_name, parent_model._meta.app_label, parent_model._meta.object_name))
elif len(fks_to_parent) == 0:
raise ValueError(
"'%s.%s' has no field named '%s'."
% (model._meta.app_label, model._meta.object_name, fk_name))
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey)
and (f.rel.to == parent_model
or f.rel.to in parent_model._meta.get_parent_list())
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise ValueError(
"'%s.%s' has no ForeignKey to '%s.%s'." % (
model._meta.app_label,
model._meta.object_name,
parent_model._meta.app_label,
parent_model._meta.object_name,
)
)
else:
raise ValueError(
"'%s.%s' has more than one ForeignKey to '%s.%s'." % (
model._meta.app_label,
model._meta.object_name,
parent_model._meta.app_label,
parent_model._meta.object_name,
)
)
return fk
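# Illustrative sketch of what the helper above resolves -- not part of Django.
# ``Author`` and ``Book`` (with ``Book.author`` as the ForeignKey) are hypothetical.
def _example_get_foreign_key_usage():  # never called; documentation sketch only
    from myapp.models import Author, Book  # hypothetical models
    fk = _get_foreign_key(Author, Book)  # discovers Book.author automatically
    fk = _get_foreign_key(Author, Book, fk_name='author')  # or name it explicitly
    return fk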
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'min_num': min_num,
'max_num': max_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
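# Illustrative usage sketch -- not part of Django itself. ``Author``/``Book``
# are hypothetical models where ``Book`` has a ForeignKey to ``Author``.
def _example_inlineformset_usage():  # never called; documentation sketch only
    from myapp.models import Author, Book  # hypothetical models
    BookFormSet = inlineformset_factory(Author, Book, fields=('title',), extra=1)
    author = Author.objects.first()
    # Forms are limited to this author's books; the foreign key is filled in
    # automatically when the formset is saved.
    formset = BookFormSet(instance=author)
    return formset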
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
# if there is no value, fall back to the parent instance (previous behaviour).
return self.parent_instance
# ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_text(value) != force_text(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
method = 'all' if self.queryset._prefetch_related_lookups else 'iterator'
queryset = getattr(self.queryset, method)
if self.field.cache_choices:
if self.field.choice_cache is None:
self.field.choice_cache = [
self.choice(obj) for obj in queryset()
]
for choice in self.field.choice_cache:
yield choice
else:
for obj in queryset():
yield self.choice(obj)
def __len__(self):
return (len(self.queryset) +
(1 if self.field.empty_label is not None else 0))
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
def __init__(self, queryset, empty_label="---------", cache_choices=None,
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
*args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
if cache_choices is not None:
warnings.warn("cache_choices has been deprecated and will be "
"removed in Django 1.9.",
RemovedInDjango19Warning, stacklevel=2)
else:
cache_choices = False
self.cache_choices = cache_choices
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.choice_cache = None
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Returns ``limit_choices_to`` for this form field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
# This method is used by ModelChoiceIterator to create the object labels.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return smart_text(obj)
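# A minimal override sketch (illustrative only; assumes a hypothetical
# ``Author`` model with a ``name`` attribute):
#
#     class AuthorChoiceField(ModelChoiceField):
#         def label_from_instance(self, obj):
#             return "Author: %s" % obj.name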
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return ModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return force_text(self.prepare_value(initial_value)) != force_text(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
}
def __init__(self, queryset, cache_choices=None, required=True,
widget=None, label=None, initial=None,
help_text='', *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(queryset, None,
cache_choices, required, widget, label, initial, help_text,
*args, **kwargs)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
qs = self._check_values(value)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, returns a QuerySet of the
corresponding objects. Raises a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
# deduplicate given values to avoid creating many querysets or
# requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, six.text_type) and
not hasattr(value, '_meta')):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in self.prepare_value(initial))
data_set = set(force_text(value) for value in data)
return data_set != initial_set
def modelform_defines_fields(form_class):
return (form_class is not None and (
hasattr(form_class, '_meta') and
(form_class._meta.fields is not None or
form_class._meta.exclude is not None)
))
| mit | -4,313,641,301,726,599,000 | 40.730972 | 116 | 0.586399 | false |
mat12/mytest | lib/python/Plugins/SystemPlugins/CommonInterfaceAssignment/plugin.py | 7 | 26313 | from Screens.Screen import Screen
from Screens.ChannelSelection import ChannelSelectionBase
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.config import config, ConfigNothing
from Components.ConfigList import ConfigList
from Components.Label import Label
from Components.SelectionList import SelectionList
from ServiceReference import ServiceReference
from Plugins.Plugin import PluginDescriptor
from xml.etree.cElementTree import parse as ci_parse
from enigma import eDVBCI_UI, eDVBCIInterfaces, eEnv, eServiceReference, eServiceCenter
from os import path as os_path, fsync, unlink
from boxbranding import getMachineBrand, getMachineName
from Components.NimManager import nimmanager  # used by myProviderSelection.showSatellites
class CIselectMainMenu(Screen):
skin = """
<screen name="CIselectMainMenu" position="center,center" size="500,250" title="CI assignment" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="CiList" position="5,50" size="490,200" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Edit"))
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.close,
"ok": self.greenPressed,
"cancel": self.close
}, -1)
NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
print "[CI_Wizzard] FOUND %d CI Slots " % NUM_CI
self.state = { }
self.list = [ ]
if NUM_CI > 0:
for slot in range(NUM_CI):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
if state == 0:
appname = _("Slot %d") %(slot+1) + " - " + _("no module found")
elif state == 1:
appname = _("Slot %d") %(slot+1) + " - " + _("init modules")
elif state == 2:
appname = _("Slot %d") %(slot+1) + " - " + eDVBCI_UI.getInstance().getAppName(slot)
self.list.append( (appname, ConfigNothing(), 0, slot) )
else:
self.list.append( (_("Slot %d") %(slot+1) + " - " + _("no module found") , ConfigNothing(), 1, -1) )
else:
self.list.append( (_("no CI slots found") , ConfigNothing(), 1, -1) )
menuList = ConfigList(self.list)
menuList.list = self.list
menuList.l.setList(self.list)
self["CiList"] = menuList
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("CI assignment"))
def greenPressed(self):
cur = self["CiList"].getCurrent()
if cur and len(cur) > 2:
action = cur[2]
slot = cur[3]
if action == 1:
print "[CI_Wizzard] there is no CI Slot in your %s %s" % (getMachineBrand(), getMachineName())
else:
print "[CI_Wizzard] selected CI Slot : %d" % slot
if config.usage.setup_level.index > 1: # advanced
self.session.open(CIconfigMenu, slot)
else:
self.session.open(easyCIconfigMenu, slot)
class CIconfigMenu(Screen):
skin = """
<screen name="CIconfigMenu" position="center,center" size="560,440" title="CI assignment" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="CAidList_desc" render="Label" position="5,50" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget source="CAidList" render="Label" position="5,80" size="550,45" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<ePixmap pixmap="div-h.png" position="0,125" zPosition="1" size="560,2" />
<widget source="ServiceList_desc" render="Label" position="5,130" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget name="ServiceList" position="5,160" size="550,250" zPosition="1" scrollbarMode="showOnDemand" />
<widget source="ServiceList_info" render="Label" position="5,160" size="550,250" zPosition="2" font="Regular;20" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, ci_slot="9"):
Screen.__init__(self, session)
self.ci_slot=ci_slot
self.filename = eEnv.resolve("${sysconfdir}/enigma2/ci") + str(self.ci_slot) + ".xml"
self["key_red"] = StaticText(_("Delete"))
self["key_green"] = StaticText(_("Add service"))
self["key_yellow"] = StaticText(_("Add provider"))
self["key_blue"] = StaticText(_("Select CAId"))
self["CAidList_desc"] = StaticText(_("Assigned CAIds:"))
self["CAidList"] = StaticText()
self["ServiceList_desc"] = StaticText(_("Assigned services/provider:"))
self["ServiceList_info"] = StaticText()
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.redPressed,
"yellow": self.yellowPressed,
"blue": self.bluePressed,
"cancel": self.cancel
}, -1)
print "[CI_Wizzard_Config] Configuring CI Slots : %d " % self.ci_slot
i=0
self.caidlist=[]
print eDVBCIInterfaces.getInstance().readCICaIds(self.ci_slot)
for caid in eDVBCIInterfaces.getInstance().readCICaIds(self.ci_slot):
i+=1
self.caidlist.append((str(hex(int(caid))),str(caid),i))
print "[CI_Wizzard_Config_CI%d] read following CAIds from CI: %s" %(self.ci_slot, self.caidlist)
self.selectedcaid = []
self.servicelist = []
self.caids = ""
serviceList = ConfigList(self.servicelist)
serviceList.list = self.servicelist
serviceList.l.setList(self.servicelist)
self["ServiceList"] = serviceList
self.loadXML()
# if config mode != advanced, autoselect all CAIds
if config.usage.setup_level.index <= 1: # advanced
self.selectedcaid=self.caidlist
self.finishedCAidSelection(self.selectedcaid)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("CI assignment"))
def redPressed(self):
self.delete()
def greenPressed(self):
self.session.openWithCallback( self.finishedChannelSelection, myChannelSelection, None)
def yellowPressed(self):
self.session.openWithCallback( self.finishedProviderSelection, myProviderSelection, None)
def bluePressed(self):
self.session.openWithCallback(self.finishedCAidSelection, CAidSelect, self.caidlist, self.selectedcaid)
def cancel(self):
self.saveXML()
activate_all(self)
self.close()
def setServiceListInfo(self):
if len(self.servicelist):
self["ServiceList_info"].setText("")
else:
self["ServiceList_info"].setText(_("No services/providers selected"))
def delete(self):
cur = self["ServiceList"].getCurrent()
if cur and len(cur) > 2:
self.servicelist.remove(cur)
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedChannelSelection(self, *args):
if len(args):
ref=args[0]
service_ref = ServiceReference(ref)
service_name = service_ref.getServiceName()
if find_in_list(self.servicelist, service_name, 0)==False:
split_ref=service_ref.ref.toString().split(":")
if split_ref[0] == "1": # == DVB service and not junk from None
self.servicelist.append( (service_name , ConfigNothing(), 0, service_ref.ref.toString()) )
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedProviderSelection(self, *args):
if len(args)>1: # if nothing was selected, only 1 arg (==None) comes back
name=args[0]
dvbnamespace=args[1]
if find_in_list(self.servicelist, name, 0)==False:
self.servicelist.append( (name , ConfigNothing(), 1, dvbnamespace) )
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedCAidSelection(self, *args):
if len(args):
self.selectedcaid=args[0]
self.caids=""
if len(self.selectedcaid):
for item in self.selectedcaid:
if len(self.caids):
self.caids+= ", " + item[0]
else:
self.caids=item[0]
else:
self.selectedcaid=[]
self.caids=_("no CAId selected")
else:
self.selectedcaid=[]
self.caids=_("no CAId selected")
self["CAidList"].setText(self.caids)
def saveXML(self):
try:
fp = file(self.filename, 'w')
fp.write("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n")
fp.write("<ci>\n")
fp.write("\t<slot>\n")
fp.write("\t\t<id>%s</id>\n" % self.ci_slot)
for item in self.selectedcaid:
if len(self.selectedcaid):
fp.write("\t\t<caid id=\"%s\" />\n" % item[0])
for item in self.servicelist:
if len(self.servicelist):
if item[2]==1:
fp.write("\t\t<provider name=\"%s\" dvbnamespace=\"%s\" />\n" % (item[0], item[3]))
else:
fp.write("\t\t<service name=\"%s\" ref=\"%s\" />\n" % (item[0], item[3]))
fp.write("\t</slot>\n")
fp.write("</ci>\n")
fp.flush()
fsync(fp.fileno())
fp.close()
except:
print "[CI_Config_CI%d] xml not written" %self.ci_slot
unlink(self.filename)
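# For reference, saveXML writes a file shaped like the following (the slot id,
# CAIds, service references and provider names are examples only):
#
# <?xml version="1.0" encoding="utf-8" ?>
# <ci>
#     <slot>
#         <id>0</id>
#         <caid id="0x100" />
#         <service name="Example Channel" ref="1:0:19:132F:3EF:1:C00000:0:0:0:" />
#         <provider name="Example Provider" dvbnamespace="c00000" />
#     </slot>
# </ci>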
def loadXML(self):
if not os_path.exists(self.filename):
return
def getValue(definitions, default):
ret = ""
Len = len(definitions)
return Len > 0 and definitions[Len-1].text or default
try:
tree = ci_parse(self.filename).getroot()
self.read_services=[]
self.read_providers=[]
self.usingcaid=[]
self.ci_config=[]
for slot in tree.findall("slot"):
read_slot = getValue(slot.findall("id"), False).encode("UTF-8")
print "ci " + read_slot
i=0
for caid in slot.findall("caid"):
read_caid = caid.get("id").encode("UTF-8")
self.selectedcaid.append((str(read_caid),str(read_caid),i))
self.usingcaid.append(long(read_caid,16))
i+=1
for service in slot.findall("service"):
read_service_name = service.get("name").encode("UTF-8")
read_service_ref = service.get("ref").encode("UTF-8")
self.read_services.append (read_service_ref)
for provider in slot.findall("provider"):
read_provider_name = provider.get("name").encode("UTF-8")
read_provider_dvbname = provider.get("dvbnamespace").encode("UTF-8")
self.read_providers.append((read_provider_name,read_provider_dvbname))
self.ci_config.append((int(read_slot), (self.read_services, self.read_providers, self.usingcaid)))
except:
print "[CI_Config_CI%d] error parsing xml..." %self.ci_slot
for item in self.read_services:
if len(item):
self.finishedChannelSelection(item)
for item in self.read_providers:
if len(item):
self.finishedProviderSelection(item[0],item[1])
print self.ci_config
self.finishedCAidSelection(self.selectedcaid)
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
class easyCIconfigMenu(CIconfigMenu):
skin = """
<screen name="easyCIconfigMenu" position="center,center" size="560,440" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="ServiceList_desc" render="Label" position="5,50" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget name="ServiceList" position="5,80" size="550,300" zPosition="1" scrollbarMode="showOnDemand" />
<widget source="ServiceList_info" render="Label" position="5,80" size="550,300" zPosition="2" font="Regular;20" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, ci_slot="9"):
Screen.setTitle(self, _("CI assignment"))
ci=ci_slot
CIconfigMenu.__init__(self, session, ci_slot)
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.redPressed,
"yellow": self.yellowPressed,
"cancel": self.cancel
})
class CAidSelect(Screen):
skin = """
<screen name="CAidSelect" position="center,center" size="450,440" title="select CAId's" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="list" position="5,50" size="440,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="450,2" />
<widget source="introduction" render="Label" position="0,400" size="450,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, list, selected_caids):
Screen.__init__(self, session)
self.list = SelectionList()
self["list"] = self.list
for listindex in range(len(list)):
if find_in_list(selected_caids,list[listindex][0],0):
self.list.addSelection(list[listindex][0], list[listindex][1], listindex, True)
else:
self.list.addSelection(list[listindex][0], list[listindex][1], listindex, False)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["introduction"] = StaticText(_("Press OK to select/deselect a CAId."))
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"ok": self.list.toggleSelection,
"cancel": self.cancel,
"green": self.greenPressed,
"red": self.cancel
}, -1)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("select CAId's"))
def greenPressed(self):
list = self.list.getSelectionsList()
print list
self.close(list)
def cancel(self):
self.close()
class myProviderSelection(ChannelSelectionBase):
skin = """
<screen name="myProviderSelection" position="center,center" size="560,440" title="Select provider to add...">
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="550,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="0,400" size="560,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, title):
ChannelSelectionBase.__init__(self, session)
self.onShown.append(self.__onExecCallback)
self["actions"] = ActionMap(["OkCancelActions", "ChannelSelectBaseActions"],
{
"showFavourites": self.doNothing,
"showAllServices": self.cancel,
"showProviders": self.doNothing,
"showSatellites": self.doNothing,
"cancel": self.cancel,
"ok": self.channelSelected
})
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText(_("Press OK to select a provider."))
def doNothing(self):
pass
def __onExecCallback(self):
self.showSatellites()
self.setTitle(_("Select provider to add..."))
def channelSelected(self): # just return selected service
ref = self.getCurrentSelection()
splited_ref=ref.toString().split(":")
if ref.flags == 7 and splited_ref[6] != "0":
self.dvbnamespace=splited_ref[6]
self.enterPath(ref)
else:
self.close(ref.getName(), self.dvbnamespace)
def showSatellites(self):
if not self.pathChangeDisabled:
refstr = '%s FROM SATELLITES ORDER BY satellitePosition'%(self.service_types)
if not self.preEnterPath(refstr):
ref = eServiceReference(refstr)
justSet=False
prev = None
if self.isBasePathEqual(ref):
if self.isPrevPathEqual(ref):
justSet=True
prev = self.pathUp(justSet)
else:
currentRoot = self.getRoot()
if currentRoot is None or currentRoot != ref:
justSet=True
self.clearPath()
self.enterPath(ref, True)
if justSet:
serviceHandler = eServiceCenter.getInstance()
servicelist = serviceHandler.list(ref)
if not servicelist is None:
while True:
service = servicelist.getNext()
if not service.valid(): #check if end of list
break
unsigned_orbpos = service.getUnsignedData(4) >> 16
orbpos = service.getData(4) >> 16
if orbpos < 0:
orbpos += 3600
if service.getPath().find("FROM PROVIDER") != -1:
service_type = _("Providers")
try:
# why do we need this cast?
service_name = str(nimmanager.getSatDescription(orbpos))
except:
if unsigned_orbpos == 0xFFFF: #Cable
service_name = _("Cable")
elif unsigned_orbpos == 0xEEEE: #Terrestrial
service_name = _("Terrestrial")
else:
if orbpos > 1800: # west
orbpos = 3600 - orbpos
h = _("W")
else:
h = _("E")
service_name = ("%d.%d" + h) % (orbpos / 10, orbpos % 10)
service.setName("%s - %s" % (service_name, service_type))
self.servicelist.addService(service)
self.servicelist.finishFill()
if prev is not None:
self.setCurrentSelection(prev)
def cancel(self):
self.close(None)
class myChannelSelection(ChannelSelectionBase):
skin = """
<screen name="myChannelSelection" position="center,center" size="560,440" title="Select service to add...">
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="550,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="0,400" size="560,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, title):
ChannelSelectionBase.__init__(self, session)
self.onShown.append(self.__onExecCallback)
self["actions"] = ActionMap(["OkCancelActions", "TvRadioActions", "ChannelSelectBaseActions"],
{
"showProviders": self.doNothing,
"showSatellites": self.showAllServices,
"showAllServices": self.cancel,
"cancel": self.cancel,
"ok": self.channelSelected,
"keyRadio": self.setModeRadio,
"keyTV": self.setModeTv
})
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("All"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText(_("Favourites"))
self["introduction"] = StaticText(_("Press OK to select a provider."))
def __onExecCallback(self):
self.setModeTv()
self.setTitle(_("Select service to add..."))
def doNothing(self):
pass
def channelSelected(self): # just return selected service
ref = self.getCurrentSelection()
if (ref.flags & 7) == 7:
self.enterPath(ref)
elif not (ref.flags & eServiceReference.isMarker):
ref = self.getCurrentSelection()
self.close(ref)
def setModeTv(self):
self.setTvMode()
self.showFavourites()
def setModeRadio(self):
self.setRadioMode()
self.showFavourites()
def cancel(self):
self.close(None)
def activate_all(session):
NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
print "[CI_Activate] FOUND %d CI Slots " % NUM_CI
if NUM_CI > 0:
ci_config=[]
def getValue(definitions, default):
# Initialize Output
ret = ""
# How many definitions are present
Len = len(definitions)
return Len > 0 and definitions[Len-1].text or default
for ci in range(NUM_CI):
filename = eEnv.resolve("${sysconfdir}/enigma2/ci") + str(ci) + ".xml"
if not os_path.exists(filename):
print "[CI_Activate_Config_CI%d] no config file found" %ci
try:
tree = ci_parse(filename).getroot()
read_services=[]
read_providers=[]
usingcaid=[]
for slot in tree.findall("slot"):
read_slot = getValue(slot.findall("id"), False).encode("UTF-8")
for caid in slot.findall("caid"):
read_caid = caid.get("id").encode("UTF-8")
usingcaid.append(long(read_caid,16))
for service in slot.findall("service"):
read_service_ref = service.get("ref").encode("UTF-8")
read_services.append (read_service_ref)
for provider in slot.findall("provider"):
read_provider_name = provider.get("name").encode("UTF-8")
read_provider_dvbname = provider.get("dvbnamespace").encode("UTF-8")
read_providers.append((read_provider_name,long(read_provider_dvbname,16)))
ci_config.append((int(read_slot), (read_services, read_providers, usingcaid)))
except:
print "[CI_Activate_Config_CI%d] error parsing xml..." %ci
for item in ci_config:
print "[CI_Activate] activate CI%d with following settings:" %item[0]
print item[0]
print item[1]
try:
eDVBCIInterfaces.getInstance().setDescrambleRules(item[0],item[1])
except:
print "[CI_Activate_Config_CI%d] error setting DescrambleRules..." %item[0]
def find_in_list(list, search, listpos=0):
for item in list:
if item[listpos]==search:
return True
return False
global_session = None
def isModule():
if eDVBCIInterfaces.getInstance().getNumOfSlots():
NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
if NUM_CI > 0:
for slot in range(NUM_CI):
state = eDVBCI_UI.getInstance().getState(slot)
if state > 0:
return True
return False
def sessionstart(reason, session):
global global_session
global_session = session
def autostart(reason, **kwargs):
global global_session
if reason == 0:
print "[CI_Assignment] activating ci configs:"
activate_all(global_session)
elif reason == 1:
global_session = None
def main(session, **kwargs):
session.open(CIselectMainMenu)
def menu(menuid, **kwargs):
if menuid == "cam" and isModule():
return [(_("Common Interface Assignment"), main, "ci_assign", 11)]
return [ ]
def Plugins(**kwargs):
if config.usage.setup_level.index > 1:
return [PluginDescriptor( where = PluginDescriptor.WHERE_SESSIONSTART, needsRestart = False, fnc = sessionstart ),
PluginDescriptor( where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart ),
PluginDescriptor( name = _("Common Interface assignment"), description = _("a gui to assign services/providers/caids to common interface modules"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = menu )]
else:
return [PluginDescriptor( where = PluginDescriptor.WHERE_SESSIONSTART, needsRestart = False, fnc = sessionstart ),
PluginDescriptor( where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart ),
PluginDescriptor( name = _("Common Interface assignment"), description = _("a gui to assign services/providers to common interface modules"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = menu )]
| gpl-2.0 | -6,624,194,504,077,798,000 | 39.419355 | 224 | 0.680424 | false |
antoine-levitt/wannier | tests/KaneMele/kanemele.py | 1 | 7793 | from __future__ import division
import sys
from numpy import *
from numpy.linalg import *
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
PLOT_EIG = False
WRITE_AMN = True
#Define some useful matrices
I = eye(2)
sigma_x = array([[0, 1],[1, 0]])
sigma_y = array([[0, -1j],[1j, 0]])
sigma_z = array([[1, 0],[0, -1]])
GammaA = zeros((5,4,4), dtype=complex)
GammaA[0,:,:] = kron(sigma_x, I)
GammaA[1,:,:] = kron(sigma_z, I)
GammaA[2,:,:] = kron(sigma_y, sigma_x)
GammaA[3,:,:] = kron(sigma_y, sigma_y)
GammaA[4,:,:] = kron(sigma_y, sigma_z)
GammaAB = zeros((5,5,4,4), dtype=complex)
for i in range(5):
for j in range(5):
GammaAB[i,j,:,:] = (GammaA[i].dot(GammaA[j]) - GammaA[j].dot(GammaA[i]))/(2*1j)
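#The matrices above follow the Kane-Mele convention (cf. Kane & Mele,
#PRL 95, 146802 (2005)): GammaA = (sx (x) I, sz (x) I, sy (x) sx, sy (x) sy,
#sy (x) sz), where the first Pauli factor acts on the A/B sublattice index and
#the second on spin, and GammaAB[a,b] = [GammaA[a], GammaA[b]] / (2i).
#The Bloch Hamiltonian built below is
#  H(k) = sum_a d_a(k) Gamma^a + sum_{a<b} d_ab(k) Gamma^ab.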
#This function defines the Hamiltonian matrix of the Kane-Mele model.
def Ham(a,t,l_nu,l_R,l_SO,k):
x = k[0]*a/2
y = sqrt(3)*k[1]*a/2
d1 = t*(1+2*cos(x)*cos(y))
d2 = l_nu
d3 = l_R*(1 - cos(x)*cos(y))
d4 = -sqrt(3)*l_R*sin(x)*sin(y)
d12 = -2*t*cos(x)*sin(y)
d15 = l_SO*(2*sin(2*x) - 4*sin(x)*cos(y))
d23 = -l_R*cos(x)*sin(y)
d24 = sqrt(3)*l_R*sin(x)*cos(y)
return d1*GammaA[0,:,:] + d2*GammaA[1,:,:] + d3*GammaA[2,:,:] + d4*GammaA[3,:,:] + \
d12*GammaAB[0,1,:,:] + d15*GammaAB[0,4,:,:] + d23*GammaAB[1,2,:,:] + d24*GammaAB[1,3,:,:]
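#Small sanity-check sketch (not used by the main script below): the Bloch
#Hamiltonian should be Hermitian with four real bands at any k-point.
def _check_ham_example():
    k = [0.1, 0.2]
    H = Ham(1, 1, 0.5, 1, 1, k)  # a, t, l_nu, l_R, l_SO chosen arbitrarily
    assert allclose(H, H.conj().T)  # Hermiticity
    return eigvalsh(H)  # four real eigenvalues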
if (len(sys.argv)>1):
N = int(sys.argv[1])
l_nu = float(sys.argv[2])
else:
N = 0
l_nu = 0
#Define parameters
a = 1
t = 1
l_R = 1 #1#0 #1.5
l_SO = 1
if N==0:
N1 = 100
N2 = 100
l_nu = 1.0 #6
else:
N1 = N
N2 = N
N3 = 1
Ntot = N1*N2*N3
nband = 2
nwannier = 2
nneighbors = 4
#Reciprocal unit cell
Lx = 4*pi/a
Ly = 4*pi/a/sqrt(3)
Lz = 1.
#Create the array of k-points: the mesh of the Brillouin Zone
#Create the correspondence arrays between (i,j,k)-notation (3D) and K-notation (linear)
k_pts = zeros((N1,N2,N3,3))
K_to_ijk = zeros((Ntot,3),dtype = int64)
ijk_to_K = zeros((N1,N2,N3),dtype = int64)
for i in range(N1):
for j in range(N2):
for k in range(N3):
K = i*N2*N3+j*N3+k
ijk_to_K[i,j,k] = K
K_to_ijk[K,:] = [i, j, k]
k_pts[i,j,k,:] = [i*Lx/N1,j*Ly/N2,k*Lz/N3]
#Open mmn file to write stream
mmn=open("kanemele_"+str(l_nu)+"_"+str(N1)+".mmn",'w')
mmn.write("This mmn file was generated by kanemele.py ")
mmn.write("Ntot = "+ str(Ntot) + " nband = " + str(nband) + "\n")
mmn.write(str(nband) + " " + str(Ntot) + " " + str(nneighbors)+"\n")
#Open amn file to write stream
if WRITE_AMN:
amn=open("kanemele_"+str(l_nu)+"_"+str(N1)+".amn",'w')
amn.write("This amn file was generated by kanemele.py ")
amn.write("Ntot = "+ str(Ntot) + " nband = " + str(nband) + "\n")
amn.write(str(nband) + " " + str(Ntot) + " " + str(nwannier)+"\n")
#Define the container array for the eigenvalues of the Hamiltonian
eigs = zeros((N1,N2,N3,4))
#MAIN LOOP
#Go through all points in the Brillouin Zone and compute the overlap matrices Mmn
#between the eigenvectors at each k-point and at its neighbors
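#In Wannier90 conventions the files written below contain
#  M_mn(k,b) = <u_{m,k}|u_{n,k+b}>  (overlaps, .mmn file) and
#  A_mn(k)   = <psi_{m,k}|g_n>      (initial projections, .amn file);
#here the projections are taken directly from the eigenvector components.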
for i in range(N1):
for j in range(N2):
for k in range(N3):
K = ijk_to_K[i,j,k]
#Fill the list of neighboring points of K in K-notation
Kpb_list = []
if(N1>1):
Kpb_list.append( ijk_to_K[(i+1)%N1,j,k])
Kpb_list.append( ijk_to_K[(i-1)%N1,j,k])
if(N2>1):
Kpb_list.append( ijk_to_K[i,(j+1)%N2,k])
Kpb_list.append( ijk_to_K[i,(j-1)%N2,k])
if(N3>1):
Kpb_list.append( ijk_to_K[i,j,(k+1)%N3])
Kpb_list.append( ijk_to_K[i,j,(k-1)%N3])
#Compute the eigenvalues at point K
wK, vK = eigh(Ham(a,t,l_nu,l_R,l_SO, k_pts[i,j,k,0:2]))
eigs[i,j,k,:] = wK
#Write eigenvector in amn file
if WRITE_AMN:
for mn in range(nband**2):
m,n = mod(mn,nband), int(mn/nband)
amn.write(str(m+1)+ ' ' + str(n+1) + ' ' + str(K+1) + ' ' + str(real(vK[n,m])) + ' ' + str(-imag(vK[n,m])) +'\n')
#Compute the eigenvalues at neighboring points
for Kpb in Kpb_list:
iN = K_to_ijk[Kpb,0]
jN = K_to_ijk[Kpb,1]
kN = K_to_ijk[Kpb,2]
k_pt = k_pts[iN,jN,kN,:]
wKpb, vKpb = eigh(Ham(a,t,l_nu,l_R,l_SO,[k_pt[0], k_pt[1]]))
#Compute the overlap at points K and Kpb,
#and write it to mmn file
disp_vec = [0, 0, 0]
if(N1>1):
if((i==0) & (iN == N1-1)):
disp_vec[0] = -1
#print "i,j = "+str((i,j))+" iN,jN = "+str((iN,jN))+" K = "+str(K)+" Kpb = "+str(Kpb)
#print disp_vec
if((i==N1-1) & (iN == 0)):
disp_vec[0] = 1
#print "i,j = "+str((i,j))+" iN,jN = "+str((iN,jN))+" K = "+str(K)+" Kpb = "+str(Kpb)
#print disp_vec
if(N2>1):
if((j==0) & (jN == N2-1)):
disp_vec[1] = -1
#print "i,j = "+str((i,j))+" iN,jN = "+str((iN,jN))+" K = "+str(K)+" Kpb = "+str(Kpb)
#print disp_vec
if((j==N2-1) & (jN == 0)):
disp_vec[1] = 1
#print "i,j = "+str((i,j))+" iN,jN = "+str((iN,jN))+" K = "+str(K)+" Kpb = "+str(Kpb)
#print disp_vec
if(N3>1):
if((k==0) & (kN == N3-1)):
disp_vec[2] = -1
if((k==N3-1) & (kN == 0)):
disp_vec[2] = 1
mmn.write(str(K+1) + ' ' + str(Kpb+1) + ' ' + str(disp_vec[0]) + ' ' + str(disp_vec[1]) + ' ' + str(disp_vec[2]) + '\n')  # K, its neighbor and the displacement vector
for mn in range(nband**2):
m,n = mod(mn,nband), int(mn/nband)
overlap = dot(conj(vK[:,m]),vKpb[:,n])
mmn.write(str(real(overlap)) + ' ' + str(imag(overlap)) + '\n')
mmn.close()
if WRITE_AMN:
    amn.close()
win = open("kanemele_"+str(l_nu)+"_"+str(N1)+".win",'w')
win.write("""num_bands = 2
num_wann = 2
num_iter = 500
num_print_cycles = 1
bands_plot = true
wannier_plot = true
auto_proj = false
begin unit_cell_cart
bohr
""")
win.write(str(2*pi) + " 0 0\n")
win.write("0 " + str(2*pi) + " 0\n")
win.write("0 0 " + str(2*pi) + "\n")
win.write("""end unit_cell_cart
""")
win.write("mp_grid = " + str(N1) + " " + str(N2) + " " + str(N3) + "\n")
win.write("begin kpoints\n")
t1 = [i/N1 for i in range(N1)]
t2 = [i/N2 for i in range(N2)]
t3 = [i/N3 for i in range(N3)]
for i in range(N1):
for j in range(N2):
for k in range(N3):
t1_ = floor(t1[i]*10000)/10000.
t2_ = floor(t2[j]*10000)/10000.
t3_ = floor(t3[k]*10000)/10000.
win.write(str(t1_) + " " + str(t2_) + " " + str(t3_) + "\n")
win.write("end kpoints\n")
win.close()
if(PLOT_EIG):
#Plot eigenvalue surfaces
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection used below
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = meshgrid(range(N1),range(N2))
Z = zeros((N1,N2,4))
for i in range(N1):
for j in range(N2):
for n in range(4):
Z[i,j,n] = eigs[X[i,j],Y[i,j],0,n]
for n in range(4):
ax.plot_surface(X,Y,Z[:,:,n])
l_nu_val = bool(l_nu>3*sqrt(3))*"+ " + bool(l_nu!=3*sqrt(3))*str(l_nu - 3*sqrt(3))
plt.title("Eigenvalues of the Hamiltonian, l_nu = l_nu,c " +l_nu_val)
l_nu_readable = str(int(l_nu*100)/100.)
plt.savefig("eigenvalues_ham_"+l_nu_readable+".pdf")
| mit | -1,328,779,657,670,939,600 | 30.678862 | 185 | 0.478763 | false |
kisel/trex-core | scripts/external_libs/elasticsearch/elasticsearch/connection/base.py | 3 | 4906 | import logging
try:
import simplejson as json
except ImportError:
import json
from ..exceptions import TransportError, HTTP_EXCEPTIONS
logger = logging.getLogger('elasticsearch')
# create the elasticsearch.trace logger, but only set propagate to False if the
# logger hasn't already been configured
_tracer_already_configured = 'elasticsearch.trace' in logging.Logger.manager.loggerDict
tracer = logging.getLogger('elasticsearch.trace')
if not _tracer_already_configured:
tracer.propagate = False
class Connection(object):
"""
Class responsible for maintaining a connection to an Elasticsearch node. It
holds a persistent connection pool to it and its main interface
(`perform_request`) is thread-safe.
Also responsible for logging.
"""
transport_schema = 'http'
def __init__(self, host='localhost', port=9200, use_ssl=False, url_prefix='', timeout=10, **kwargs):
"""
:arg host: hostname of the node (default: localhost)
:arg port: port to use (integer, default: 9200)
:arg url_prefix: optional url prefix for elasticsearch
:arg timeout: default timeout in seconds (float, default: 10)
"""
scheme = self.transport_schema
if use_ssl:
scheme += 's'
self.host = '%s://%s:%s' % (scheme, host, port)
if url_prefix:
url_prefix = '/' + url_prefix.strip('/')
self.url_prefix = url_prefix
self.timeout = timeout
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.host)
def _pretty_json(self, data):
# pretty JSON in tracer curl logs
try:
return json.dumps(json.loads(data), sort_keys=True, indent=2, separators=(',', ': ')).replace("'", r'\u0027')
except (ValueError, TypeError):
# non-json data or a bulk request
return data
def _log_trace(self, method, path, body, status_code, response, duration):
if not tracer.isEnabledFor(logging.INFO) or not tracer.handlers:
return
# include pretty in trace curls
path = path.replace('?', '?pretty&', 1) if '?' in path else path + '?pretty'
if self.url_prefix:
path = path.replace(self.url_prefix, '', 1)
tracer.info("curl -X%s 'http://localhost:9200%s' -d '%s'", method, path, self._pretty_json(body) if body else '')
if tracer.isEnabledFor(logging.DEBUG):
tracer.debug('#[%s] (%.3fs)\n#%s', status_code, duration, self._pretty_json(response).replace('\n', '\n#') if response else '')
def log_request_success(self, method, full_url, path, body, status_code, response, duration):
""" Log a successful API call. """
# TODO: optionally pass in params instead of full_url and do urlencode only when needed
# body has already been serialized to utf-8, deserialize it for logging
# TODO: find a better way to avoid (de)encoding the body back and forth
if body:
body = body.decode('utf-8')
logger.info(
'%s %s [status:%s request:%.3fs]', method, full_url,
status_code, duration
)
logger.debug('> %s', body)
logger.debug('< %s', response)
self._log_trace(method, path, body, status_code, response, duration)
def log_request_fail(self, method, full_url, path, body, duration, status_code=None, response=None, exception=None):
""" Log an unsuccessful API call. """
# do not log 404s on HEAD requests
if method == 'HEAD' and status_code == 404:
return
logger.warning(
'%s %s [status:%s request:%.3fs]', method, full_url,
status_code or 'N/A', duration, exc_info=exception is not None
)
# body has already been serialized to utf-8, deserialize it for logging
# TODO: find a better way to avoid (de)encoding the body back and forth
if body:
body = body.decode('utf-8')
logger.debug('> %s', body)
self._log_trace(method, path, body, status_code, response, duration)
if response is not None:
logger.debug('< %s', response)
def _raise_error(self, status_code, raw_data):
""" Locate appropriate exception and raise it. """
error_message = raw_data
additional_info = None
try:
if raw_data:
additional_info = json.loads(raw_data)
error_message = additional_info.get('error', error_message)
if isinstance(error_message, dict) and 'type' in error_message:
error_message = error_message['type']
except (ValueError, TypeError) as err:
logger.warning('Undecodable raw error response from server: %s', err)
raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info)
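# Subclass sketch (illustrative only, not part of this module): concrete
# transports are expected to implement perform_request() and report through
# the logging helpers above, roughly along these lines (the exact signature
# is an assumption here):
#
#     class DummyConnection(Connection):
#         def perform_request(self, method, url, params=None, body=None,
#                             timeout=None, ignore=()):
#             start = time.time()
#             ...  # perform the actual HTTP call here
#             duration = time.time() - start
#             self.log_request_success(method, self.host + url, url, body,
#                                      200, '{}', duration)
#             return 200, {}, '{}'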
| apache-2.0 | -641,368,096,871,400,000 | 38.564516 | 139 | 0.612923 | false |
xuewei4d/scikit-learn | sklearn/utils/tests/test_fixes.py | 4 | 3281 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import math
import numpy as np
import pytest
import scipy.stats
from sklearn.utils._testing import assert_array_equal
from sklearn.utils.fixes import _joblib_parallel_args
from sklearn.utils.fixes import _object_dtype_isnan
from sklearn.utils.fixes import loguniform
from sklearn.utils.fixes import MaskedArray
@pytest.mark.parametrize('joblib_version', ('0.11', '0.12.0'))
def test_joblib_parallel_args(monkeypatch, joblib_version):
import joblib
monkeypatch.setattr(joblib, '__version__', joblib_version)
if joblib_version == '0.12.0':
# arguments are simply passed through
assert _joblib_parallel_args(prefer='threads') == {'prefer': 'threads'}
assert _joblib_parallel_args(prefer='processes', require=None) == {
'prefer': 'processes', 'require': None}
assert _joblib_parallel_args(non_existing=1) == {'non_existing': 1}
elif joblib_version == '0.11':
# arguments are mapped to the corresponding backend
assert _joblib_parallel_args(prefer='threads') == {
'backend': 'threading'}
assert _joblib_parallel_args(prefer='processes') == {
'backend': 'multiprocessing'}
with pytest.raises(ValueError):
_joblib_parallel_args(prefer='invalid')
assert _joblib_parallel_args(
prefer='processes', require='sharedmem') == {
'backend': 'threading'}
with pytest.raises(ValueError):
_joblib_parallel_args(require='invalid')
with pytest.raises(NotImplementedError):
_joblib_parallel_args(verbose=True)
else:
raise ValueError
@pytest.mark.parametrize("dtype, val", ([object, 1],
[object, "a"],
[float, 1]))
def test_object_dtype_isnan(dtype, val):
X = np.array([[val, np.nan],
[np.nan, val]], dtype=dtype)
expected_mask = np.array([[False, True],
[True, False]])
mask = _object_dtype_isnan(X)
assert_array_equal(mask, expected_mask)
@pytest.mark.parametrize("low,high,base",
[(-1, 0, 10), (0, 2, np.exp(1)), (-1, 1, 2)])
def test_loguniform(low, high, base):
rv = loguniform(base ** low, base ** high)
assert isinstance(rv, scipy.stats._distn_infrastructure.rv_frozen)
rvs = rv.rvs(size=2000, random_state=0)
# Test the basics; right bounds, right size
assert (base ** low <= rvs).all() and (rvs <= base ** high).all()
assert len(rvs) == 2000
# Test that it's actually (fairly) uniform
log_rvs = np.array([math.log(x, base) for x in rvs])
counts, _ = np.histogram(log_rvs)
assert counts.mean() == 200
assert np.abs(counts - counts.mean()).max() <= 40
# Test that random_state works
assert (
loguniform(base ** low, base ** high).rvs(random_state=0)
== loguniform(base ** low, base ** high).rvs(random_state=0)
)
def test_masked_array_deprecated(): # TODO: remove in 1.0
with pytest.warns(FutureWarning, match='is deprecated'):
MaskedArray()
| bsd-3-clause | 1,962,554,988,841,810,400 | 35.054945 | 79 | 0.605608 | false |
2ndQuadrant/ansible | lib/ansible/modules/cloud/vultr/vultr_server.py | 2 | 31379 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vultr_server
short_description: Manages virtual servers on Vultr.
description:
- Deploy, start, stop, update, restart, reinstall servers.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the server.
required: true
aliases: [ label ]
hostname:
description:
- Hostname to assign to this server.
os:
description:
- The operating system.
- Required if the server does not yet exist and is not restoring from a snapshot.
snapshot:
version_added: "2.8"
description:
- Name of snapshot to restore server from.
firewall_group:
description:
- The firewall group to assign this server to.
plan:
description:
- Plan to use for the server.
- Required if the server does not yet exist.
force:
description:
- Force stop/start the server if required to apply changes
- Otherwise a running server will not be changed.
type: bool
notify_activate:
description:
- Whether to send an activation email when the server is ready or not.
- Only considered on creation.
type: bool
private_network_enabled:
description:
- Whether to enable private networking or not.
type: bool
auto_backup_enabled:
description:
- Whether to enable automatic backups or not.
type: bool
ipv6_enabled:
description:
- Whether to enable IPv6 or not.
type: bool
tag:
description:
- Tag for the server.
user_data:
description:
- User data to be passed to the server.
startup_script:
description:
- Name of the startup script to execute on boot.
- Only considered while creating the server.
ssh_keys:
description:
- List of SSH keys passed to the server on creation.
aliases: [ ssh_key ]
reserved_ip_v4:
description:
- IP address of the floating IP to use as the main IP of this server.
- Only considered on creation.
region:
description:
- Region the server is deployed into.
- Required if the server does not yet exist.
state:
description:
- State of the server.
default: present
choices: [ present, absent, restarted, reinstalled, started, stopped ]
extends_documentation_fragment: vultr
'''
EXAMPLES = '''
- name: create server
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
os: CentOS 7 x64
plan: 1024 MB RAM,25 GB SSD,1.00 TB BW
ssh_keys:
- my_key
- your_key
region: Amsterdam
state: present
- name: ensure a server is present and started
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
os: CentOS 7 x64
plan: 1024 MB RAM,25 GB SSD,1.00 TB BW
ssh_key: my_key
region: Amsterdam
state: started
- name: ensure a server is present and stopped
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
os: CentOS 7 x64
plan: 1024 MB RAM,25 GB SSD,1.00 TB BW
region: Amsterdam
state: stopped
- name: ensure an existing server is stopped
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
state: stopped
- name: ensure an existing server is started
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
state: started
- name: ensure a server is absent
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
state: absent
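# A sketch based on the snapshot option documented above (the snapshot name
# "my snapshot" is an assumption): restoring from a snapshot replaces the os option.
- name: ensure a server is present, restored from a snapshot
  local_action:
    module: vultr_server
    name: "{{ vultr_server_name }}"
    snapshot: my snapshot
    plan: 1024 MB RAM,25 GB SSD,1.00 TB BW
    region: Amsterdam
    state: present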
'''
RETURN = '''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_server:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
id:
description: ID of the server
returned: success
type: str
sample: 10194376
name:
description: Name (label) of the server
returned: success
type: str
sample: "ansible-test-vm"
plan:
description: Plan used for the server
returned: success
type: str
sample: "1024 MB RAM,25 GB SSD,1.00 TB BW"
allowed_bandwidth_gb:
description: Allowed bandwidth to use in GB
returned: success
type: int
sample: 1000
auto_backup_enabled:
description: Whether automatic backups are enabled
returned: success
type: bool
sample: false
cost_per_month:
description: Cost per month for the server
returned: success
type: float
sample: 5.00
current_bandwidth_gb:
description: Current bandwidth used for the server
returned: success
type: int
sample: 0
date_created:
description: Date when the server was created
returned: success
type: str
sample: "2017-08-26 12:47:48"
default_password:
description: Password to login as root into the server
returned: success
type: str
sample: "!p3EWYJm$qDWYaFr"
disk:
description: Information about the disk
returned: success
type: str
sample: "Virtual 25 GB"
v4_gateway:
description: IPv4 gateway
returned: success
type: str
sample: "45.32.232.1"
internal_ip:
description: Internal IP
returned: success
type: str
sample: ""
kvm_url:
description: URL to the VNC
returned: success
type: str
sample: "https://my.vultr.com/subs/vps/novnc/api.php?data=xyz"
region:
description: Region the server was deployed into
returned: success
type: str
sample: "Amsterdam"
v4_main_ip:
description: Main IPv4
returned: success
type: str
sample: "45.32.233.154"
v4_netmask:
description: Netmask IPv4
returned: success
type: str
sample: "255.255.254.0"
os:
description: Operating system used for the server
returned: success
type: str
sample: "CentOS 6 x64"
firewall_group:
description: Firewall group the server is assigned to
returned: success and available
type: str
sample: "CentOS 6 x64"
pending_charges:
description: Pending charges
returned: success
type: float
sample: 0.01
power_status:
description: Power status of the server
returned: success
type: str
sample: "running"
ram:
description: Information about the RAM size
returned: success
type: str
sample: "1024 MB"
server_state:
description: State about the server
returned: success
type: str
sample: "ok"
status:
description: Status about the deployment of the server
returned: success
type: str
sample: "active"
tag:
description: TBD
returned: success
type: str
sample: ""
v6_main_ip:
description: Main IPv6
returned: success
type: str
sample: ""
v6_network:
description: Network IPv6
returned: success
type: str
sample: ""
v6_network_size:
description: Network size IPv6
returned: success
type: str
sample: ""
v6_networks:
description: Networks IPv6
returned: success
type: list
sample: []
vcpu_count:
description: Virtual CPU count
returned: success
type: int
sample: 1
'''
import time
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrServer(Vultr):
def __init__(self, module):
super(AnsibleVultrServer, self).__init__(module, "vultr_server")
self.server = None
self.returns = {
'SUBID': dict(key='id'),
'label': dict(key='name'),
'date_created': dict(),
'allowed_bandwidth_gb': dict(convert_to='int'),
'auto_backups': dict(key='auto_backup_enabled', convert_to='bool'),
'current_bandwidth_gb': dict(),
'kvm_url': dict(),
'default_password': dict(),
'internal_ip': dict(),
'disk': dict(),
'cost_per_month': dict(convert_to='float'),
'location': dict(key='region'),
'main_ip': dict(key='v4_main_ip'),
'network_v4': dict(key='v4_network'),
'gateway_v4': dict(key='v4_gateway'),
'os': dict(),
'pending_charges': dict(convert_to='float'),
'power_status': dict(),
'ram': dict(),
'plan': dict(),
'server_state': dict(),
'status': dict(),
'firewall_group': dict(),
'tag': dict(),
'v6_main_ip': dict(),
'v6_network': dict(),
'v6_network_size': dict(),
'v6_networks': dict(),
'vcpu_count': dict(convert_to='int'),
}
self.server_power_state = None
def get_startup_script(self):
return self.query_resource_by_key(
key='name',
value=self.module.params.get('startup_script'),
resource='startupscript',
)
def get_os(self):
if self.module.params.get('snapshot'):
os_name = 'Snapshot'
else:
os_name = self.module.params.get('os')
return self.query_resource_by_key(
key='name',
value=os_name,
resource='os',
use_cache=True
)
def get_snapshot(self):
return self.query_resource_by_key(
key='description',
value=self.module.params.get('snapshot'),
resource='snapshot',
use_cache=True
)
def get_ssh_keys(self):
ssh_key_names = self.module.params.get('ssh_keys')
if not ssh_key_names:
return []
ssh_keys = []
for ssh_key_name in ssh_key_names:
ssh_key = self.query_resource_by_key(
key='name',
value=ssh_key_name,
resource='sshkey',
use_cache=True
)
if ssh_key:
ssh_keys.append(ssh_key)
return ssh_keys
def get_region(self):
return self.query_resource_by_key(
key='name',
value=self.module.params.get('region'),
resource='regions',
use_cache=True
)
def get_plan(self):
return self.query_resource_by_key(
key='name',
value=self.module.params.get('plan'),
resource='plans',
use_cache=True
)
def get_firewall_group(self):
return self.query_resource_by_key(
key='description',
value=self.module.params.get('firewall_group'),
resource='firewall',
query_by='group_list'
)
def get_user_data(self):
user_data = self.module.params.get('user_data')
if user_data is not None:
user_data = to_text(base64.b64encode(to_bytes(user_data)))
return user_data
def get_server_user_data(self, server):
if not server or not server.get('SUBID'):
return None
user_data = self.api_query(path="/v1/server/get_user_data?SUBID=%s" % server.get('SUBID'))
return user_data.get('userdata')
def get_server(self, refresh=False):
if self.server is None or refresh:
self.server = None
server_list = self.api_query(path="/v1/server/list")
if server_list:
for server_id, server_data in server_list.items():
if server_data.get('label') == self.module.params.get('name'):
self.server = server_data
plan = self.query_resource_by_key(
key='VPSPLANID',
value=server_data['VPSPLANID'],
resource='plans',
use_cache=True
)
self.server['plan'] = plan.get('name')
os = self.query_resource_by_key(
key='OSID',
value=int(server_data['OSID']),
resource='os',
use_cache=True
)
self.server['os'] = os.get('name')
fwg_id = server_data.get('FIREWALLGROUPID')
fw = self.query_resource_by_key(
key='FIREWALLGROUPID',
value=server_data.get('FIREWALLGROUPID') if fwg_id and fwg_id != "0" else None,
resource='firewall',
query_by='group_list',
use_cache=True
)
self.server['firewall_group'] = fw.get('description')
return self.server
def present_server(self, start_server=True):
server = self.get_server()
if not server:
server = self._create_server(server=server)
else:
server = self._update_server(server=server, start_server=start_server)
return server
def _create_server(self, server=None):
required_params = [
'os',
'plan',
'region',
]
snapshot_restore = self.module.params.get('snapshot') is not None
if snapshot_restore:
required_params.remove('os')
self.module.fail_on_missing_params(required_params=required_params)
self.result['changed'] = True
if not self.module.check_mode:
data = {
'DCID': self.get_region().get('DCID'),
'VPSPLANID': self.get_plan().get('VPSPLANID'),
'FIREWALLGROUPID': self.get_firewall_group().get('FIREWALLGROUPID'),
'OSID': self.get_os().get('OSID'),
'SNAPSHOTID': self.get_snapshot().get('SNAPSHOTID'),
'label': self.module.params.get('name'),
'hostname': self.module.params.get('hostname'),
'SSHKEYID': ','.join([ssh_key['SSHKEYID'] for ssh_key in self.get_ssh_keys()]),
'enable_ipv6': self.get_yes_or_no('ipv6_enabled'),
'enable_private_network': self.get_yes_or_no('private_network_enabled'),
'auto_backups': self.get_yes_or_no('auto_backup_enabled'),
'notify_activate': self.get_yes_or_no('notify_activate'),
'tag': self.module.params.get('tag'),
'reserved_ip_v4': self.module.params.get('reserved_ip_v4'),
'user_data': self.get_user_data(),
'SCRIPTID': self.get_startup_script().get('SCRIPTID'),
}
self.api_query(
path="/v1/server/create",
method="POST",
data=data
)
server = self._wait_for_state(key='status', state='active')
server = self._wait_for_state(state='running', timeout=3600 if snapshot_restore else 60)
return server
def _update_auto_backups_setting(self, server, start_server):
auto_backup_enabled_changed = self.switch_enable_disable(server, 'auto_backup_enabled', 'auto_backups')
if auto_backup_enabled_changed:
if auto_backup_enabled_changed == "enable" and server['auto_backups'] == 'disable':
self.module.warn("Backups are disabled. Once disabled, backups can only be enabled again by customer support")
else:
server, warned = self._handle_power_status_for_update(server, start_server)
if not warned:
self.result['changed'] = True
self.result['diff']['before']['auto_backup_enabled'] = server.get('auto_backups')
self.result['diff']['after']['auto_backup_enabled'] = self.get_yes_or_no('auto_backup_enabled')
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/backup_%s" % auto_backup_enabled_changed,
method="POST",
data=data
)
return server
def _update_ipv6_setting(self, server, start_server):
ipv6_enabled_changed = self.switch_enable_disable(server, 'ipv6_enabled', 'v6_main_ip')
if ipv6_enabled_changed:
if ipv6_enabled_changed == "disable":
self.module.warn("The Vultr API does not allow to disable IPv6")
else:
server, warned = self._handle_power_status_for_update(server, start_server)
if not warned:
self.result['changed'] = True
self.result['diff']['before']['ipv6_enabled'] = False
self.result['diff']['after']['ipv6_enabled'] = True
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/ipv6_%s" % ipv6_enabled_changed,
method="POST",
data=data
)
server = self._wait_for_state(key='v6_main_ip')
return server
def _update_private_network_setting(self, server, start_server):
private_network_enabled_changed = self.switch_enable_disable(server, 'private_network_enabled', 'internal_ip')
if private_network_enabled_changed:
if private_network_enabled_changed == "disable":
self.module.warn("The Vultr API does not allow to disable private network")
else:
server, warned = self._handle_power_status_for_update(server, start_server)
if not warned:
self.result['changed'] = True
self.result['diff']['before']['private_network_enabled'] = False
self.result['diff']['after']['private_network_enabled'] = True
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/private_network_%s" % private_network_enabled_changed,
method="POST",
data=data
)
return server
def _update_plan_setting(self, server, start_server):
plan = self.get_plan()
plan_changed = True if plan and plan['VPSPLANID'] != server.get('VPSPLANID') else False
if plan_changed:
server, warned = self._handle_power_status_for_update(server, start_server)
if not warned:
self.result['changed'] = True
self.result['diff']['before']['plan'] = server.get('plan')
self.result['diff']['after']['plan'] = plan['name']
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
'VPSPLANID': plan['VPSPLANID'],
}
self.api_query(
path="/v1/server/upgrade_plan",
method="POST",
data=data
)
return server
def _handle_power_status_for_update(self, server, start_server):
# Remember the power state before we handle any action
if self.server_power_state is None:
self.server_power_state = server['power_status']
# A stopped server can be updated
if self.server_power_state == "stopped":
return server, False
# A running server must be forced to update unless the wanted state is stopped
elif self.module.params.get('force') or not start_server:
warned = False
if not self.module.check_mode:
                # Some update APIs would restart the VM; we handle the restart manually
                # by stopping the server and starting it again once the changes are applied
server = self.stop_server(skip_results=True)
# Warn the user that a running server won't get changed
else:
warned = True
self.module.warn("Some changes won't be applied to running instances. " +
"Use force=true to allow the instance %s to be stopped/started." % server['label'])
return server, warned
def _update_server(self, server=None, start_server=True):
# Wait for server to unlock if restoring
if server.get('os').strip() == 'Snapshot':
server = self._wait_for_state(key='server_status', state='ok', timeout=3600)
# Update auto backups settings, stops server
server = self._update_auto_backups_setting(server=server, start_server=start_server)
# Update IPv6 settings, stops server
server = self._update_ipv6_setting(server=server, start_server=start_server)
# Update private network settings, stops server
server = self._update_private_network_setting(server=server, start_server=start_server)
# Update plan settings, stops server
server = self._update_plan_setting(server=server, start_server=start_server)
# User data
user_data = self.get_user_data()
server_user_data = self.get_server_user_data(server=server)
if user_data is not None and user_data != server_user_data:
self.result['changed'] = True
self.result['diff']['before']['user_data'] = server_user_data
self.result['diff']['after']['user_data'] = user_data
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
'userdata': user_data,
}
self.api_query(
path="/v1/server/set_user_data",
method="POST",
data=data
)
# Tags
tag = self.module.params.get('tag')
if tag is not None and tag != server.get('tag'):
self.result['changed'] = True
self.result['diff']['before']['tag'] = server.get('tag')
self.result['diff']['after']['tag'] = tag
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
'tag': tag,
}
self.api_query(
path="/v1/server/tag_set",
method="POST",
data=data
)
# Firewall group
firewall_group = self.get_firewall_group()
if firewall_group and firewall_group.get('description') != server.get('firewall_group'):
self.result['changed'] = True
self.result['diff']['before']['firewall_group'] = server.get('firewall_group')
self.result['diff']['after']['firewall_group'] = firewall_group.get('description')
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
'FIREWALLGROUPID': firewall_group.get('FIREWALLGROUPID'),
}
self.api_query(
path="/v1/server/firewall_group_set",
method="POST",
data=data
)
# Start server again if it was running before the changes
if not self.module.check_mode:
if self.server_power_state in ['starting', 'running'] and start_server:
server = self.start_server(skip_results=True)
server = self._wait_for_state(key='status', state='active')
return server
def absent_server(self):
server = self.get_server()
if server:
self.result['changed'] = True
self.result['diff']['before']['id'] = server['SUBID']
self.result['diff']['after']['id'] = ""
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/destroy",
method="POST",
data=data
)
for s in range(0, 60):
                    if server is None:
break
time.sleep(2)
server = self.get_server(refresh=True)
else:
self.fail_json(msg="Wait for server '%s' to get deleted timed out" % server['label'])
return server
def restart_server(self):
self.result['changed'] = True
server = self.get_server()
if server:
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/reboot",
method="POST",
data=data
)
server = self._wait_for_state(state='running')
return server
def reinstall_server(self):
self.result['changed'] = True
server = self.get_server()
if server:
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/reinstall",
method="POST",
data=data
)
server = self._wait_for_state(state='running')
return server
def _wait_for_state(self, key='power_status', state=None, timeout=60):
time.sleep(1)
server = self.get_server(refresh=True)
for s in range(0, timeout):
            # Treat any truthy value as a match when no explicit state is wanted
if state is None and server.get(key):
break
elif server.get(key) == state:
break
time.sleep(2)
server = self.get_server(refresh=True)
# Timed out
else:
if state is None:
msg = "Wait for '%s' timed out" % key
else:
msg = "Wait for '%s' to get into state '%s' timed out" % (key, state)
self.fail_json(msg=msg)
return server
def start_server(self, skip_results=False):
server = self.get_server()
if server:
if server['power_status'] == 'starting':
server = self._wait_for_state(state='running')
elif server['power_status'] != 'running':
if not skip_results:
self.result['changed'] = True
self.result['diff']['before']['power_status'] = server['power_status']
self.result['diff']['after']['power_status'] = "running"
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/start",
method="POST",
data=data
)
server = self._wait_for_state(state='running')
return server
def stop_server(self, skip_results=False):
server = self.get_server()
if server and server['power_status'] != "stopped":
if not skip_results:
self.result['changed'] = True
self.result['diff']['before']['power_status'] = server['power_status']
self.result['diff']['after']['power_status'] = "stopped"
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
}
self.api_query(
path="/v1/server/halt",
method="POST",
data=data
)
server = self._wait_for_state(state='stopped')
return server
def main():
argument_spec = vultr_argument_spec()
argument_spec.update(dict(
name=dict(required=True, aliases=['label']),
hostname=dict(),
os=dict(),
snapshot=dict(),
plan=dict(),
force=dict(type='bool', default=False),
notify_activate=dict(type='bool', default=False),
private_network_enabled=dict(type='bool'),
auto_backup_enabled=dict(type='bool'),
ipv6_enabled=dict(type='bool'),
tag=dict(),
reserved_ip_v4=dict(),
firewall_group=dict(),
startup_script=dict(),
user_data=dict(),
ssh_keys=dict(type='list', aliases=['ssh_key']),
region=dict(),
state=dict(choices=['present', 'absent', 'restarted', 'reinstalled', 'started', 'stopped'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
vultr_server = AnsibleVultrServer(module)
if module.params.get('state') == "absent":
server = vultr_server.absent_server()
else:
if module.params.get('state') == "started":
server = vultr_server.present_server()
server = vultr_server.start_server()
elif module.params.get('state') == "stopped":
server = vultr_server.present_server(start_server=False)
server = vultr_server.stop_server()
elif module.params.get('state') == "restarted":
server = vultr_server.present_server()
server = vultr_server.restart_server()
elif module.params.get('state') == "reinstalled":
server = vultr_server.reinstall_server()
else:
server = vultr_server.present_server()
result = vultr_server.get_result(server)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 9,218,975,227,851,380,000 | 33.291803 | 126 | 0.536189 | false |
mrquim/repository.mrquim | repo/script.module.schism.common/lib/incapsula/config.py | 10 | 1861 | """
This module should work out of the box.
If there are problems, then it may need to be manually configured.
Configure by typing the following variables into your web browser console and checking their output:
navigator
if this returns undefined then config['navigator']['exists'] = False, otherwise True.
navigator.vendor
if this returns undefined then config['navigator']['vendor'] = None, otherwise set to what ever value is
returned, even if the value is an empty string.
opera
if this returns undefined then config['opera']['exists'] = False, otherwise True.
ActiveXObject
if this returns undefined then config['ActiveXObject']['exists'] = False, otherwise True.
navigator.appName
if this returns undefined then config['navigator']['appName'] = None, otherwise set to whatever value
is returned, even if the value is an empty string.
webkitURL
if this returns undefined then config['webkitURL']['exists'] = False, otherwise True.
_phantom
if this returns undefined then config['_phantom']['exists'] = False, otherwise True.
"""
config = {
'navigator': {
'exists': True,
'vendor': "",
'appName': "Netscape"
},
'opera': {
'exists': False
},
'webkitURL': {
'exists': False,
},
'_phantom': {
'exists': False
},
'ActiveXObject': {
'exists': False
}
}
host = ''
scheme = 'http'
# Edit these endpoints based on the url params following the host's incapsula resource url
# Ex. www.whoscored.com's incapsula resource is /_IncapsulaResource?SWJIYLWA=2977d8d74f63d7f8fedbea018b7a1d05&ns=1
# so each of the params is it's own key/value pair
endpoints = {
'www.whoscored.com': {
'SWJIYLWA': '2977d8d74f63d7f8fedbea018b7a1d05',
'ns': '1'
}
}
| gpl-2.0 | 5,120,729,821,055,526,000 | 31.649123 | 114 | 0.652337 | false |
rsalveti/zephyr | scripts/scl.py | 9 | 2327 | #! /usr/bin/python
#
# Zephyr's Sanity Check library
#
# Set of code that other projects can also import to do things on
# Zephyr's sanity check testcases.
import logging
import os
import yaml
log = logging.getLogger("scl")
#
#
def yaml_load(filename):
"""
Safely load a YAML document
    Follows recommendations from
https://security.openstack.org/guidelines/dg_avoid-dangerous-input-parsing-libraries.html.
:param str filename: filename to load
:raises yaml.scanner: On YAML scan issues
    :raises: any other exception on file access errors
:return: dictionary representing the YAML document
"""
try:
with open(filename, 'r') as f:
return yaml.safe_load(f)
except yaml.scanner.ScannerError as e: # For errors parsing schema.yaml
mark = e.problem_mark
cmark = e.context_mark
log.error("%s:%d:%d: error: %s (note %s context @%s:%d:%d %s)",
mark.name, mark.line, mark.column, e.problem,
e.note, cmark.name, cmark.line, cmark.column, e.context)
raise
# If pykwalify is installed, then the validate function will work --
# otherwise, it is a stub and we'd warn about it.
try:
import pykwalify.core
# Don't print error messages yourself, let us do it
logging.getLogger("pykwalify.core").setLevel(50)
def _yaml_validate(data, schema):
if not schema:
return
c = pykwalify.core.Core(source_data = data, schema_data = schema)
c.validate(raise_exception = True)
except ImportError as e:
log.warning("can't import pykwalify; won't validate YAML (%s)", e)
def _yaml_validate(data, schema):
pass
def yaml_load_verify(filename, schema):
"""
Safely load a testcase/sample yaml document and validate it
    against the YAML schema, returning the YAML data on success.
:param str filename: name of the file to load and process
:param dict schema: loaded YAML schema (can load with :func:`yaml_load`)
# 'document.yaml' contains a single YAML document.
:raises yaml.scanner.ScannerError: on YAML parsing error
:raises pykwalify.errors.SchemaError: on Schema violation error
"""
# 'document.yaml' contains a single YAML document.
y = yaml_load(filename)
_yaml_validate(y, schema)
return y
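# Illustrative usage sketch (not part of the original script; the file names
# are assumptions):
#
#   import scl
#   schema = scl.yaml_load("testcase-schema.yaml")
#   data = scl.yaml_load_verify("testcase.yaml", schema)
#   # data is a dict validated against the schema when pykwalify is
#   # installed; otherwise validation is skipped with a warning.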
| apache-2.0 | -7,141,337,390,780,891,000 | 31.319444 | 94 | 0.670821 | false |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/compat/__init__.py | 1 | 2502 | """
compat
======
Cross-compatible functions for different versions of Python.
Other items:
* platform checker
"""
import platform
import struct
import sys
import warnings
PY35 = sys.version_info[:2] == (3, 5)
PY36 = sys.version_info >= (3, 6)
PY37 = sys.version_info >= (3, 7)
PY38 = sys.version_info >= (3, 8)
PYPY = platform.python_implementation() == "PyPy"
# ----------------------------------------------------------------------------
# functions largely based / taken from the six module
# Much of the code in this module comes from Benjamin Peterson's six library.
# The license for this library can be found in LICENSES/SIX and the code can be
# found at https://bitbucket.org/gutworth/six
def set_function_name(f, name, cls):
"""
Bind the name/qualname attributes of the function
"""
f.__name__ = name
f.__qualname__ = "{klass}.{name}".format(klass=cls.__name__, name=name)
f.__module__ = cls.__module__
return f
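# Illustrative sketch (not part of pandas; the class and method names are
# assumptions): bind a dynamically created function so it reports as
# SomeClass.method in reprs and tracebacks.
#
#   class SomeClass:
#       pass
#
#   def _method(self):
#       return 42
#
#   SomeClass.method = set_function_name(_method, "method", SomeClass)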
def raise_with_traceback(exc, traceback=Ellipsis):
"""
Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback.
"""
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
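# Illustrative sketch (not part of pandas): re-raise a different exception
# type while keeping the traceback of the error that was just caught.
#
#   try:
#       1 / 0
#   except ZeroDivisionError as err:
#       raise_with_traceback(TypeError("wrapped: {}".format(err)))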
# https://github.com/pandas-dev/pandas/pull/9123
def is_platform_little_endian():
""" am I little endian """
return sys.byteorder == "little"
def is_platform_windows():
return sys.platform == "win32" or sys.platform == "cygwin"
def is_platform_linux():
return sys.platform == "linux2"
def is_platform_mac():
return sys.platform == "darwin"
def is_platform_32bit():
return struct.calcsize("P") * 8 < 64
def _import_lzma():
"""Attempts to import lzma, warning the user when lzma is not available.
"""
try:
import lzma
return lzma
except ImportError:
msg = (
"Could not import the lzma module. "
"Your installed Python is incomplete. "
"Attempting to use lzma compression will result in a RuntimeError."
)
warnings.warn(msg)
def _get_lzma_file(lzma):
"""Returns the lzma method LZMAFile when the module was correctly imported.
Otherwise, raises a RuntimeError.
"""
if lzma is None:
raise RuntimeError(
"lzma module not available. "
"A Python re-install with the proper "
"dependencies might be required to solve this issue."
)
return lzma.LZMAFile
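# Illustrative sketch (not part of pandas; "data.xz" is an assumed file name):
# the two helpers are meant to be combined, so the hard failure only happens
# when lzma compression is actually requested.
#
#   lzma = _import_lzma()
#   with _get_lzma_file(lzma)("data.xz", "rb") as fh:
#       raw = fh.read()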
| apache-2.0 | 7,622,572,007,694,950,000 | 24.530612 | 79 | 0.623102 | false |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/third_party/WebKit/Tools/TestResultServer/handlers/buildershandler_unittest.py | 1 | 7276 | #!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import buildershandler
import json
import logging
import pprint
import unittest
class BuildersHandlerTest(unittest.TestCase):
def test_master_json_url(self):
self.assertEqual(buildershandler.master_json_url('http://base'), 'http://base/json/builders')
def test_builder_json_url(self):
self.assertEqual(buildershandler.builder_json_url('http://base', 'dummybuilder'), 'http://base/json/builders/dummybuilder')
def test_cached_build_json_url(self):
self.assertEqual(buildershandler.cached_build_json_url('http://base', 'dummybuilder', 12345), 'http://base/json/builders/dummybuilder/builds/12345')
self.assertEqual(buildershandler.cached_build_json_url('http://base', 'dummybuilder', '12345'), 'http://base/json/builders/dummybuilder/builds/12345')
def test_get_latest_build(self):
build_data = {'cachedBuilds': ['1', '2', '3'],
'currentBuilds': ['3'],
'basedir': 'fake'}
latest_build = buildershandler.get_latest_build(build_data)
self.assertEqual(latest_build, '2')
build_data = {'cachedBuilds': [],
'currentBuilds': ['1', '2', '3'],
'basedir': 'fake'}
latest_build = buildershandler.get_latest_build(build_data)
self.assertEqual(latest_build, '1')
build_data = {'cachedBuilds': ['1', '2', '3'],
'currentBuilds': ['1', '2', '3'],
'basedir': 'fake'}
latest_build = buildershandler.get_latest_build(build_data)
self.assertEqual(latest_build, '1')
build_data = {'cachedBuilds': [],
'currentBuilds': [],
'basedir': 'fake'}
latest_build = buildershandler.get_latest_build(build_data)
self.assertEqual(latest_build, None)
def test_fetch_buildbot_data(self):
try:
fetched_urls = []
def fake_fetch_json(url):
fetched_urls.append(url)
if url == 'http://build.chromium.org/p/chromium.webkit/json/builders':
return {'WebKit Win': None, 'WebKit Linux': None, 'WebKit Mac': None, 'WebKit Empty': None}
if url == 'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Linux':
return {'cachedBuilds': [1, 2], 'currentBuilds': []}
if url == 'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Win':
return {'cachedBuilds': [1, 2], 'currentBuilds': []}
if url == 'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Mac':
return {'cachedBuilds': [1, 2], 'currentBuilds': []}
if url == 'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Empty':
return {'cachedBuilds': [], 'currentBuilds': []}
if url == 'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Linux/builds/2':
return {'steps': [{'name': 'webkit_tests'}, {'name': 'browser_tests'}, {'name': 'mini_installer_test'}, {'name': 'archive_test_results'}, {'name': 'compile'}]}
if url == 'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Win/builds/2':
return {'steps': [{'name': 'webkit_tests'}, {'name': 'mini_installer_test'}, {'name': 'archive_test_results'}, {'name': 'compile'}]}
if url == 'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Mac/builds/2':
return {'steps': [{'name': 'browser_tests'}, {'name': 'mini_installer_test'}, {'name': 'archive_test_results'}, {'name': 'compile'}]}
logging.error('Cannot fetch fake url: %s' % url)
old_fetch_json = buildershandler.fetch_json
buildershandler.fetch_json = fake_fetch_json
masters = [
{'name': 'ChromiumWebkit', 'url': 'http://build.chromium.org/p/chromium.webkit'},
]
buildbot_data = buildershandler.fetch_buildbot_data(masters)
expected_fetched_urls = [
'http://build.chromium.org/p/chromium.webkit/json/builders',
'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Linux',
'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Linux/builds/2',
'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Mac',
'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Mac/builds/2',
'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Win',
'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Win/builds/2',
'http://build.chromium.org/p/chromium.webkit/json/builders/WebKit%20Empty',
]
self.assertEqual(fetched_urls, expected_fetched_urls)
expected_masters = {
'masters': [{
'url': 'http://build.chromium.org/p/chromium.webkit',
'tests': {
'browser_tests': {'builders': ['WebKit Linux', 'WebKit Mac']},
'mini_installer_test': {'builders': ['WebKit Linux', 'WebKit Mac', 'WebKit Win']},
'layout-tests': {'builders': ['WebKit Linux', 'WebKit Win']}},
'name': 'ChromiumWebkit'}]}
expected_json = buildershandler.dump_json(expected_masters)
self.assertEqual(buildbot_data, expected_json)
finally:
buildershandler.fetch_json = old_fetch_json
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -6,678,782,997,288,826,000 | 50.239437 | 179 | 0.619709 | false |
pymonger/prov-0.5.4 | prov/persistence/tests.py | 1 | 1453 | """Test cases for the prov.persistence Django app
@author: Trung Dong Huynh <[email protected]>
@copyright: University of Southampton 2014
"""
import unittest
import logging
from prov.model.test import examples
from prov.persistence.models import save_bundle, PDBundle
logger = logging.getLogger(__name__)
class SaveLoadTest(unittest.TestCase):
def __init__(self, methodName='runTest'):
self.bundles = {}
self.bundle_db_id_map = dict()
unittest.TestCase.__init__(self, methodName=methodName)
def setUp(self):
for bundle_id, create_document in examples.tests:
logger.debug('Creating bundle: %s...' % bundle_id)
self.bundles[bundle_id] = create_document()
logger.debug('Saving bundle: %s...' % bundle_id)
pdbundle = save_bundle(self.bundles[bundle_id], bundle_id)
self.bundle_db_id_map[bundle_id] = pdbundle.pk
def tearDown(self):
logger.debug('Deleting all test bundles (%d in total)' % len(self.bundle_db_id_map))
PDBundle.objects.filter(pk__in=self.bundle_db_id_map.values()).delete()
def testDBLoading(self):
for bundle_id in self.bundles:
logger.debug('Loading bundle from DB: %s...' % bundle_id)
pdbundle = PDBundle.objects.get(pk=self.bundle_db_id_map[bundle_id])
prov_bundle = pdbundle.get_prov_bundle()
assert(prov_bundle == self.bundles[bundle_id])
| mit | -1,127,868,869,633,103,200 | 35.325 | 92 | 0.653131 | false |
h3biomed/ansible | lib/ansible/modules/cloud/ovirt/ovirt_vnic_profile.py | 2 | 11780 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vnic_profile
short_description: Module to manage vNIC profile of network in oVirt/RHV
version_added: "2.8"
author:
- "Ondra Machacek (@machacekondra)"
- "Martin Necas (@mnecas)"
description:
- "Module to manage vNIC profile of network in oVirt/RHV"
options:
name:
description:
- "A human-readable name in plain text."
required: true
state:
description:
- "Should the vNIC be absent/present."
choices: ['absent', 'present']
default: present
description:
description:
- "A human-readable description in plain text."
data_center:
description:
- "Datacenter name where network reside."
required: true
network:
description:
- "Name of network to which is vNIC attached."
required: true
network_filter:
description:
- "The network filter enables to filter packets send to/from the VM's nic according to defined rules."
custom_properties:
description:
- "Custom properties applied to the vNIC profile."
- "Custom properties is a list of dictionary which can have following values:"
suboptions:
name:
description:
- "Name of the custom property. For example: I(hugepages), I(vhost), I(sap_agent), etc."
regexp:
description:
- Regular expression to set for custom property.
value:
description:
- Value to set for custom property.
qos:
description:
- "Quality of Service attributes regulate inbound and outbound network traffic of the NIC."
port_mirroring:
description:
- "Enables port mirroring."
type: bool
pass_through:
description:
- "Enables passthrough to an SR-IOV-enabled host NIC."
- "When enabled C(qos) and C(network_filter) are automatically set to None and C(port_mirroring) to False."
- "When enabled and C(migratable) not specified then C(migratable) is enabled."
- "Port mirroring, QoS and network filters are not supported on passthrough profiles."
choices: ['disabled', 'enabled']
migratable:
description:
- "Marks whether pass_through NIC is migratable or not."
type: bool
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Add vNIC
ovirt_vnic_profile:
name: myvnic
network: mynetwork
state: present
data_center: datacenter
- name: Editing vNICs network_filter, custom_properties, qos
ovirt_vnic_profile:
name: myvnic
network: mynetwork
data_center: datacenter
qos: myqos
custom_properties:
- name: SecurityGroups
value: 9bd9bde9-39da-44a8-9541-aa39e1a81c9d
network_filter: allow-dhcp
- name: Dont use migratable
ovirt_vnic_profile:
name: myvnic
network: mynetwork
data_center: datacenter
migratable: False
pass_through: enabled
- name: Remove vNIC
ovirt_vnic_profile:
name: myvnic
network: mynetwork
state: absent
data_center: datacenter
'''
RETURN = '''
id:
description: ID of the vNIC profile which is managed
returned: On success if vNIC profile is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
vnic:
description: "Dictionary of all the vNIC profile attributes. Network interface attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
returned: On success if vNIC profile is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
search_by_name,
get_id_by_name
)
class EntityVnicPorfileModule(BaseModule):
def __init__(self, *args, **kwargs):
super(EntityVnicPorfileModule, self).__init__(*args, **kwargs)
def _get_dcs_service(self):
return self._connection.system_service().data_centers_service()
def _get_dcs_id(self):
return get_id_by_name(self._get_dcs_service(), self.param('data_center'))
def _get_network_id(self):
networks_service = self._get_dcs_service().service(self._get_dcs_id()).networks_service()
return get_id_by_name(networks_service, self.param('network'))
def _get_qos_id(self):
if self.param('qos'):
qoss_service = self._get_dcs_service().service(self._get_dcs_id()).qoss_service()
return get_id_by_name(qoss_service, self.param('qos')) if self.param('qos') else None
return None
def _get_network_filter_id(self):
nf_service = self._connection.system_service().network_filters_service()
return get_id_by_name(nf_service, self.param('network_filter')) if self.param('network_filter') else None
def _get_network_filter(self):
network_filter = None
        # The order of these conditions is necessary.
        # If both network_filter and pass_through were specified, the module would try to create the network filter and fail on the engine.
if self.param('network_filter') == '' or self.param('pass_through') == 'enabled':
network_filter = otypes.NetworkFilter()
elif self.param('network_filter'):
network_filter = otypes.NetworkFilter(id=self._get_network_filter_id())
return network_filter
def _get_qos(self):
qos = None
        # The order of these conditions is necessary. If both qos and pass_through were specified, the module would try to create the qos and fail on the engine.
if self.param('qos') == '' or self.param('pass_through') == 'enabled':
qos = otypes.Qos()
elif self.param('qos'):
qos = otypes.Qos(id=self._get_qos_id())
return qos
def _get_port_mirroring(self):
if self.param('pass_through') == 'enabled':
return False
return self.param('port_mirroring')
def _get_migratable(self):
if self.param('migratable') is not None:
return self.param('migratable')
if self.param('pass_through') == 'enabled':
return True
def build_entity(self):
return otypes.VnicProfile(
name=self.param('name'),
network=otypes.Network(id=self._get_network_id()),
description=self.param('description') if self.param('description') is not None else None,
pass_through=otypes.VnicPassThrough(mode=otypes.VnicPassThroughMode(self.param('pass_through'))) if self.param('pass_through') else None,
custom_properties=[
otypes.CustomProperty(
name=cp.get('name'),
regexp=cp.get('regexp'),
value=str(cp.get('value')),
) for cp in self.param('custom_properties') if cp
] if self.param('custom_properties') else None,
migratable=self._get_migratable(),
qos=self._get_qos(),
port_mirroring=self._get_port_mirroring(),
network_filter=self._get_network_filter()
)
def update_check(self, entity):
def check_custom_properties():
if self.param('custom_properties'):
current = []
if entity.custom_properties:
current = [(cp.name, cp.regexp, str(cp.value)) for cp in entity.custom_properties]
passed = [(cp.get('name'), cp.get('regexp'), str(cp.get('value'))) for cp in self.param('custom_properties') if cp]
return sorted(current) == sorted(passed)
return True
pass_through = getattr(entity.pass_through.mode, 'name', None)
return (
check_custom_properties() and
            # We can't use the equal method here, because _get_network_filter_id and _get_qos_id return None when an empty string is passed,
            # and equal returns True whenever its first param is None.
self._get_network_filter_id() == getattr(entity.network_filter, 'id', None) and
self._get_qos_id() == getattr(entity.qos, 'id', None) and
equal(self.param('migratable'), getattr(entity, 'migratable', None)) and
equal(self.param('pass_through'), pass_through.lower() if pass_through else None) and
equal(self.param('description'), entity.description) and
equal(self.param('port_mirroring'), getattr(entity, 'port_mirroring', None))
)
def get_entity(vnic_services, entitynics_module):
vnic_profiles = vnic_services.list()
network_id = entitynics_module._get_network_id()
for vnic in vnic_profiles:
        # If the vNIC profile already exists, return it so it gets updated; otherwise None is returned and a new one gets created
if vnic.name == entitynics_module.param('name') and network_id == vnic.network.id:
return vnic
def check_params(module):
if (module.params.get('port_mirroring') or module.params.get('network_filter') or module.params.get('qos'))\
and module.params.get('pass_through') == 'enabled':
module.fail_json(msg="Cannot edit VM network interface profile. 'Port Mirroring,'Qos' and 'Network Filter' are not supported on passthrough profiles.")
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(type='str', default='present', choices=['absent', 'present']),
network=dict(type='str', required=True),
data_center=dict(type='str', required=True),
description=dict(type='str'),
name=dict(type='str', required=True),
network_filter=dict(type='str'),
custom_properties=dict(type='list'),
qos=dict(type='str'),
pass_through=dict(type='str', choices=['disabled', 'enabled']),
port_mirroring=dict(type='bool'),
migratable=dict(type='bool'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
vnic_services = connection.system_service().vnic_profiles_service()
entitynics_module = EntityVnicPorfileModule(
connection=connection,
module=module,
service=vnic_services,
)
state = module.params['state']
entity = get_entity(vnic_services, entitynics_module)
if state == 'present':
ret = entitynics_module.create(entity=entity, force_create=entity is None)
elif state == 'absent':
if entity is not None:
ret = entitynics_module.remove(entity=entity)
else:
raise Exception("Vnic profile '%s' in network '%s' was not found." % (module.params['name'], module.params['network']))
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 | 1,622,481,138,571,564,500 | 36.75641 | 159 | 0.621477 | false |
Wyss/mascpcr | tests/test_pipeline.py | 1 | 2863 | import filecmp
import os
import unittest
from mascpcr import pipeline, genbankfeatures
from ._common import RECODED_GENOME_FP, REFERENCE_GENOME_FP, TEST_OUTPUT_DIR, \
TEST_CACHE_DIR, REFERENCE_GB_STR, RECODED_GB_STR, \
REFERENCE_GB, RECODED_GB
class TestPipeline(unittest.TestCase):
def test_findMascPrimers(self):
start_idx, end_idx = genbankfeatures.findAggregateBoundaries(
# `sr_obj` expects a SeqRecord object
sr_obj=RECODED_GB,
# These are the features types that we want to look for
feature_types=['synth_fragment'],
# We will look up the qualifier 'label' in each Feature object and check
# to make sure that the regular expression "seg23.*" matches its contents
# (regex refresher: seg23.* will match "seg23" followed by any characters,
# e.g., seg23_001)
qualifier_regexs={'label':'seg23.*'}
)
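        # A minimal illustration of that regex (the labels are assumptions):
        # re.match('seg23.*', 'seg23_001') matches, re.match('seg23.*', 'seg22_001') does not.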
genome_str, ref_genome_str, idx_lut, edge_lut, mismatch_lut, \
border_lut = pipeline.generateLUTs(
genome_fp=RECODED_GENOME_FP,
ref_genome_fp=REFERENCE_GENOME_FP,
start_idx=start_idx,
end_idx=end_idx,
border_feature_types=['synth_fragment'],
cache_luts=True,
cache_dir=TEST_CACHE_DIR
)
# We have to prevent the output file from containing the parameters
# as it will dump the absolute filepaths, which makes file comparisons
# more difficult
params = {
'dump_params': False,
'output_fp': TEST_CACHE_DIR,
'output_basename': 'seg23'
}
pipeline.findMascPrimers(
idx_lut=idx_lut,
genome_str=RECODED_GB_STR,
ref_genome_str=REFERENCE_GB_STR,
start_idx=start_idx,
end_idx=end_idx,
edge_lut=edge_lut,
mismatch_lut=mismatch_lut,
border_lut=border_lut,
params=params
)
# Now compare the output files to the expected output files
output_report_fp = os.path.join(TEST_CACHE_DIR,
'seg23_masc_report.csv')
check_output_report_fp = os.path.join(TEST_OUTPUT_DIR,
'seg23_masc_report.csv')
# NOTE: Uncomment the following code to re-create expected output.
# If code breaks, you should figure out whether there is really a bug
# before uncommenting the following and changing the expected output.
# TODO: Come up with more robust test strategy.
# import shutil
# shutil.copyfile(output_report_fp, check_output_report_fp)
self.assertTrue(filecmp.cmp(output_report_fp, check_output_report_fp))
| gpl-2.0 | 6,397,947,026,232,318,000 | 40.492754 | 86 | 0.5854 | false |
meatough/Marks-Programs | cs 2430/Assignment12 Germany Trip/TrainObjects.py | 1 | 13138 | '''
***********************************************************
* Discrete Structures
* Trip Through Germany Program
* Programmer: Mark Eatough
* Course: CSIS 2430
* Created November 3, 2013
***********************************************************
'''
#database called GermanyDB for all objects
#table called train in GermanyDB
#create all needed train objects
#all train fares found via http://www.raileurope.com/train-faq/european-trains/ice/how-to-book.h...
#with october 18 as travel date in economy class, prices appeared to be given in dollars
from TrainClasses import *
import MySQLdb as mdb
import sys
trainList = []
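#A commented sketch (my assumption, not the original assignment code) of how the
#finished trainList could be written into the train table of GermanyDB using the
#MySQLdb import above; the column names and object attributes are guesses:
#
#   con = mdb.connect('localhost', 'user', 'password', 'GermanyDB')
#   with con:
#      cur = con.cursor()
#      for trip in trainList:
#         cur.execute("INSERT INTO train VALUES(%s, %s, %s)",
#                     (trip.departCity, trip.destinationCity, trip.cost))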
rostockToHamburg = TrainTravel("Rostock", "Hamburg", Time(1,47), 82)
print "\n\nTrain trip from Rostock to Hamburg\n"
rostockToHamburg.displayTrainTrip()
trainList.append(rostockToHamburg)
rostockToLubeck = TrainTravel("Rostock", "Lubeck", Time(2,32), 108)
print "\n\nTrain trip from Rostock to Lubeck\n"
rostockToLubeck.displayTrainTrip()
trainList.append(rostockToLubeck)
hamburgToLubeck = TrainTravel("Hamburg", "Lubeck", Time(0,45), 26)
print "\n\nTrain trip from Hamburg to Lubeck\n"
hamburgToLubeck.displayTrainTrip()
trainList.append(hamburgToLubeck)
wiesbadenToKassel = TrainTravel("Wiesbaden", "Kassel", Time(1,18), 104)
print "\n\n Train trip from Wiesbaden to Kassel\n"
#wiesbadenToKassel.displayTrainTrip()
trainList.append(wiesbadenToKassel)
# hannoverToRostock = TrainTravel("Hannover", "Rostock", Time(2,05), 104)
# print "\n\n Train trip from Munich to Nuremburg\n"
# #munichToNuremburg.displayTrainTrip()
# trainList.append(hannoverToRostock)
hamburgToBerlin = TrainTravel("Hamburg", "Berlin", Time(1,51), 109)
print "\n\nTrain trip from Hamburg to Berlin\n"
hamburgToBerlin.displayTrainTrip()
trainList.append(hamburgToBerlin)
# rostockToBerlin = TrainTravel("Rostock", "Berlin", Time(4,47), 234)
# print "\n\nTrain trip from Hamburg to Berlin\n"
# hamburgToBerlin.displayTrainTrip()
# trainList.append(rostockToBerlin)
#
# lubeckToBerlin = TrainTravel("Lubeck", "Berlin", Time(2,17), 154)
# print "\n\nTrain trip from Hamburg to Berlin\n"
# hamburgToBerlin.displayTrainTrip()
# trainList.append(lubeckToBerlin)
#
# bremenToBerlin = TrainTravel("Bremen", "Berlin", Time(3,0), 152)
# print "\n\nTrain trip from Hamburg to Berlin\n"
# hamburgToBerlin.displayTrainTrip()
# trainList.append(bremenToBerlin)
#
# hannoverToBerlin = TrainTravel("Hannover", "Berlin", Time(2,11), 183)
# print "\n\nTrain trip from Hamburg to Berlin\n"
# hamburgToBerlin.displayTrainTrip()
# trainList.append(hannoverToBerlin)
hamburgToBremen = TrainTravel("Hamburg", "Bremen", Time(1,9), 43)
print "\n\nTrain trip from Hamburg to Bremen\n"
hamburgToBremen.displayTrainTrip()
trainList.append(hamburgToBremen)
rostockToBremen = TrainTravel("Rostock", "Bremen", Time(2,56), 125)
trainList.append(rostockToBremen)
bremenToLubeck = TrainTravel("Bremen", "Lubeck", Time(1,54), 69)
print "\n\nTrain trip from Bremen to Lubeck\n"
bremenToLubeck.displayTrainTrip()
trainList.append(bremenToLubeck)
hamburgToHannover = TrainTravel("Hamburg", "Hannover", Time(1,20), 78)
print "\n\nTrain trip from Hamburg to Hannover\n"
hamburgToHannover.displayTrainTrip()
trainList.append(hamburgToHannover)
hannoverToKassel = TrainTravel("Hannover", "Kassel", Time(0,55), 65)
print "\n\nTrain trip from Hannover to Kassel\n"
hannoverToKassel.displayTrainTrip()
trainList.append(hannoverToKassel)
hannoverToDusseldorf = TrainTravel("Hannover", "Dusseldorf", Time(2,26), 101)
print "\n\nTrain trip from Hannover to Kassel\n"
hannoverToDusseldorf.displayTrainTrip()
trainList.append(hannoverToDusseldorf)
kasselToFrankfurt = TrainTravel("Kassel", "Frankfurt", Time(1,32), 75)
print "\n\nTrain trip from Kassel to Frankfurt\n"
kasselToFrankfurt.displayTrainTrip()
trainList.append(kasselToFrankfurt)
dusseldorfToKoln = TrainTravel("Dusseldorf", "Koln", Time(0,24), 26)
print "\n\nTrain trip from Dusseldorf to Koln\n"
dusseldorfToKoln.displayTrainTrip()
trainList.append(dusseldorfToKoln)
dusseldorfToBonn = TrainTravel("Dusseldorf", "Bonn", Time(0,47), 48)
print "\n\nTrain trip from Dusseldorf to Bonn\n"
dusseldorfToBonn.displayTrainTrip()
trainList.append(dusseldorfToBonn)
wiesbadenToMannhiem = TrainTravel("Wiesbaden", "Mannhiem", Time(1,27), 83)
print "\n\nTrain trip from Wiesbaden to Mannhiem\n"
wiesbadenToMannhiem.displayTrainTrip()
trainList.append(wiesbadenToMannhiem)
kolnToBonn = TrainTravel("Koln", "Bonn", Time(0, 23), 22)
print "\n\nTrain trip from Koln to Bonn\n"
kolnToBonn.displayTrainTrip()
trainList.append(kolnToBonn)
kolnToFrankfurt = TrainTravel("Koln", "Frankfurt", Time(1,05), 112)
print "\n\n Train trip from Koln to Frankfurt\n"
kolnToFrankfurt.displayTrainTrip()
trainList.append(kolnToFrankfurt)
bonnToFrankfurt = TrainTravel("Bonn", "Frankfurt", Time(1,58), 72)
print "\n\n Train trip from Bonn to Frankfurt\n"
bonnToFrankfurt.displayTrainTrip()
trainList.append(bonnToFrankfurt)
frankfurtToWiesbaden = TrainTravel("Frankfurt", "Wiesbaden", Time(0,49), 29)
print "\n\n Train trip from Frankfurt to Wiesbaden\n"
frankfurtToWiesbaden.displayTrainTrip()
trainList.append(frankfurtToWiesbaden)
# wiesbadenToStuttgart = TrainTravel("Wiesbaden", "Stuttgart", Time(2,23), 130)
# print "\n\n Train trip from Frankfurt to Stuggart\n"
# #frankfurtToStuttgart.displayTrainTrip()
# trainList.append(wiesbadenToStuttgart)
frankfurtToMannhiem = TrainTravel("Frankfurt", "Mannhiem", Time(0,38), 54)
print "\n\n Train trip from Frankfurt to Mannhiem\n"
frankfurtToMannhiem.displayTrainTrip()
trainList.append(frankfurtToMannhiem)
frankfurtToKarlsruhe = TrainTravel("Frankfurt", "Karlsruhe", Time(1,3), 69)
print "\n\n Train trip from Frankfurt to Karlsruhe\n"
frankfurtToKarlsruhe.displayTrainTrip()
trainList.append(frankfurtToKarlsruhe)
# badenbadenToStuttgart = TrainTravel("Baden Baden", "Stuttgart", Time(2,53), 179)
# print "\n\n Train trip from Frankfurt to Stuggart\n"
# #frankfurtToStuttgart.displayTrainTrip()
# trainList.append(badenbadenToStuttgart)
frankfurtToBadenBaden = TrainTravel("Frankfurt", "Baden Baden", Time(1,19), 78)
print "\n\n Train trip from Frankfurt to Baden Baden\n"
frankfurtToBadenBaden.displayTrainTrip()
trainList.append(frankfurtToBadenBaden)
frankfurtToStuttgart = TrainTravel("Frankfurt", "Stuttgart", Time(1,34), 101)
print "\n\n Train trip from Frankfurt to Stuggart\n"
frankfurtToStuttgart.displayTrainTrip()
trainList.append(frankfurtToStuttgart)
frankfurtToNuremburg = TrainTravel("Frankfurt", "Nurnberg", Time(2,06), 89)
print "\n\n Train trip from Frankfurt to Nuremburg\n"
frankfurtToNuremburg.displayTrainTrip()
trainList.append(frankfurtToNuremburg)
# kasselToStuttgart = TrainTravel("Kassel", "Stuttgart", Time(2,06), 176)
# print "\n\n Train trip from Frankfurt to Stuggart\n"
# frankfurtToStuttgart.displayTrainTrip()
# trainList.append(kasselToStuttgart)
#
# kolnToStuttgart = TrainTravel("Koln", "Stuttgart", Time(2,39), 213)
# print "\n\n Train trip from Frankfurt to Stuggart\n"
# frankfurtToStuttgart.displayTrainTrip()
# trainList.append(kolnToStuttgart)
#
# bonnToStuttgart = TrainTravel("Bonn", "Stuttgart", Time(3,32), 173)
# print "\n\n Train trip from Frankfurt to Stuggart\n"
# frankfurtToStuttgart.displayTrainTrip()
# trainList.append(bonnToStuttgart)
#
# mannhiemToStuttgart = TrainTravel("Mannhiem", "Stuttgart", Time(0,38), 55)
# print "\n\n Train trip from Mannhiem to Stuttgart\n"
# mannhiemToStuttgart.displayTrainTrip()
# trainList.append(mannhiemToStuttgart)
mannhiemToKarlsruhe = TrainTravel("Mannhiem", "Karlsruhe", Time(1,7), 32)
print "\n\n Train trip from Mannhiem to Karlsruhe\n"
mannhiemToKarlsruhe.displayTrainTrip()
trainList.append(mannhiemToKarlsruhe)
mannhiemToBadenBaden = TrainTravel("Mannhiem", "Baden Baden", Time(0,39), 54)
print "\n\n Train trip from Mannhiem to Baden Baden\n"
mannhiemToBadenBaden.displayTrainTrip()
trainList.append(mannhiemToBadenBaden)
karlsruheToBadenBaden = TrainTravel("Karlsruhe", "Baden Baden", Time(0,20), 23)
print "\n\n Train trip from Karlsruhe to Baden Baden\n"
karlsruheToBadenBaden.displayTrainTrip()
trainList.append(karlsruheToBadenBaden)
karlsruheToStuttgart = TrainTravel("Karlsruhe", "Stuttgart", Time(0,55), 36)
print "\n\n Train trip from Karlsruhe to Stuttgart\n"
karlsruheToStuttgart.displayTrainTrip()
trainList.append(karlsruheToStuttgart)
basilToKarlsruhe = TrainTravel("Basil", "Karlsruhe", Time(1,48), 100)
print "\n\nTrain trip from Dusseldorf to Koln\n"
dusseldorfToKoln.displayTrainTrip()
trainList.append(basilToKarlsruhe)
basilToMannhiem = TrainTravel("Basil", "Mannhiem", Time(2,7), 131)
print "\n\nTrain trip from Dusseldorf to Koln\n"
dusseldorfToKoln.displayTrainTrip()
trainList.append(basilToMannhiem)
badenBadenToBasil = TrainTravel("Baden Baden", "Basil", Time(1,28), 77)
print "\n\n Train trip from Baden Baden to basil\n"
badenBadenToBasil.displayTrainTrip()
trainList.append(badenBadenToBasil)
stuttgartToNuremburg = TrainTravel("Stuttgart", "Nurnberg", Time(2,11), 59)
print "\n\n Train trip from Stuttgart to Nuremburg\n"
stuttgartToNuremburg.displayTrainTrip()
trainList.append(stuttgartToNuremburg)
stuttgartToMunich = TrainTravel("Stuttgart", "Munich", Time(2,15), 92)
print "\n\n Train trip from Stuttgart to Munich\n"
stuttgartToMunich.displayTrainTrip()
trainList.append(stuttgartToMunich)
munichToNuremburg = TrainTravel("Munich", "Nurnberg", Time(1,14), 91)
print "\n\n Train trip from Munich to Nuremburg\n"
munichToNuremburg.displayTrainTrip()
trainList.append(munichToNuremburg)
# munichToDresden = TrainTravel("Munich", "Dresden", Time(5,28), 204)
# print "\n\n Train trip from Munich to Nuremburg\n"
# munichToNuremburg.displayTrainTrip()
# trainList.append(munichToDresden)
munichToFrankfurt = TrainTravel("Munich", "Frankfurt", Time(3,49), 192)
print "\n\n Train trip from Munich to Nuremburg\n"
munichToNuremburg.displayTrainTrip()
trainList.append(munichToFrankfurt)
nuremburgToDresden = TrainTravel("Nurnberg", "Dresden", Time(4,14), 113)
print "\n\n Train trip from Nuremburg to Dresden\n"
nuremburgToDresden.displayTrainTrip()
trainList.append(nuremburgToDresden)
nuremburgToLeipzig = TrainTravel("Nurnberg", "Leipzig", Time(3,36), 136)
print "\n\n Train trip from Nuremburg to Dresden\n"
nuremburgToLeipzig.displayTrainTrip()
trainList.append(nuremburgToLeipzig)
munichToLeipzig = TrainTravel("Munich", "Leipzig", Time(2,51), 134)
print "\n\n Train trip from Munich to Nuremburg\n"
munichToNuremburg.displayTrainTrip()
trainList.append(munichToLeipzig)
dresdenToLeipzig = TrainTravel("Dresden", "Leipzig", Time(1,37), 43)
print "\n\n Train trip from Dresden to Leipzig\n"
dresdenToLeipzig.displayTrainTrip()
trainList.append(dresdenToLeipzig)
dresdenToBerlin = TrainTravel("Dresden", "Berlin", Time(2,10), 76)
print "\n\n Train trip from Dresden to Berlin\n"
dresdenToBerlin.displayTrainTrip()
trainList.append(dresdenToBerlin)
leipzigToBerlin = TrainTravel("Leipzig", "Berlin", Time(1,8), 68)
print "\n\n Train trip from Leipzig to Berlin\n"
leipzigToBerlin.displayTrainTrip()
trainList.append(leipzigToBerlin)
kolnToStAugustin = TaxiTravel("Koln", "St. Augustin", 9.18)
trainList.append(kolnToStAugustin)
bonnToStAugustin = TaxiTravel("Bonn", "St. Augustin", 32.25)
trainList.append(bonnToStAugustin)
dusseldorfToStAugustin = TaxiTravel("Dusseldorf", "St. Augustin", 69.9)
trainList.append(dusseldorfToStAugustin)
wiesbadenToStAugustin = TaxiTravel("Wiesbaden", "St. Augustin", 135.9)
trainList.append(wiesbadenToStAugustin)
kolnToCastle = TaxiTravel("Koln", "Castle", 10.0)
#trainList.append(kolnToCastle)
headers = ("CREATE TABLE Train(Id INT PRIMARY KEY AUTO_INCREMENT,"
"StartCity VARCHAR(25), EndCity VARCHAR(25), Time VARCHAR(25),"
"Dollars VARCHAR(25), Euros VARCHAR(25), Trans VARCHAR(25))")
connection = mdb.connect('localhost', 'Mark', 'test623', 'GermanyDB');
with connection:
cur = connection.cursor()
cur.execute("DROP TABLE IF EXISTS Train")
cur.execute(headers)
i = 1
for t in trainList:
cur.execute("INSERT INTO Train(StartCity) VALUES(%s)", t.cityFrom)
cur.execute("UPDATE Train SET EndCity = %s WHERE Id = %s",
(t.cityTo, i))
cur.execute("UPDATE Train SET Time = %s WHERE Id = %s",
(Time.timeToMinutes(t.time), i))
cur.execute("UPDATE Train SET Dollars = %s WHERE Id = %s",
(t.dollars.dollars, i))
cur.execute("UPDATE Train SET Euros = %s WHERE Id = %s",
(t.euros.euros, i))
cur.execute("UPDATE Train SET Trans = %s WHERE Id = %s",
(t.travelBy, i))
i+=1
cur.execute("INSERT INTO Train(StartCity) VALUES(%s)", t.cityTo)
cur.execute("UPDATE Train SET EndCity = %s WHERE Id = %s",
(t.cityFrom, i))
cur.execute("UPDATE Train SET Time = %s WHERE Id = %s",
(Time.timeToMinutes(t.time), i))
cur.execute("UPDATE Train SET Dollars = %s WHERE Id = %s",
(t.dollars.dollars, i))
cur.execute("UPDATE Train SET Euros = %s WHERE Id = %s",
(t.euros.euros, i))
cur.execute("UPDATE Train SET Trans = %s WHERE Id = %s",
(t.travelBy, i))
i+=1 | gpl-3.0 | -2,412,943,273,274,023,000 | 38.575301 | 96 | 0.751484 | false |
avocado-framework-tests/avocado-misc-tests | memory/pmem_dm.py | 4 | 9214 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2020 IBM
# Author: Harish <[email protected]>
"""
Ndctl user space tooling for Linux, which handles NVDIMM devices.
This Suite works with various options of ndctl on a NVDIMM device.
"""
import os
import avocado
from avocado import Test
from avocado.utils import process, archive, distro, build
from avocado.utils import genio, pmem, disk, memory, partition
from avocado.utils.software_manager import SoftwareManager
class PmemDeviceMapper(Test):
"""
Ndctl user space tooling for Linux, which handles NVDIMM devices.
"""
def get_size_alignval(self):
"""
Return the size align restriction based on platform
"""
if not os.path.exists("/sys/bus/nd/devices/region0/align"):
self.cancel("Test cannot execute without the size alignment value")
return int(genio.read_one_line("/sys/bus/nd/devices/region0/align"), 16)
def build_fio(self):
"""
Install fio or build if not possible
"""
pkg = "fio"
if process.system("which %s" % pkg, ignore_status=True):
if not self.smm.check_installed(pkg) \
and not self.smm.install(pkg):
for package in ["autoconf", "libtool", "make"]:
if not self.smm.check_installed(package) \
and not self.smm.install(package):
self.cancel(
"Fail to install %s required for this test."
"" % package)
url = self.params.get("fio_url", default="http://brick.kernel"
".dk/snaps/fio-2.1.10"
".tar.gz")
tarball = self.fetch_asset(url)
archive.extract(tarball, self.teststmpdir)
fio_version = os.path.basename(tarball.split('.tar.')[0])
sourcedir = os.path.join(self.teststmpdir, fio_version)
build.make(sourcedir)
return os.path.join(sourcedir, "fio")
return pkg
def setUp(self):
"""
Build 'ndctl' and setup the binary.
"""
deps = []
self.dist = distro.detect()
package = self.params.get('package', default='distro')
self.preserve_dm = self.params.get('preserve_dm', default=False)
if self.dist.name not in ['SuSE', 'rhel']:
self.cancel('Unsupported OS %s' % self.dist.name)
self.smm = SoftwareManager()
if package == 'upstream':
deps.extend(['gcc', 'make', 'automake',
'autoconf', 'device-mapper'])
if self.dist.name == 'SuSE':
deps.extend(['libtool',
'libkmod-devel', 'libudev-devel', 'systemd-devel',
'libuuid-devel-static', 'libjson-c-devel',
'keyutils-devel', 'kmod-bash-completion'])
elif self.dist.name == 'rhel':
deps.extend(['libtool',
'kmod-devel', 'libuuid-devel', 'json-c-devel',
'systemd-devel', 'keyutils-libs-devel', 'jq',
'parted', 'libtool'])
for pkg in deps:
if not self.smm.check_installed(pkg) and not \
self.smm.install(pkg):
self.cancel('%s is needed for the test to be run' % pkg)
locations = ["https://github.com/pmem/ndctl/archive/master.zip"]
tarball = self.fetch_asset("ndctl.zip", locations=locations,
expire='7d')
archive.extract(tarball, self.teststmpdir)
os.chdir("%s/ndctl-master" % self.teststmpdir)
process.run('./autogen.sh', sudo=True, shell=True)
process.run("./configure CFLAGS='-g -O2' --prefix=/usr "
"--disable-docs "
"--sysconfdir=/etc --libdir="
"/usr/lib64", shell=True, sudo=True)
build.make(".")
self.ndctl = os.path.abspath('./ndctl/ndctl')
self.daxctl = os.path.abspath('./daxctl/daxctl')
else:
deps.extend(['ndctl'])
if self.dist.name == 'rhel':
deps.extend(['daxctl'])
for pkg in deps:
if not self.smm.check_installed(pkg) and not \
self.smm.install(pkg):
self.cancel('%s is needed for the test to be run' % pkg)
self.ndctl = 'ndctl'
self.daxctl = 'daxctl'
self.plib = pmem.PMem(self.ndctl, self.daxctl)
if not self.plib.check_buses():
self.cancel("Test needs atleast one region")
@avocado.fail_on(pmem.PMemException)
def test(self):
self.plib.enable_region()
regions = self.plib.run_ndctl_list('-R')
self.plib.destroy_namespace(force=True)
region = self.plib.run_ndctl_list_val(regions[0], 'dev')
split = self.params.get('split_ns', default=False)
if len(regions) == 1:
if self.plib.is_region_legacy(region):
self.cancel("Cannot create DM with single pmem device")
if not split:
self.cancel("Cannot run test without split option enabled")
if split:
if self.plib.is_region_legacy(region):
self.cancel("Cannot split pmem device on legacy hardware")
size_align = self.get_size_alignval()
self.log.info("Creating namespace with existing regions")
for reg_json in regions:
region = self.plib.run_ndctl_list_val(reg_json, 'dev')
slot_count = self.plib.get_slot_count(region)
reg_size = self.plib.run_ndctl_list_val(
self.plib.run_ndctl_list('-r %s' % region)[0], 'size')
namespace_size = reg_size // slot_count
# Now align the namespace size
namespace_size = (namespace_size //
size_align) * size_align
if namespace_size <= size_align:
self.log.warn("Skipping namespace size less than pagesize")
continue
for _ in range(0, slot_count):
self.plib.create_namespace(
region=region, size=namespace_size)
else:
self.log.info("Creating namespace with full size")
for reg_json in regions:
region = self.plib.run_ndctl_list_val(reg_json, 'dev')
self.plib.create_namespace(region=region)
devices = self.plib.run_ndctl_list('-N')
blk_cmd = ""
bdev = None
blk_size1 = 0
for cnt, dev in enumerate(devices):
bdev = self.plib.run_ndctl_list_val(dev, 'blockdev')
bdev = "/dev/%s" % bdev
blk_size2 = process.system_output(
"blockdev --getsz %s" % bdev).decode()
blk_cmd += ' %s %s linear %s 0 "\\\\n"' % (
blk_size1, blk_size2, bdev)
blk_size1 += int(blk_size2)
if cnt == len(devices) - 1:
break
dm_cmd = 'echo -e "%s" | dmsetup create linear-pmem' % blk_cmd
if process.system(dm_cmd, shell=True, sudo=True, ignore_status=True):
self.fail("Creating DM failed")
self.log.info("Running FIO on device-mapper")
dm_disk = "/dev/mapper/linear-pmem"
self.part = partition.Partition(dm_disk)
self.part.mkfs(fstype='xfs', args='-b size=%s -s size=512 -m reflink=0' %
memory.get_page_size())
mnt_path = self.params.get('mnt_point', default='/pmem')
if not os.path.exists(mnt_path):
os.makedirs(mnt_path)
self.part.mount(mountpoint=mnt_path, args='-o dax')
self.log.info("Test will run on %s", mnt_path)
fio_job = self.params.get('fio_job', default='ndctl-fio.job')
size = disk.freespace(mnt_path) * 0.9
cmd = '%s --directory %s --filename mmap-pmem --size %s %s' % (
self.build_fio(), mnt_path, size, self.get_data(fio_job))
if process.system(cmd, ignore_status=True):
self.fail("FIO mmap workload on fsdax failed")
@avocado.fail_on(pmem.PMemException)
def tearDown(self):
self.part.unmount()
if not self.preserve_dm:
process.system('dmsetup remove linear-pmem',
sudo=True, ignore_status=True)
self.plib.destroy_namespace(force=True)
self.plib.disable_region()
| gpl-2.0 | 5,300,290,207,276,827,000 | 42.87619 | 81 | 0.543412 | false |
losnikitos/googleads-python-lib | examples/dfp/v201508/creative_template_service/get_all_creative_templates.py | 4 | 1984 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all creative templates.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
creative_template_service = client.GetService(
'CreativeTemplateService', version='v201508')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get creative templates by statement.
while True:
response = creative_template_service.getCreativeTemplatesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for template in response['results']:
print ('Creative template with id \'%s\', name \'%s\', and type \'%s\' '
'was found.' % (template['id'],
template['name'],
template['type']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 | 5,598,378,245,823,768,000 | 32.627119 | 80 | 0.696573 | false |
Songbee/beerializer | setup.py | 1 | 1763 | """
A lightweight library for serialization of arbitrary Python objects into dicts.
"""
import re
from setuptools import setup
def find_version(fname):
"""
    Attempts to find the version number in the file named fname.
Raises RuntimeError if not found.
"""
version = ""
with open(fname, "r") as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError("Cannot find version information")
return version
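# Illustrative note (not part of the original file): find_version() above
# expects beerializer/__init__.py to contain a line shaped like
#     __version__ = "x.y.z"
# and returns the quoted value; "x.y.z" here is a placeholder, not the real
# version.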
setup(
name="beerializer",
author="Alexander Pushkov",
author_email="[email protected]",
url="https://beerializer.songbee.net/",
version=find_version("beerializer/__init__.py"),
description=__doc__.replace("\n", " ").strip(),
long_description=open("README.rst").read(),
keywords=[
"serialization", "rest", "json", "api", "marshal",
"marshalling", "deserialization", "validation", "schema"
],
packages=["beerializer"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: Public Domain",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
"Topic :: Software Development",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
]
)
| unlicense | 6,408,753,673,676,982,000 | 31.648148 | 82 | 0.581395 | false |
ccastell/Transfer-System | Website/env/lib/python3.5/site-packages/django/db/migrations/writer.py | 58 | 11569 | from __future__ import unicode_literals
import os
import re
from importlib import import_module
from django import get_version
from django.apps import apps
from django.db import migrations
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.serializer import serializer_factory
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.inspect import get_func_args
from django.utils.module_loading import module_dir
from django.utils.timezone import now
try:
import enum
except ImportError:
# No support on Python 2 if enum34 isn't installed.
enum = None
class SettingsReference(str):
"""
Special subclass of string which actually references a current settings
value. It's treated as the value in memory, but serializes out to a
settings.NAME attribute reference.
"""
def __new__(self, value, setting_name):
return str.__new__(self, value)
def __init__(self, value, setting_name):
self.setting_name = setting_name
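# Illustrative sketch (not part of the original module): SettingsReference is
# what lets a swappable reference such as settings.AUTH_USER_MODEL be written
# out as "settings.AUTH_USER_MODEL" instead of its resolved value.
#
#     ref = SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL")
#     str(ref) == settings.AUTH_USER_MODEL    # behaves like the plain string
#     ref.setting_name == "AUTH_USER_MODEL"   # read back by the serializer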
class OperationWriter(object):
def __init__(self, operation, indentation=2):
self.operation = operation
self.buff = []
self.indentation = indentation
def serialize(self):
def _write(_arg_name, _arg_value):
if (_arg_name in self.operation.serialization_expand_args and
isinstance(_arg_value, (list, tuple, dict))):
if isinstance(_arg_value, dict):
self.feed('%s={' % _arg_name)
self.indent()
for key, value in _arg_value.items():
key_string, key_imports = MigrationWriter.serialize(key)
arg_string, arg_imports = MigrationWriter.serialize(value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed('%s: %s' % (key_string, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s: %s,' % (key_string, arg_string))
imports.update(key_imports)
imports.update(arg_imports)
self.unindent()
self.feed('},')
else:
self.feed('%s=[' % _arg_name)
self.indent()
for item in _arg_value:
arg_string, arg_imports = MigrationWriter.serialize(item)
args = arg_string.splitlines()
if len(args) > 1:
for arg in args[:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s,' % arg_string)
imports.update(arg_imports)
self.unindent()
self.feed('],')
else:
arg_string, arg_imports = MigrationWriter.serialize(_arg_value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed('%s=%s' % (_arg_name, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s=%s,' % (_arg_name, arg_string))
imports.update(arg_imports)
imports = set()
name, args, kwargs = self.operation.deconstruct()
operation_args = get_func_args(self.operation.__init__)
# See if this operation is in django.db.migrations. If it is,
# We can just use the fact we already have that imported,
# otherwise, we need to add an import for the operation class.
if getattr(migrations, name, None) == self.operation.__class__:
self.feed('migrations.%s(' % name)
else:
imports.add('import %s' % (self.operation.__class__.__module__))
self.feed('%s.%s(' % (self.operation.__class__.__module__, name))
self.indent()
for i, arg in enumerate(args):
arg_value = arg
arg_name = operation_args[i]
_write(arg_name, arg_value)
i = len(args)
# Only iterate over remaining arguments
for arg_name in operation_args[i:]:
if arg_name in kwargs: # Don't sort to maintain signature order
arg_value = kwargs[arg_name]
_write(arg_name, arg_value)
self.unindent()
self.feed('),')
return self.render(), imports
def indent(self):
self.indentation += 1
def unindent(self):
self.indentation -= 1
def feed(self, line):
self.buff.append(' ' * (self.indentation * 4) + line)
def render(self):
return '\n'.join(self.buff)
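# For orientation, a hedged example of the text OperationWriter.serialize()
# produces for one operation (the exact output depends on the operation's
# deconstruct() result; the model and field names below are made up):
#
#     migrations.AddField(
#         model_name='author',
#         name='age',
#         field=models.IntegerField(default=0),
#     ),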
class MigrationWriter(object):
"""
Takes a Migration instance and is able to produce the contents
of the migration file from it.
"""
def __init__(self, migration):
self.migration = migration
self.needs_manual_porting = False
def as_string(self):
"""
Returns a string of the file contents.
"""
items = {
"replaces_str": "",
"initial_str": "",
}
imports = set()
# Deconstruct operations
operations = []
for operation in self.migration.operations:
operation_string, operation_imports = OperationWriter(operation).serialize()
imports.update(operation_imports)
operations.append(operation_string)
items["operations"] = "\n".join(operations) + "\n" if operations else ""
# Format dependencies and write out swappable dependencies right
dependencies = []
for dependency in self.migration.dependencies:
if dependency[0] == "__setting__":
dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1])
imports.add("from django.conf import settings")
else:
# No need to output bytestrings for dependencies
dependency = tuple(force_text(s) for s in dependency)
dependencies.append(" %s," % self.serialize(dependency)[0])
items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
# Format imports nicely, swapping imports of functions from migration files
# for comments
migration_imports = set()
for line in list(imports):
if re.match(r"^import (.*)\.\d+[^\s]*$", line):
migration_imports.add(line.split("import")[1].strip())
imports.remove(line)
self.needs_manual_porting = True
# django.db.migrations is always used, but models import may not be.
# If models import exists, merge it with migrations import.
if "from django.db import models" in imports:
imports.discard("from django.db import models")
imports.add("from django.db import migrations, models")
else:
imports.add("from django.db import migrations")
# Sort imports by the package / module to be imported (the part after
# "from" in "from ... import ..." or after "import" in "import ...").
sorted_imports = sorted(imports, key=lambda i: i.split()[1])
items["imports"] = "\n".join(sorted_imports) + "\n" if imports else ""
if migration_imports:
items["imports"] += (
"\n\n# Functions from the following migrations need manual "
"copying.\n# Move them and any dependencies into this file, "
"then update the\n# RunPython operations to refer to the local "
"versions:\n# %s"
) % "\n# ".join(sorted(migration_imports))
# If there's a replaces, make a string for it
if self.migration.replaces:
items['replaces_str'] = "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0]
# Hinting that goes into comment
items.update(
version=get_version(),
timestamp=now().strftime("%Y-%m-%d %H:%M"),
)
if self.migration.initial:
items['initial_str'] = "\n initial = True\n"
return MIGRATION_TEMPLATE % items
@property
def basedir(self):
migrations_package_name, _ = MigrationLoader.migrations_module(self.migration.app_label)
if migrations_package_name is None:
raise ValueError(
"Django can't create migrations for app '%s' because "
"migrations have been disabled via the MIGRATION_MODULES "
"setting." % self.migration.app_label
)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_package_name)
except ImportError:
pass
else:
try:
return upath(module_dir(migrations_module))
except ValueError:
pass
# Alright, see if it's a direct submodule of the app
app_config = apps.get_app_config(self.migration.app_label)
maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".")
if app_config.name == maybe_app_name:
return os.path.join(app_config.path, migrations_package_basename)
# In case of using MIGRATION_MODULES setting and the custom package
# doesn't exist, create one, starting from an existing package
existing_dirs, missing_dirs = migrations_package_name.split("."), []
while existing_dirs:
missing_dirs.insert(0, existing_dirs.pop(-1))
try:
base_module = import_module(".".join(existing_dirs))
except ImportError:
continue
else:
try:
base_dir = upath(module_dir(base_module))
except ValueError:
continue
else:
break
else:
raise ValueError(
"Could not locate an appropriate location to create "
"migrations package %s. Make sure the toplevel "
"package exists and can be imported." %
migrations_package_name)
final_dir = os.path.join(base_dir, *missing_dirs)
if not os.path.isdir(final_dir):
os.makedirs(final_dir)
for missing_dir in missing_dirs:
base_dir = os.path.join(base_dir, missing_dir)
with open(os.path.join(base_dir, "__init__.py"), "w"):
pass
return final_dir
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
return os.path.join(self.basedir, self.filename)
@classmethod
def serialize(cls, value):
return serializer_factory(value).serialize()
MIGRATION_TEMPLATE = """\
# -*- coding: utf-8 -*-
# Generated by Django %(version)s on %(timestamp)s
from __future__ import unicode_literals
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s%(initial_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
| apache-2.0 | -7,991,597,523,595,459,000 | 36.080128 | 108 | 0.552252 | false |
gangadhar-kadam/sms-erpnext | stock/doctype/delivery_note/delivery_note.py | 5 | 15852 | # ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cstr, flt, getdate, cint
from webnotes.model.bean import getlist
from webnotes.model.code import get_obj
from webnotes import msgprint, _
import webnotes.defaults
sql = webnotes.conn.sql
from controllers.selling_controller import SellingController
class DocType(SellingController):
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
self.tname = 'Delivery Note Item'
self.fname = 'delivery_note_details'
def validate_fiscal_year(self):
get_obj('Sales Common').validate_fiscal_year(self.doc.fiscal_year,self.doc.posting_date,'Posting Date')
def get_contact_details(self):
return get_obj('Sales Common').get_contact_details(self,0)
def get_comm_rate(self, sales_partner):
"""Get Commission rate of Sales Partner"""
return get_obj('Sales Common').get_comm_rate(sales_partner, self)
def pull_sales_order_details(self):
self.validate_prev_docname()
self.doclist = self.doc.clear_table(self.doclist,'other_charges')
if self.doc.sales_order_no:
get_obj('DocType Mapper', 'Sales Order-Delivery Note').dt_map('Sales Order', 'Delivery Note', self.doc.sales_order_no, self.doc, self.doclist, "[['Sales Order', 'Delivery Note'],['Sales Order Item', 'Delivery Note Item'],['Sales Taxes and Charges','Sales Taxes and Charges'],['Sales Team','Sales Team']]")
else:
msgprint("Please select Sales Order No. whose details need to be pulled")
return cstr(self.doc.sales_order_no)
def validate_prev_docname(self):
"""Validates that Sales Order is not pulled twice"""
for d in getlist(self.doclist, 'delivery_note_details'):
if self.doc.sales_order_no == d.prevdoc_docname:
msgprint(cstr(self.doc.sales_order_no) + " sales order details have already been pulled. ")
raise Exception, "Validation Error. "
def set_actual_qty(self):
for d in getlist(self.doclist, 'delivery_note_details'):
if d.item_code and d.warehouse:
actual_qty = sql("select actual_qty from `tabBin` where item_code = '%s' and warehouse = '%s'" % (d.item_code, d.warehouse))
d.actual_qty = actual_qty and flt(actual_qty[0][0]) or 0
def get_tc_details(self):
return get_obj('Sales Common').get_tc_details(self)
def get_item_details(self, args=None):
import json
args = args and json.loads(args) or {}
if args.get('item_code'):
return get_obj('Sales Common').get_item_details(args, self)
else:
obj = get_obj('Sales Common')
for doc in self.doclist:
if doc.fields.get('item_code'):
arg = {
'item_code':doc.fields.get('item_code'),
'expense_account':doc.fields.get('expense_account'),
'cost_center': doc.fields.get('cost_center'),
'warehouse': doc.fields.get('warehouse')};
ret = obj.get_item_defaults(arg)
for r in ret:
if not doc.fields.get(r):
doc.fields[r] = ret[r]
def get_barcode_details(self, barcode):
return get_obj('Sales Common').get_barcode_details(barcode)
def get_adj_percent(self, arg=''):
"""Re-calculates Basic Rate & amount based on Price List Selected"""
get_obj('Sales Common').get_adj_percent(self)
def get_actual_qty(self,args):
"""Get Actual Qty of item in warehouse selected"""
return get_obj('Sales Common').get_available_qty(eval(args))
def get_rate(self,arg):
return get_obj('Sales Common').get_rate(arg)
def load_default_taxes(self):
self.doclist = get_obj('Sales Common').load_default_taxes(self)
def get_other_charges(self):
"""Pull details from Sales Taxes and Charges Master"""
self.doclist = get_obj('Sales Common').get_other_charges(self)
def so_required(self):
"""check in manage account if sales order required or not"""
if webnotes.conn.get_value('Global Defaults', 'Global Defaults', 'so_required') == 'Yes':
for d in getlist(self.doclist,'delivery_note_details'):
if not d.prevdoc_docname:
msgprint("Sales Order No. required against item %s"%d.item_code)
raise Exception
def validate(self):
super(DocType, self).validate()
import utilities
utilities.validate_status(self.doc.status, ["Draft", "Submitted", "Cancelled"])
self.so_required()
self.validate_fiscal_year()
self.validate_proj_cust()
sales_com_obj = get_obj(dt = 'Sales Common')
sales_com_obj.check_stop_sales_order(self)
sales_com_obj.check_active_sales_items(self)
sales_com_obj.get_prevdoc_date(self)
self.validate_mandatory()
self.validate_reference_value()
self.validate_for_items()
self.validate_warehouse()
sales_com_obj.validate_max_discount(self, 'delivery_note_details')
sales_com_obj.get_allocated_sum(self)
sales_com_obj.check_conversion_rate(self)
# Set actual qty for each item in selected warehouse
self.update_current_stock()
self.doc.status = 'Draft'
if not self.doc.billing_status: self.doc.billing_status = 'Not Billed'
if not self.doc.installation_status: self.doc.installation_status = 'Not Installed'
def validate_mandatory(self):
if self.doc.amended_from and not self.doc.amendment_date:
msgprint("Please Enter Amendment Date")
raise Exception, "Validation Error. "
def validate_proj_cust(self):
"""check for does customer belong to same project as entered.."""
if self.doc.project_name and self.doc.customer:
res = sql("select name from `tabProject` where name = '%s' and (customer = '%s' or ifnull(customer,'')='')"%(self.doc.project_name, self.doc.customer))
if not res:
msgprint("Customer - %s does not belong to project - %s. \n\nIf you want to use project for multiple customers then please make customer details blank in project - %s."%(self.doc.customer,self.doc.project_name,self.doc.project_name))
raise Exception
def validate_reference_value(self):
"""Validate values with reference document with previous document"""
validate_ref = any([d.prevdoc_docname for d in self.doclist.get({"parentfield": self.fname})
if d.prevdoc_doctype == "Sales Order"])
if validate_ref:
get_obj('DocType Mapper', 'Sales Order-Delivery Note',
with_children = 1).validate_reference_value(self, self.doc.name)
def validate_for_items(self):
check_list, chk_dupl_itm = [], []
for d in getlist(self.doclist,'delivery_note_details'):
ch = sql("select is_stock_item from `tabItem` where name = '%s'"%d.item_code)
if d.prevdoc_doctype and d.prevdoc_detail_docname and ch and ch[0][0]=='Yes':
self.validate_items_with_prevdoc(d)
# validates whether item is not entered twice
e = [d.item_code, d.description, d.warehouse, d.prevdoc_docname or '', d.batch_no or '']
f = [d.item_code, d.description, d.prevdoc_docname or '']
if ch and ch[0][0] == 'Yes':
if e in check_list:
msgprint("Please check whether item %s has been entered twice wrongly." % d.item_code)
else:
check_list.append(e)
elif ch and ch[0][0] == 'No':
if f in chk_dupl_itm:
msgprint("Please check whether item %s has been entered twice wrongly." % d.item_code)
else:
chk_dupl_itm.append(f)
def validate_warehouse(self):
for d in self.get_item_list():
if webnotes.conn.get_value("Item", d['item_code'], "is_stock_item") == "Yes":
if not d['warehouse']:
msgprint("Please enter Warehouse for item %s as it is stock item"
% d['item_code'], raise_exception=1)
def validate_items_with_prevdoc(self, d):
"""check if same item, warehouse present in prevdoc"""
prev_item_dt = (d.prevdoc_doctype == 'Sales Order') and 'Sales Order Item' or 'Purchase Receipt Item'
data = sql("select item_code from `tab%s` where parent = '%s' and name = '%s'"\
% (prev_item_dt, d.prevdoc_docname, d.prevdoc_detail_docname))
if not data or data[0][0] != d.item_code:
msgprint("Item: %s is not matching with Sales Order: %s. Sales Order might be modified after \
fetching data from it. Please delete items and fetch again." \
% (d.item_code, d.prevdoc_docname), raise_exception=1)
def update_current_stock(self):
for d in getlist(self.doclist, 'delivery_note_details'):
bin = sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict = 1)
d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
for d in getlist(self.doclist, 'packing_details'):
bin = sql("select actual_qty, projected_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict = 1)
d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
d.projected_qty = bin and flt(bin[0]['projected_qty']) or 0
def on_update(self):
self.doclist = get_obj('Sales Common').make_packing_list(self,'delivery_note_details')
sl = get_obj('Stock Ledger')
sl.scrub_serial_nos(self)
sl.scrub_serial_nos(self, 'packing_details')
def on_submit(self):
self.validate_packed_qty()
# Check for Approving Authority
get_obj('Authorization Control').validate_approving_authority(self.doc.doctype, self.doc.company, self.doc.grand_total, self)
# validate serial no for item table (non-sales-bom item) and packing list (sales-bom item)
sl_obj = get_obj("Stock Ledger")
sl_obj.validate_serial_no(self, 'delivery_note_details')
sl_obj.validate_serial_no_warehouse(self, 'delivery_note_details')
sl_obj.validate_serial_no(self, 'packing_details')
sl_obj.validate_serial_no_warehouse(self, 'packing_details')
# update delivery details in serial no
sl_obj.update_serial_record(self, 'delivery_note_details', is_submit = 1, is_incoming = 0)
sl_obj.update_serial_record(self, 'packing_details', is_submit = 1, is_incoming = 0)
# update delivered qty in sales order
get_obj("Sales Common").update_prevdoc_detail(1,self)
# create stock ledger entry
self.update_stock_ledger(update_stock = 1)
self.credit_limit()
self.set_buying_amount()
self.make_gl_entries()
# set DN status
webnotes.conn.set(self.doc, 'status', 'Submitted')
def validate_packed_qty(self):
"""
Validate that if packed qty exists, it should be equal to qty
"""
if not any([flt(d.fields.get('packed_qty')) for d in self.doclist if
d.doctype=='Delivery Note Item']):
return
packing_error_list = []
for d in self.doclist:
if d.doctype != 'Delivery Note Item': continue
if flt(d.fields.get('qty')) != flt(d.fields.get('packed_qty')):
packing_error_list.append([
d.fields.get('item_code', ''),
d.fields.get('qty', 0),
d.fields.get('packed_qty', 0)
])
if packing_error_list:
from webnotes.utils import cstr
err_msg = "\n".join([("Item: " + d[0] + ", Qty: " + cstr(d[1]) \
+ ", Packed: " + cstr(d[2])) for d in packing_error_list])
webnotes.msgprint("Packing Error:\n" + err_msg, raise_exception=1)
def on_cancel(self):
sales_com_obj = get_obj(dt = 'Sales Common')
sales_com_obj.check_stop_sales_order(self)
self.check_next_docstatus()
# remove delivery details from serial no
sl = get_obj('Stock Ledger')
sl.update_serial_record(self, 'delivery_note_details', is_submit = 0, is_incoming = 0)
sl.update_serial_record(self, 'packing_details', is_submit = 0, is_incoming = 0)
sales_com_obj.update_prevdoc_detail(0,self)
self.update_stock_ledger(update_stock = -1)
webnotes.conn.set(self.doc, 'status', 'Cancelled')
self.cancel_packing_slips()
self.make_cancel_gl_entries()
def check_next_docstatus(self):
submit_rv = sql("select t1.name from `tabSales Invoice` t1,`tabSales Invoice Item` t2 where t1.name = t2.parent and t2.delivery_note = '%s' and t1.docstatus = 1" % (self.doc.name))
if submit_rv:
msgprint("Sales Invoice : " + cstr(submit_rv[0][0]) + " has already been submitted !")
raise Exception , "Validation Error."
submit_in = sql("select t1.name from `tabInstallation Note` t1, `tabInstallation Note Item` t2 where t1.name = t2.parent and t2.prevdoc_docname = '%s' and t1.docstatus = 1" % (self.doc.name))
if submit_in:
msgprint("Installation Note : "+cstr(submit_in[0][0]) +" has already been submitted !")
raise Exception , "Validation Error."
def cancel_packing_slips(self):
"""
Cancel submitted packing slips related to this delivery note
"""
res = webnotes.conn.sql("""SELECT name FROM `tabPacking Slip` WHERE delivery_note = %s
AND docstatus = 1""", self.doc.name)
if res:
from webnotes.model.bean import Bean
for r in res:
ps = Bean(dt='Packing Slip', dn=r[0])
ps.cancel()
webnotes.msgprint(_("Packing Slip(s) Cancelled"))
def update_stock_ledger(self, update_stock):
self.values = []
for d in self.get_item_list():
if webnotes.conn.get_value("Item", d['item_code'], "is_stock_item") == "Yes":
# this happens when item is changed from non-stock to stock item
if not d["warehouse"]:
continue
if d['reserved_qty'] < 0 :
# Reduce reserved qty from reserved warehouse mentioned in so
args = {
"item_code": d['item_code'],
"voucher_type": self.doc.doctype,
"voucher_no": self.doc.name,
"reserved_qty": flt(update_stock) * flt(d['reserved_qty']),
"posting_date": self.doc.posting_date,
"is_amended": self.doc.amended_from and 'Yes' or 'No'
}
get_obj("Warehouse", d["reserved_warehouse"]).update_bin(args)
# Reduce actual qty from warehouse
self.make_sl_entry(d, d['warehouse'], - flt(d['qty']) , 0, update_stock)
get_obj('Stock Ledger', 'Stock Ledger').update_stock(self.values)
def get_item_list(self):
return get_obj('Sales Common').get_item_list(self)
def make_sl_entry(self, d, wh, qty, in_value, update_stock):
self.values.append({
'item_code' : d['item_code'],
'warehouse' : wh,
'posting_date' : self.doc.posting_date,
'posting_time' : self.doc.posting_time,
'voucher_type' : 'Delivery Note',
'voucher_no' : self.doc.name,
'voucher_detail_no' : d['name'],
'actual_qty' : qty,
'stock_uom' : d['uom'],
'incoming_rate' : in_value,
'company' : self.doc.company,
'fiscal_year' : self.doc.fiscal_year,
'is_cancelled' : (update_stock==1) and 'No' or 'Yes',
'batch_no' : d['batch_no'],
'serial_no' : d['serial_no'],
"project" : self.doc.project_name
})
def credit_limit(self):
"""check credit limit of items in DN Detail which are not fetched from sales order"""
amount, total = 0, 0
for d in getlist(self.doclist, 'delivery_note_details'):
if not d.prevdoc_docname:
amount += d.amount
if amount != 0:
total = (amount/self.doc.net_total)*self.doc.grand_total
get_obj('Sales Common').check_credit(self, total)
def make_gl_entries(self):
if not cint(webnotes.defaults.get_global_default("auto_inventory_accounting")):
return
gl_entries = []
for item in self.doclist.get({"parentfield": "delivery_note_details"}):
self.check_expense_account(item)
if item.buying_amount:
gl_entries += self.get_gl_entries_for_stock(item.expense_account, -1*item.buying_amount,
cost_center=item.cost_center)
if gl_entries:
from accounts.general_ledger import make_gl_entries
make_gl_entries(gl_entries, cancel=(self.doc.docstatus == 2)) | agpl-3.0 | 9,202,620,955,723,405,000 | 36.566351 | 308 | 0.682248 | false |
andersk/zulip | zerver/management/commands/get_migration_status.py | 6 | 1223 | import argparse
import os
from typing import Any
from django.core.management.base import BaseCommand
from django.db import DEFAULT_DB_ALIAS
from scripts.lib.zulip_tools import get_dev_uuid_var_path
from zerver.lib.test_fixtures import get_migration_status
class Command(BaseCommand):
help = "Get status of migrations."
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"app_label", nargs="?", help="App label of an application to synchronize the state."
)
parser.add_argument(
"--database",
default=DEFAULT_DB_ALIAS,
help="Nominates a database to synchronize. " 'Defaults to the "default" database.',
)
parser.add_argument("--output", help="Path to store the status to (default to stdout).")
def handle(self, *args: Any, **options: Any) -> None:
result = get_migration_status(**options)
if options["output"] is not None:
uuid_var_path = get_dev_uuid_var_path()
path = os.path.join(uuid_var_path, options["output"])
with open(path, "w") as f:
f.write(result)
else:
self.stdout.write(result)
| apache-2.0 | -6,637,216,148,468,718,000 | 32.972222 | 96 | 0.631235 | false |
renqianluo/DLT2T | DLT2T/data_generators/algorithmic_math.py | 1 | 22201 | # coding=utf-8
# Copyright 2017 The DLT2T Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Algorithmic data generators for symbolic math tasks.
See go/symbolic-math-dataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import random
# Dependency imports
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import sympy
class ExprOp(object):
"""Represents an algebraic operation, such as '+', '-', etc."""
def __init__(self, symbol, precedence, associative=False):
"""Constructor.
Args:
symbol: The character which represents this operation, such as '+' for
addition.
precedence: Operator precedence. This will determine where parentheses
are used.
associative: If true, the order of the operands does not matter.
"""
self.symbol = symbol
self.precedence = precedence
self.associative = associative
def __str__(self):
return self.symbol
def __eq__(self, other):
return isinstance(other, ExprOp) and self.symbol == other.symbol
class ExprNode(object):
"""A node in an expression tree.
ExprNode always holds an operator. Leaves are strings.
"""
def __init__(self, left, right, op):
self.left = left
self.right = right
self.op = op
left_depth = left.depth if isinstance(left, ExprNode) else 0
right_depth = right.depth if isinstance(right, ExprNode) else 0
self.depth = max(left_depth, right_depth) + 1
def __str__(self):
left_str = str(self.left)
right_str = str(self.right)
left_use_parens = (isinstance(self.left, ExprNode) and
self.left.op.precedence < self.op.precedence)
right_use_parens = (isinstance(self.right, ExprNode) and
self.right.op.precedence <= self.op.precedence and
not (self.op.associative and self.right.op == self.op))
left_final = "(" + left_str + ")" if left_use_parens else left_str
right_final = "(" + right_str + ")" if right_use_parens else right_str
return left_final + str(self.op) + right_final
def is_in(self, expr):
"""Returns True if `expr` is a subtree."""
if expr == self:
return True
is_in_left = is_in_expr(self.left, expr)
is_in_right = is_in_expr(self.right, expr)
return is_in_left or is_in_right
def is_in_expr(expr, find):
"""Returns True if `find` is a subtree of `expr`."""
return expr == find or (isinstance(expr, ExprNode) and expr.is_in(find))
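def _example_expr_tree():
  """A small usage sketch, not used by the generators below.

  Builds the tree for "a*(b+c)" by hand to show how ExprOp precedence drives
  parenthesization in ExprNode.__str__.
  """
  add = ExprOp("+", 0, True)
  mul = ExprOp("*", 1, True)
  inner = ExprNode("b", "c", add)
  outer = ExprNode("a", inner, mul)
  return str(outer)  # "a*(b+c)"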
def random_expr_with_required_var(depth, required_var, optional_list, ops):
"""Generate a random expression tree with a required variable.
The required variable appears exactly once in the expression.
Args:
depth: At least one leaf will be this many levels down from the top.
required_var: A char. This char is guaranteed to be placed exactly once at
a leaf somewhere in the tree. This is the var to solve for.
optional_list: A list of chars. These chars are randomly selected as leaf
values. These are constant vars.
ops: A list of ExprOp instances.
Returns:
An ExprNode instance which is the root of the generated expression tree.
"""
if not depth:
if required_var:
return required_var
return str(optional_list[random.randrange(len(optional_list))])
max_depth_side = random.randrange(2)
other_side_depth = random.randrange(depth)
required_var_side = random.randrange(2)
left = random_expr_with_required_var(
depth - 1 if max_depth_side else other_side_depth, required_var
if required_var_side else None, optional_list, ops)
right = random_expr_with_required_var(
depth - 1 if not max_depth_side else other_side_depth, required_var
if not required_var_side else None, optional_list, ops)
op = ops[random.randrange(len(ops))]
return ExprNode(left, right, op)
def random_expr(depth, vlist, ops):
"""Generate a random expression tree.
Args:
depth: At least one leaf will be this many levels down from the top.
vlist: A list of chars. These chars are randomly selected as leaf values.
ops: A list of ExprOp instances.
Returns:
An ExprNode instance which is the root of the generated expression tree.
"""
if not depth:
return str(vlist[random.randrange(len(vlist))])
max_depth_side = random.randrange(2)
other_side_depth = random.randrange(depth)
left = random_expr(depth - 1
if max_depth_side else other_side_depth, vlist, ops)
right = random_expr(depth - 1
if not max_depth_side else other_side_depth, vlist, ops)
op = ops[random.randrange(len(ops))]
return ExprNode(left, right, op)
def algebra_inverse_solve(left, right, var, solve_ops):
"""Solves for the value of the given var in an expression.
See go/symbolic-math-dataset.
Args:
left: The root of the ExprNode tree on the left side of the equals sign.
right: The root of the ExprNode tree on the right side of the equals sign.
var: A char. The variable to solve for.
solve_ops: A dictionary with the following properties.
* For each operator in the expression, there is a rule that determines
how to cancel out a value either to the left or the right of that
operator.
* For each rule, there is an entry in the dictionary. The key is two
chars- the op char, and either 'l' or 'r' meaning rule for canceling
out the left or right sides. For example, '+l', '+r', '-l', '-r'.
* The value of each entry is a function with the following signature:
(left, right, to_tree) -> (new_from_tree, new_to_tree)
left- Expression on left side of the op.
right- Expression on the right side of the op.
to_tree- The tree on the other side of the equal sign. The canceled
out expression will be moved here.
            new_from_tree- The resulting from_tree after the algebraic
manipulation.
new_to_tree- The resulting to_tree after the algebraic manipulation.
Returns:
The root of an ExprNode tree which holds the value of `var` after solving.
Raises:
ValueError: If `var` does not appear exactly once in the equation (which
includes the left and right sides).
"""
is_in_left = is_in_expr(left, var)
is_in_right = is_in_expr(right, var)
if is_in_left == is_in_right:
if is_in_left:
raise ValueError("Solve-variable '%s' is on both sides of the equation. "
"Only equations where the solve variable-appears once "
"are supported by this solver. Left: '%s', right: '%s'" %
(var, str(left), str(right)))
else:
raise ValueError("Solve-variable '%s' is not present in the equation. It "
"must appear once. Left: '%s', right: '%s'" %
(var, str(left), str(right)))
from_tree = left if is_in_left else right
to_tree = left if not is_in_left else right
while from_tree != var:
is_in_left = is_in_expr(from_tree.left, var)
is_in_right = is_in_expr(from_tree.right, var)
from_tree, to_tree = (solve_ops[str(from_tree.op)
+ ("l" if is_in_left else "r")](
from_tree.left, from_tree.right,
to_tree))
return to_tree
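def _example_algebra_inverse_solve():
  """A hedged usage sketch for algebra_inverse_solve, not used elsewhere.

  Solves x+b=c for x using just the '+' cancellation rules; the full rule
  table for all four operators is built in math_dataset_init below.
  """
  add = ExprOp("+", 0, True)
  sub = ExprOp("-", 0, False)
  solve_ops = {
      "+l": lambda l, r, to: (l, ExprNode(to, r, sub)),
      "+r": lambda l, r, to: (r, ExprNode(to, l, sub)),
  }
  left = ExprNode("x", "b", add)  # x+b
  right = "c"
  return str(algebra_inverse_solve(left, right, "x", solve_ops))  # "c-b"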
def format_sympy_expr(sympy_expr, functions=None):
"""Convert sympy expression into a string which can be encoded.
Args:
sympy_expr: Any sympy expression tree or string.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
Returns:
A string representation of the expression suitable for encoding as a
sequence input.
"""
if functions is None:
functions = {}
str_expr = str(sympy_expr)
result = str_expr.replace(" ", "")
for fn_name, char in six.iteritems(functions):
result = result.replace(fn_name, char)
return result
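# For example (a sketch, assuming "log" is mapped to the single char "L"):
#   format_sympy_expr(sympy.log(sympy.Symbol("x")) + 1, functions={"log": "L"})
# returns "L(x)+1": spaces are stripped and function names are replaced by
# their single-char tokens.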
def generate_algebra_inverse_sample(vlist, ops, solve_ops, min_depth,
max_depth):
"""Randomly generate an algebra inverse dataset sample.
Given an input equation and variable, produce the expression equal to the
variable.
See go/symbolic-math-dataset.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
solve_ops: See `solve_ops` documentation in `algebra_inverse_solve`.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
Returns:
sample: String representation of the input. Will be of the form
'solve_var:left_side=right_side'.
target: String representation of the solution.
"""
side = random.randrange(2)
left_depth = random.randrange(min_depth if side else 0, max_depth + 1)
right_depth = random.randrange(min_depth if not side else 0, max_depth + 1)
var_index = random.randrange(len(vlist))
var = vlist[var_index]
consts = vlist[:var_index] + vlist[var_index + 1:]
left = random_expr_with_required_var(left_depth, var
if side else None, consts, ops)
right = random_expr_with_required_var(right_depth, var
if not side else None, consts, ops)
left_str = str(left)
right_str = str(right)
target = str(algebra_inverse_solve(left, right, var, solve_ops))
sample = "%s:%s=%s" % (var, left_str, right_str)
return sample, target
def generate_algebra_simplify_sample(vlist, ops, min_depth, max_depth):
"""Randomly generate an algebra simplify dataset sample.
Given an input expression, produce the simplified expression.
See go/symbolic-math-dataset.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
Returns:
sample: String representation of the input.
target: String representation of the solution.
"""
depth = random.randrange(min_depth, max_depth + 1)
expr = random_expr(depth, vlist, ops)
sample = str(expr)
target = format_sympy_expr(sympy.simplify(sample))
return sample, target
def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth,
functions):
"""Randomly generate a symbolic integral dataset sample.
Given an input expression, produce the indefinite integral.
See go/symbolic-math-dataset.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
Returns:
sample: String representation of the input. Will be of the form
'var:expression'.
target: String representation of the solution.
"""
var_index = random.randrange(len(vlist))
var = vlist[var_index]
consts = vlist[:var_index] + vlist[var_index + 1:]
depth = random.randrange(min_depth, max_depth + 1)
expr = random_expr_with_required_var(depth, var, consts, ops)
expr_str = str(expr)
sample = var + ":" + expr_str
target = format_sympy_expr(
sympy.integrate(expr_str, sympy.Symbol(var)), functions=functions)
return sample, target
# AlgebraConfig holds objects required to generate the algebra inverse
# dataset. See go/symbolic-math-dataset.
# vlist: Variable list. A list of chars.
# dlist: Numerical digit list. A list of chars.
# flist: List of special function names. A list of chars.
# functions: Dict of special function names. Maps human readable string names to
# single char names used in flist.
# ops: Dict mapping op symbols (chars) to ExprOp instances.
# solve_ops: Encodes rules for how to algebraically cancel out each operation. See
# doc-string for `algebra_inverse_solve`.
# int_encoder: Function that maps a string to a list of tokens. Use this to
# encode an expression to feed into a model.
# int_decoder: Function that maps a list of tokens to a string. Use this to
# convert model input or output into a human readable string.
AlgebraConfig = namedtuple("AlgebraConfig", [
"vlist", "dlist", "flist", "functions", "ops", "solve_ops", "int_encoder",
"int_decoder"
])
def math_dataset_init(alphabet_size=26, digits=None, functions=None):
"""Initializes required objects to generate symbolic math datasets.
See go/symbolic-math-dataset.
Produces token set, ExprOp instances, solve_op dictionary, encoders, and
decoders needed to generate the algebra inverse dataset.
Args:
alphabet_size: How many possible variables there are. Max 52.
    digits: How many numerical digits to encode as tokens, "0" through
str(digits-1), or None to encode no digits.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
WARNING, Make sure these tokens do not conflict with the list of
possible variable names.
Returns:
AlgebraConfig instance holding all the objects listed above.
Raises:
ValueError: If `alphabet_size` is not in range [2, 52].
"""
ops_list = ["+", "-", "*", "/"]
ops = {
"+": ExprOp("+", 0, True),
"-": ExprOp("-", 0, False),
"*": ExprOp("*", 1, True),
"/": ExprOp("/", 1, False)
}
solve_ops = {
"+l": lambda l, r, to: (l, ExprNode(to, r, ops["-"])),
"+r": lambda l, r, to: (r, ExprNode(to, l, ops["-"])),
"-l": lambda l, r, to: (l, ExprNode(to, r, ops["+"])),
"-r": lambda l, r, to: (r, ExprNode(l, to, ops["-"])),
"*l": lambda l, r, to: (l, ExprNode(to, r, ops["/"])),
"*r": lambda l, r, to: (r, ExprNode(to, l, ops["/"])),
"/l": lambda l, r, to: (l, ExprNode(to, r, ops["*"])),
"/r": lambda l, r, to: (r, ExprNode(l, to, ops["/"])),
}
alphabet = (
[six.int2byte(ord("a") + c).decode("utf-8") for c in range(26)] +
[six.int2byte(ord("A") + c).decode("utf-8") for c in range(26)])
if alphabet_size > 52:
raise ValueError(
"alphabet_size cannot be greater than 52. Got %s." % alphabet_size)
if alphabet_size < 2:
raise ValueError(
"alphabet_size cannot be less than 2. Got %s." % alphabet_size)
if digits is not None and not 1 <= digits <= 10:
raise ValueError("digits cannot must be between 1 and 10. Got %s." % digits)
vlist = alphabet[:alphabet_size]
if digits is not None:
dlist = [str(d) for d in xrange(digits)]
else:
dlist = []
if functions is None:
functions = {}
flist = sorted(functions.values())
pad = "_"
tokens = [pad] + [":", "(", ")", "="] + ops_list + vlist + dlist + flist
if len(tokens) != len(set(tokens)):
raise ValueError("Duplicate token. Tokens: %s" % tokens)
token_map = dict([(t, i) for i, t in enumerate(tokens)])
def int_encoder(sequence):
return [token_map[s] for s in sequence]
def int_decoder(tensor_1d):
return "".join([tokens[i] for i in tensor_1d])
return AlgebraConfig(
vlist=vlist,
dlist=dlist,
flist=flist,
functions=functions,
ops=ops,
solve_ops=solve_ops,
int_encoder=int_encoder,
int_decoder=int_decoder)
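# Example (sketch): a minimal round trip through the encoders returned by
# math_dataset_init. The helper name and the sample string are illustrative
# only; token ids depend on the token ordering built above.
def _example_encode_decode():
  cfg_example = math_dataset_init(alphabet_size=26, digits=10)
  token_ids = cfg_example.int_encoder("a+2=b")  # one int per character token
  assert cfg_example.int_decoder(token_ids) == "a+2=b"  # lossless round trip
  return token_ids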
def algebra_inverse(alphabet_size=26, min_depth=0, max_depth=2,
nbr_cases=10000):
"""Generate the algebra inverse dataset.
Each sample is a symbolic math equation involving unknown variables. The
task is to solve for the given variable. The target is the resulting
expression.
Args:
alphabet_size: How many possible variables there are. Max 52.
min_depth: Minimum depth of the expression trees on both sides of the
equals sign in the equation.
max_depth: Maximum depth of the expression trees on both sides of the
equals sign in the equation.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to solve for and the math
equation, and target-list is a list of tokens encoding the resulting math
expression after solving for the variable.
Raises:
ValueError: If `max_depth` < `min_depth`.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
alg_cfg = math_dataset_init(alphabet_size)
for _ in xrange(nbr_cases):
sample, target = generate_algebra_inverse_sample(
alg_cfg.vlist,
list(alg_cfg.ops.values()), alg_cfg.solve_ops, min_depth, max_depth)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
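# Example (sketch): consuming a few cases from the generator above and decoding
# them back to strings for inspection. The helper name is illustrative only and
# the printed strings depend on the random sampling.
def _example_algebra_inverse_preview(nbr_cases=3):
  cfg_example = math_dataset_init(alphabet_size=26)
  for case in algebra_inverse(nbr_cases=nbr_cases):
    print("%s -> %s" % (cfg_example.int_decoder(case["inputs"]),
                        cfg_example.int_decoder(case["targets"])))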
def algebra_simplify(alphabet_size=26,
min_depth=0,
max_depth=2,
nbr_cases=10000):
"""Generate the algebra simplify dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to simplify the expression. The target is the resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 52.
    min_depth: Minimum depth of the generated expression tree.
    max_depth: Maximum depth of the generated expression tree.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the expression to simplify, and
target-list is a list of tokens encoding the resulting math expression after
simplifying.
Raises:
ValueError: If `max_depth` < `min_depth`.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
alg_cfg = math_dataset_init(alphabet_size, digits=5)
for _ in xrange(nbr_cases):
sample, target = generate_algebra_simplify_sample(
alg_cfg.vlist, list(alg_cfg.ops.values()), min_depth, max_depth)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
def calculus_integrate(alphabet_size=26,
min_depth=0,
max_depth=2,
nbr_cases=10000):
"""Generate the calculus integrate dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to take the indefinite integral of the expression. The target is the
resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 26.
    min_depth: Minimum depth of the generated expression tree.
    max_depth: Maximum depth of the generated expression tree.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to integrate with respect
to and the expression to integrate, and target-list is a list of tokens
encoding the resulting math expression after integrating.
Raises:
ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
# Don't allow alphabet to use capital letters. Those are reserved for function
# names.
if alphabet_size > 26:
raise ValueError(
"alphabet_size must not be greater than 26. Got %s." % alphabet_size)
functions = {"log": "L"}
alg_cfg = math_dataset_init(alphabet_size, digits=5, functions=functions)
nbr_case = 0
while nbr_case < nbr_cases:
try:
sample, target = generate_calculus_integrate_sample(
alg_cfg.vlist,
list(alg_cfg.ops.values()), min_depth, max_depth, alg_cfg.functions)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
except: # pylint:disable=bare-except
continue
if nbr_case % 10000 == 0:
print(" calculus_integrate: generating case %d." % nbr_case)
nbr_case += 1
| apache-2.0 | 1,216,758,889,239,109,600 | 36.821124 | 80 | 0.658844 | false |
suriya/rasikapriya | rasikapriya/migrations/0002_auto__add_field_venue_create_date__add_field_venue_modify_date__add_fi.py | 1 | 10694 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.utils import timezone
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Venue.create_date'
db.add_column(u'rasikapriya_venue', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=timezone.now(), blank=True),
keep_default=False)
# Adding field 'Venue.modify_date'
db.add_column(u'rasikapriya_venue', 'modify_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=timezone.now(), blank=True),
keep_default=False)
# Adding field 'Organization.create_date'
db.add_column(u'rasikapriya_organization', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=timezone.now(), blank=True),
keep_default=False)
# Adding field 'Organization.modify_date'
db.add_column(u'rasikapriya_organization', 'modify_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=timezone.now(), blank=True),
keep_default=False)
# Adding field 'Performance.create_date'
db.add_column(u'rasikapriya_performance', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=timezone.now(), blank=True),
keep_default=False)
# Adding field 'Performance.modify_date'
db.add_column(u'rasikapriya_performance', 'modify_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=timezone.now(), blank=True),
keep_default=False)
# Adding field 'Festival.create_date'
db.add_column(u'rasikapriya_festival', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=timezone.now(), blank=True),
keep_default=False)
# Adding field 'Festival.modify_date'
db.add_column(u'rasikapriya_festival', 'modify_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=timezone.now(), blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Venue.create_date'
db.delete_column(u'rasikapriya_venue', 'create_date')
# Deleting field 'Venue.modify_date'
db.delete_column(u'rasikapriya_venue', 'modify_date')
# Deleting field 'Organization.create_date'
db.delete_column(u'rasikapriya_organization', 'create_date')
# Deleting field 'Organization.modify_date'
db.delete_column(u'rasikapriya_organization', 'modify_date')
# Deleting field 'Performance.create_date'
db.delete_column(u'rasikapriya_performance', 'create_date')
# Deleting field 'Performance.modify_date'
db.delete_column(u'rasikapriya_performance', 'modify_date')
# Deleting field 'Festival.create_date'
db.delete_column(u'rasikapriya_festival', 'create_date')
# Deleting field 'Festival.modify_date'
db.delete_column(u'rasikapriya_festival', 'modify_date')
models = {
u'rasikapriya.artist': {
'Meta': {'object_name': 'Artist'},
'band_members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['rasikapriya.Artist']", 'symmetrical': 'False', 'blank': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'home_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'modify_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'native_place': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'full_name'", 'unique_with': '()'})
},
u'rasikapriya.concert': {
'Meta': {'object_name': 'Concert'},
'artists': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['rasikapriya.Artist']", 'through': u"orm['rasikapriya.Performance']", 'symmetrical': 'False'}),
'cached_venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rasikapriya.Venue']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'end_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'festival': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rasikapriya.Festival']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rasikapriya.Organization']", 'null': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.TimeField', [], {}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['rasikapriya.Venue']"})
},
u'rasikapriya.festival': {
'Meta': {'object_name': 'Festival'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'home_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rasikapriya.Organization']", 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'full_name'", 'unique_with': '()'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rasikapriya.Venue']", 'null': 'True', 'blank': 'True'})
},
u'rasikapriya.instrument': {
'Meta': {'object_name': 'Instrument'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'})
},
u'rasikapriya.organization': {
'Meta': {'object_name': 'Organization'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'home_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rasikapriya.Venue']", 'null': 'True', 'blank': 'True'})
},
u'rasikapriya.performance': {
'Meta': {'object_name': 'Performance'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rasikapriya.Artist']"}),
'concert': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rasikapriya.Concert']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instrument': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rasikapriya.Instrument']"}),
'modify_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'rasikapriya.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.TextField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'home_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'full_address'", 'unique_with': '()'})
}
}
complete_apps = ['rasikapriya']
| mit | -3,990,114,308,811,552,300 | 65.8375 | 187 | 0.571722 | false |
xen0n/python-oauth2 | oauth2/exc.py | 1 | 1504 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
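# Example (sketch): typical handling of these exceptions; the message text and
# helper name are illustrative only.
def _example_error_handling():
    try:
        raise MissingSignature('Missing oauth_signature.')
    except Error as err:
        return err.message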
| mit | -4,831,147,452,655,628,000 | 34.809524 | 77 | 0.75 | false |
carlosalberto/carbon-fork | lib/carbon/routers.py | 3 | 3016 | import imp
from carbon.relayrules import loadRelayRules
from carbon.hashing import ConsistentHashRing
class DatapointRouter:
"Interface for datapoint routing logic implementations"
def addDestination(self, destination):
"destination is a (host, port, instance) triple"
def removeDestination(self, destination):
"destination is a (host, port, instance) triple"
def getDestinations(self, key):
"""Generate the destinations where the given routing key should map to. Only
destinations which are configured (addDestination has been called for it)
may be generated by this method."""
class RelayRulesRouter(DatapointRouter):
def __init__(self, rules_path):
self.rules_path = rules_path
self.rules = loadRelayRules(rules_path)
self.destinations = set()
def addDestination(self, destination):
self.destinations.add(destination)
def removeDestination(self, destination):
self.destinations.discard(destination)
def getDestinations(self, key):
for rule in self.rules:
if rule.matches(key):
for destination in rule.destinations:
if destination in self.destinations:
yield destination
if not rule.continue_matching:
return
class ConsistentHashingRouter(DatapointRouter):
def __init__(self, replication_factor=1):
self.replication_factor = int(replication_factor)
self.instance_ports = {} # { (server, instance) : port }
self.ring = ConsistentHashRing([])
def addDestination(self, destination):
(server, port, instance) = destination
if (server, instance) in self.instance_ports:
raise Exception("destination instance (%s, %s) already configured" % (server, instance))
self.instance_ports[ (server, instance) ] = port
self.ring.add_node( (server, instance) )
def removeDestination(self, destination):
(server, port, instance) = destination
if (server, instance) not in self.instance_ports:
raise Exception("destination instance (%s, %s) not configured" % (server, instance))
del self.instance_ports[ (server, instance) ]
self.ring.remove_node( (server, instance) )
def getDestinations(self, metric):
key = self.getKey(metric)
used_servers = set()
for (server, instance) in self.ring.get_nodes(key):
if server in used_servers:
continue
else:
used_servers.add(server)
port = self.instance_ports[ (server, instance) ]
yield (server, port, instance)
if len(used_servers) >= self.replication_factor:
return
def getKey(self, metric):
return metric
def setKeyFunction(self, func):
self.getKey = func
def setKeyFunctionFromModule(self, keyfunc_spec):
module_path, func_name = keyfunc_spec.rsplit(':', 1)
module_file = open(module_path, 'U')
description = ('.py', 'U', imp.PY_SOURCE)
module = imp.load_module('keyfunc_module', module_file, module_path, description)
keyfunc = getattr(module, func_name)
self.setKeyFunction(keyfunc)
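# Example (sketch): wiring a ConsistentHashingRouter to two hypothetical
# destinations and asking where a metric key should be routed. The hosts, port
# and instance names below are made up for illustration.
def _example_consistent_routing():
  router = ConsistentHashingRouter(replication_factor=1)
  router.addDestination(('10.0.0.1', 2004, 'a'))
  router.addDestination(('10.0.0.2', 2004, 'a'))
  return list(router.getDestinations('carbon.agents.host1.cpuUsage'))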
| apache-2.0 | -7,642,043,862,873,826,000 | 32.511111 | 94 | 0.695292 | false |
sbrunner/QGIS | python/PyQt/PyQt5/QtWebKit.py | 17 | 1132 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QtWebKit.py
---------------------
Date : November 2015
Copyright : (C) 2015 by Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'November 2015'
__copyright__ = '(C) 2015, Matthias Kuhn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt5.QtWebKit import *
| gpl-2.0 | 7,597,989,326,831,827,000 | 42.538462 | 75 | 0.381625 | false |
kreatorkodi/repository.torrentbr | plugin.video.yatp/libs/client/actions.py | 1 | 8310 | # coding: utf-8
# Module: actions
# Created on: 27.07.2015
# Author: Roman Miroshnychenko aka Roman V.M. ([email protected])
# Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html
import os
import xbmcgui
import xbmcplugin
from simpleplugin import Plugin
import json_requests as jsonrq
from buffering import buffer_torrent, stream_torrent, add_torrent, get_videofiles
plugin = Plugin()
_ = plugin.initialize_gettext()
icons = os.path.join(plugin.path, 'resources', 'icons')
commands = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'commands.py')
def _play(path):
"""
Play a videofile
:param path:
:return:
"""
plugin.log_notice('Path to play: {0}'.format(path))
return plugin.resolve_url(path, succeeded=bool(path))
@plugin.action()
def root():
"""
Plugin root
"""
return [{'label': _('Play .torrent file...'),
'thumb': os.path.join(icons, 'play.png'),
'url': plugin.get_url(action='select_torrent', target='play')},
{'label': _('Download torrent from .torrent file...'),
'thumb': os.path.join(icons, 'down.png'),
'url': plugin.get_url(action='select_torrent', target='download'),
'is_folder': False},
{'label': _('Torrents'),
'thumb': plugin.icon,
'url': plugin.get_url(action='torrents')}]
@plugin.action()
def select_torrent(params):
"""
Select .torrent file to play
:param params:
:return:
"""
torrent = xbmcgui.Dialog().browse(1, _('Select .torrent file'), 'video', mask='.torrent')
if torrent:
plugin.log_notice('Torrent selected: {0}'.format(torrent))
if params['target'] == 'play':
return list_files({'torrent': torrent})
else:
download_torrent({'torrent': torrent})
@plugin.action('play')
def play_torrent(params):
"""
Play torrent
:param params:
:return:
"""
file_index = params.get('file_index')
if file_index is not None and file_index != 'dialog':
file_index = int(file_index)
return _play(buffer_torrent(params['torrent'], file_index))
@plugin.action()
def play_file(params):
"""
Stream a file from torrent by its index
The torrent must be already added to the session!
:param params:
:return:
"""
return _play(stream_torrent(int(params['file_index']), params['info_hash']))
@plugin.action('download')
def download_torrent(params):
"""
Add torrent for downloading
:param params:
:return:
"""
jsonrq.add_torrent(params['torrent'], False)
xbmcgui.Dialog().notification('YATP', _('Torrent added for downloading'), plugin.icon, 3000)
@plugin.action()
def torrents():
"""
Display the list of torrents in the session
"""
torrent_list = sorted(jsonrq.get_all_torrent_info(), key=lambda i: i['added_time'], reverse=True)
for torrent in torrent_list:
if torrent['state'] == 'downloading':
label = '[COLOR=red]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
elif torrent['state'] == 'seeding':
label = '[COLOR=green]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
elif torrent['state'] == 'paused':
label = '[COLOR=gray]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
else:
label = '[COLOR=blue]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
item = {'label': label,
'url': plugin.get_url(action='show_files', info_hash=torrent['info_hash']),
'is_folder': True}
if torrent['state'] == 'downloading':
item['thumb'] = os.path.join(icons, 'down.png')
elif torrent['state'] == 'seeding':
item['thumb'] = os.path.join(icons, 'up.png')
elif torrent['state'] == 'paused':
item['thumb'] = os.path.join(icons, 'pause.png')
else:
item['thumb'] = os.path.join(icons, 'question.png')
context_menu = [(_('Pause all torrents'),
'RunScript({commands},pause_all)'.format(commands=commands)),
(_('Resume all torrents'),
'RunScript({commands},resume_all)'.format(commands=commands)),
(_('Delete torrent'),
'RunScript({commands},delete,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])),
(_('Delete torrent and files'),
'RunScript({commands},delete_with_files,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])),
(_('Torrent info'),
'RunScript({commands},show_info,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])),
]
if torrent['state'] == 'paused':
context_menu.insert(0, (_('Resume torrent'),
'RunScript({commands},resume,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])))
else:
context_menu.insert(0, (_('Pause torrent'),
'RunScript({commands},pause,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])))
if torrent['state'] == 'incomplete':
context_menu.append((_('Complete download'),
'RunScript({commands},restore_finished,{info_hash})'.format(
commands=commands,
info_hash=torrent['info_hash'])))
item['context_menu'] = context_menu
yield item
def _build_file_list(files, info_hash):
"""
Create the list of videofiles in a torrent
:param files:
:param info_hash:
:return:
"""
videofiles = get_videofiles(files)
for file_ in videofiles:
ext = os.path.splitext(file_[1].lower())[1]
if ext == '.avi':
thumb = os.path.join(icons, 'avi.png')
elif ext == '.mp4':
thumb = os.path.join(icons, 'mp4.png')
elif ext == '.mkv':
thumb = os.path.join(icons, 'mkv.png')
elif ext == '.mov':
thumb = os.path.join(icons, 'mov.png')
else:
thumb = os.path.join(icons, 'play.png')
yield {'label': '{name} [{size}{unit}]'.format(name=file_[1].encode('utf-8'),
size=file_[2] / 1048576,
unit=_('MB')),
'thumb': thumb,
'url': plugin.get_url(action='play_file',
info_hash=info_hash,
file_index=file_[0]),
'is_playable': True,
'info': {'video': {'size': file_[2]}},
}
@plugin.action()
def list_files(params):
"""
Add a torrent to the session and display the list of files in a torrent
:param params:
:return:
"""
torrent_data = add_torrent(params['torrent'])
if torrent_data is not None:
return plugin.create_listing(_build_file_list(torrent_data['files'], torrent_data['info_hash']),
cache_to_disk=True,
sort_methods=(xbmcplugin.SORT_METHOD_LABEL, xbmcplugin.SORT_METHOD_SIZE))
xbmcgui.Dialog().notification(plugin.id, _('Playback cancelled.'), plugin.icon, 3000)
return []
@plugin.action()
def show_files(params):
"""
Display the list of videofiles
:param params:
:return:
"""
return plugin.create_listing(_build_file_list(jsonrq.get_files(params['info_hash']), params['info_hash']),
cache_to_disk=True,
sort_methods=(xbmcplugin.SORT_METHOD_LABEL, xbmcplugin.SORT_METHOD_SIZE))
| gpl-2.0 | -5,768,955,779,380,998,000 | 36.60181 | 119 | 0.519495 | false |
jonparrott/compute-hadoop-java-python | cfg.py | 2 | 3945 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config shared between all instances and local tools."""
import json
import httplib2
# These constants are used by the Config object
COORDINATOR = 'coordinator'
HADOOP_NAMENODE = 'hadoop-namenode'
HADOOP_JOBTRACKER = 'hadoop-jobtracker'
PORT = 8888 # The port of both coordinator and snitches
NUM_WORKERS = 20 # Depends on request quota/second
METADATA = 'http://metadata/0.1/meta-data/'
EDISK_LOCATION = '/mnt/hadoop'
class Config(object):
"""Singleton that stores config."""
def __init__(self):
# General communication
self.port = PORT
self.ok_reply = json.dumps({'result': 'ok'})
self.secret = ''
# For instance-to-instance calls, don't need to use util.name_to_ip()
self.ip_via_api = True
# General
self.poll_delay_secs = 2.0
self.project_id = None
# Instance names
self.coordinator = COORDINATOR
self.hadoop_namenode = HADOOP_NAMENODE
self.hadoop_jobtracker = HADOOP_JOBTRACKER
# Instance creation
self.zone = None
self.machine_type = None
self.image = None
self.disk = None
self.rw_disk_instance = None
self.external_ips = True
scope_base = 'https://www.googleapis.com/auth/'
self.rw_storage_scope = scope_base + 'devstorage.read_write'
self.ro_storage_scope = scope_base + 'devstorage.read_only'
self.compute_scope = scope_base + 'compute'
self.download_attempts = 3
self.num_workers = NUM_WORKERS
# Hadoop details
self.hadoop_url = 'archive.apache.org/dist/hadoop/common'
# Use latest stable version of Hadoop, as of 2/4/2013.
self.hadoop_version = '1.1.1'
self.hadoop_fn = 'hadoop-{0}'.format(self.hadoop_version)
self.hadoop_bin = '/home/hadoop/hadoop/bin/'
# This is where ephemeral disk gets mounted. Note this location is hardcoded
# in a few places (the hadoop config, mainly)
self.edisk_location = EDISK_LOCATION
# Depends on hdfs replication value
self.needed_slaves = 3
# Google Storage locations
self.gs_bucket = None
self.gs_hadoop_conf = None
self.gs_hadoop_tarball = None
self.gs_coordinators_tarball = None
self.gs_snitch_tarball = None
self.gs_tools_jar = None
def update_from_metadata(self):
"""Update by querying the metadata server. Only works on instances."""
# This method is only called on instances, meaning we don't need the API to
# lookup an external IP address
self.ip_via_api = False
def get_md(key, base=METADATA + 'attributes/'):
return httplib2.Http().request(base + key, 'GET')[1]
self.project_id = get_md('project-id', base=METADATA)
self.secret = get_md('secret')
self.zone = get_md('zone')
self.machine_type = get_md('machine_type')
self.image = get_md('image')
self.disk = get_md('disk')
self.rw_disk_instance = get_md('rw_disk_instance')
self.set_bucket(get_md('gs_bucket'))
def set_bucket(self, bucket):
"""Set the GS bucket, and update config URLs involving GS."""
self.gs_bucket = bucket
url = 'gs://{0}/'.format(bucket)
self.gs_hadoop_conf = url + 'hadoop_conf.tgz'
self.gs_hadoop_tarball = url + self.hadoop_fn + '.tar.gz'
self.gs_coordinators_tarball = url + 'coordinator-tarball.tgz'
self.gs_snitch_tarball = url + 'snitch-tarball.tgz'
self.gs_tools_jar = url + 'hadoop-tools.jar'
cfg = Config()
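# Example (sketch): how the Google Storage paths are derived once a bucket is
# set; the bucket name is a placeholder.
def _example_bucket_paths():
  c = Config()
  c.set_bucket('my-hadoop-bucket')
  return c.gs_hadoop_tarball  # 'gs://my-hadoop-bucket/hadoop-1.1.1.tar.gz'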
| apache-2.0 | -992,113,311,855,062,800 | 31.875 | 80 | 0.684411 | false |
lidavidm/mathics-heroku | mathics/__init__.py | 1 | 1560 | # -*- coding: utf8 -*-
# force utf8 encoding
import sys
import codecs
writer = codecs.getwriter("utf-8")
sys.stdout = writer(sys.stdout)
__version__ = "0.6.0rc1"
def get_version():
version = {}
import sympy
import sympy.mpmath as mpmath
from django.core.exceptions import ImproperlyConfigured
try:
import django
from django.conf import settings
version['django'] = django.get_version()
except (ImportError, ImproperlyConfigured):
pass
version['mathics'] = __version__
version['sympy'] = sympy.__version__
version['mpmath'] = mpmath.__version__
version['python'] = sys.subversion[0] + " " + sys.version.split('\n')[0]
return version
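# Example (sketch): the dict returned above always has 'mathics', 'sympy',
# 'mpmath' and 'python' keys; 'django' appears only when Django is importable
# and configured. The helper below is illustrative only.
def _example_version_summary():
    info = get_version()
    return "Mathics %s on %s" % (info['mathics'], info['python'])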
def get_version_string(is_server, newlines=False):
version = get_version()
result = []
result.append(u"Mathics %s" % version['mathics'])
result.append(u"on %s" % version['python'])
libs = []
if 'django' in version and is_server:
libs.append("Django %s" % version['django'])
libs += ["SymPy %s" % version['sympy'], "mpmath %s" % version['mpmath']]
result.append(u"using %s" % ", ".join(libs))
return ("\n" if newlines else " ").join(result)
def print_version(is_server):
print "\n" + get_version_string(is_server, newlines=True)
def print_license():
print u"""
Copyright (C) 2011-2013 The Mathics Team.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions.
See the documentation for the full license.
"""
| gpl-3.0 | 7,199,105,407,389,156,000 | 25.896552 | 76 | 0.648718 | false |
JRock007/Notes-Maker | NotesMaker.py | 1 | 25462 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
from ext import find
from ext import abbreviations
from ext import datetime
from ext import table
from ext import wordcount
class Main(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.filename = ""
self.changesSaved = True
self.initUI()
def initToolbar(self):
self.newAction = QtGui.QAction(QtGui.QIcon("icons/new.png"), "New", self)
self.newAction.setShortcut("Ctrl+N")
self.newAction.setStatusTip("Create a new document from scratch.")
self.newAction.triggered.connect(self.new)
self.openAction = QtGui.QAction(QtGui.QIcon("icons/open.png"), "Open file", self)
self.openAction.setStatusTip("Open existing document")
self.openAction.setShortcut("Ctrl+O")
self.openAction.triggered.connect(self.open)
self.saveAction = QtGui.QAction(QtGui.QIcon("icons/save.png"), "Save", self)
self.saveAction.setStatusTip("Save document")
self.saveAction.setShortcut("Ctrl+S")
self.saveAction.triggered.connect(self.save)
self.convertAction = QtGui.QAction(QtGui.QIcon("icons/abbreviations.png"), "Replace abbreviations", self)
self.convertAction.setStatusTip("Replace abbreviations")
self.convertAction.setShortcut("Ctrl+I")
self.convertAction.triggered.connect(abbreviations.Abbreviations(self).show)
self.printAction = QtGui.QAction(QtGui.QIcon("icons/print.png"), "Print document", self)
self.printAction.setStatusTip("Print document")
self.printAction.setShortcut("Ctrl+P")
self.printAction.triggered.connect(self.printHandler)
self.previewAction = QtGui.QAction(QtGui.QIcon("icons/preview.png"), "Page view", self)
self.previewAction.setStatusTip("Preview page before printing")
self.previewAction.setShortcut("Ctrl+Shift+P")
self.previewAction.triggered.connect(self.preview)
self.findAction = QtGui.QAction(QtGui.QIcon("icons/find.png"), "Find and replace", self)
self.findAction.setStatusTip("Find and replace words in your document")
self.findAction.setShortcut("Ctrl+F")
self.findAction.triggered.connect(find.Find(self).show)
self.undoAction = QtGui.QAction(QtGui.QIcon("icons/undo.png"), "Undo last action", self)
self.undoAction.setStatusTip("Undo last action")
self.undoAction.setShortcut("Ctrl+Z")
self.undoAction.triggered.connect(self.text.undo)
self.redoAction = QtGui.QAction(QtGui.QIcon("icons/redo.png"), "Redo last undone thing", self)
self.redoAction.setStatusTip("Redo last undone thing")
self.redoAction.setShortcut("Ctrl+Y")
self.redoAction.triggered.connect(self.text.redo)
tableAction = QtGui.QAction(QtGui.QIcon("icons/table.png"), "Insert table", self)
tableAction.setStatusTip("Insert table")
tableAction.setShortcut("Ctrl+T")
tableAction.triggered.connect(table.Table(self).show)
imageAction = QtGui.QAction(QtGui.QIcon("icons/image.png"), "Insert image", self)
imageAction.setStatusTip("Insert image")
imageAction.setShortcut("Ctrl+Shift+I")
imageAction.triggered.connect(self.insertImage)
bulletAction = QtGui.QAction(QtGui.QIcon("icons/bullet.png"), "Insert bullet List", self)
bulletAction.setStatusTip("Insert bullet list")
bulletAction.setShortcut("Ctrl+Shift+B")
bulletAction.triggered.connect(self.bulletList)
numberedAction = QtGui.QAction(QtGui.QIcon("icons/number.png"), "Insert numbered List", self)
numberedAction.setStatusTip("Insert numbered list")
numberedAction.setShortcut("Ctrl+Shift+L")
numberedAction.triggered.connect(self.numberList)
fontBox = QtGui.QFontComboBox(self)
fontBox.currentFontChanged.connect(lambda font: self.text.setCurrentFont(font))
fontSize = QtGui.QSpinBox(self)
# Will display " pt" after each value
fontSize.setSuffix(" pt")
fontSize.valueChanged.connect(lambda size: self.text.setFontPointSize(size))
fontSize.setValue(14)
fontColor = QtGui.QAction(QtGui.QIcon("icons/font-color.png"), "Change font color", self)
fontColor.triggered.connect(self.fontColorChanged)
boldAction = QtGui.QAction(QtGui.QIcon("icons/bold.png"), "Bold", self)
boldAction.triggered.connect(self.bold)
italicAction = QtGui.QAction(QtGui.QIcon("icons/italic.png"), "Italic", self)
italicAction.triggered.connect(self.italic)
underlAction = QtGui.QAction(QtGui.QIcon("icons/underline.png"), "Underline", self)
underlAction.triggered.connect(self.underline)
strikeAction = QtGui.QAction(QtGui.QIcon("icons/strike.png"), "Strike-out", self)
strikeAction.triggered.connect(self.strike)
superAction = QtGui.QAction(QtGui.QIcon("icons/superscript.png"), "Superscript", self)
superAction.triggered.connect(self.superScript)
subAction = QtGui.QAction(QtGui.QIcon("icons/subscript.png"), "Subscript", self)
subAction.triggered.connect(self.subScript)
alignLeft = QtGui.QAction(QtGui.QIcon("icons/align-left.png"), "Align left", self)
alignLeft.triggered.connect(self.alignLeft)
alignCenter = QtGui.QAction(QtGui.QIcon("icons/align-center.png"), "Align center", self)
alignCenter.triggered.connect(self.alignCenter)
alignRight = QtGui.QAction(QtGui.QIcon("icons/align-right.png"), "Align right", self)
alignRight.triggered.connect(self.alignRight)
alignJustify = QtGui.QAction(QtGui.QIcon("icons/align-justify.png"), "Align justify", self)
alignJustify.triggered.connect(self.alignJustify)
backColor = QtGui.QAction(QtGui.QIcon("icons/highlight.png"), "Change background color", self)
backColor.triggered.connect(self.highlight)
self.toolbar = self.addToolBar("Options")
self.toolbar.addWidget(fontBox)
self.toolbar.addWidget(fontSize)
self.toolbar.addSeparator()
self.toolbar.addAction(boldAction)
self.toolbar.addAction(italicAction)
self.toolbar.addAction(underlAction)
self.toolbar.addAction(strikeAction)
self.toolbar.addSeparator()
self.toolbar.addAction(fontColor)
self.toolbar.addAction(backColor)
self.toolbar.addSeparator()
self.toolbar.addAction(alignLeft)
self.toolbar.addAction(alignCenter)
self.toolbar.addAction(alignRight)
self.toolbar.addAction(alignJustify)
self.toolbar.addSeparator()
self.toolbar.addAction(superAction)
self.toolbar.addAction(subAction)
self.toolbar.addSeparator()
self.toolbar.addAction(self.findAction)
self.toolbar.addAction(self.convertAction)
self.toolbar.addSeparator()
self.toolbar.addAction(tableAction)
self.toolbar.addAction(imageAction)
self.toolbar.addSeparator()
self.toolbar.addAction(bulletAction)
self.toolbar.addAction(numberedAction)
def initMenubar(self):
menubar = self.menuBar()
file = menubar.addMenu("File")
edit = menubar.addMenu("Edit")
view = menubar.addMenu("View")
indentAction = QtGui.QAction(QtGui.QIcon("icons/indent.png"), "Indent Area", self)
indentAction.setShortcut("Ctrl+Tab")
indentAction.triggered.connect(self.indentAction)
dedentAction = QtGui.QAction(QtGui.QIcon("icons/dedent.png"), "Dedent Area", self)
dedentAction.setShortcut("Shift+Tab")
dedentAction.triggered.connect(self.dedentAction)
dateTimeAction = QtGui.QAction(QtGui.QIcon("icons/calender.png"), "Insert current date/time", self)
dateTimeAction.setStatusTip("Insert current date/time")
dateTimeAction.setShortcut("Ctrl+D")
dateTimeAction.triggered.connect(datetime.DateTime(self).show)
rightArrowAction = QtGui.QAction(QtGui.QIcon("icons/number.png"), "Insert right arrow", self)
rightArrowAction.setShortcut(Qt.CTRL + Qt.Key_Right)
rightArrowAction.triggered.connect(self.rightArrowAction)
leftArrowAction = QtGui.QAction(QtGui.QIcon("icons/number.png"), "Insert left arrow", self)
leftArrowAction.setShortcut(Qt.CTRL + Qt.Key_Left)
leftArrowAction.triggered.connect(self.leftArrowAction)
wordCountAction = QtGui.QAction(QtGui.QIcon("icons/count.png"), "See word/symbol count", self)
wordCountAction.setStatusTip("See word/symbol count")
wordCountAction.setShortcut("Ctrl+Shift+W")
wordCountAction.triggered.connect(self.wordCount)
# Add the most important actions to the menubar
file.addAction(self.newAction)
file.addAction(self.openAction)
file.addAction(self.saveAction)
file.addSeparator()
file.addAction(self.printAction)
file.addAction(self.previewAction)
edit.addAction(self.undoAction)
edit.addAction(self.redoAction)
edit.addSeparator()
edit.addAction(self.findAction)
edit.addAction(self.convertAction)
edit.addSeparator()
edit.addAction(indentAction)
edit.addAction(dedentAction)
edit.addAction(dateTimeAction)
edit.addAction(rightArrowAction)
edit.addAction(leftArrowAction)
edit.addAction(wordCountAction)
# Toggling actions for the various bars
toolbarAction = QtGui.QAction("Toggle Toolbar", self)
toolbarAction.triggered.connect(self.toggleToolbar)
statusbarAction = QtGui.QAction("Toggle Statusbar", self)
statusbarAction.triggered.connect(self.toggleStatusbar)
view.addAction(toolbarAction)
view.addAction(statusbarAction)
def initUI(self):
self.text = QtGui.QTextEdit(self)
# Set the tab stop width to around 33 pixels which is
# more or less 8 spaces
self.text.setTabStopWidth(33)
self.initToolbar()
self.initMenubar()
self.setCentralWidget(self.text)
# Initialize a statusbar for the window
self.statusbar = self.statusBar()
# If the cursor position changes, call the function that displays
# the line and column number
self.text.cursorPositionChanged.connect(self.cursorPosition)
# We need our own context menu for tables
self.text.setContextMenuPolicy(Qt.CustomContextMenu)
self.text.customContextMenuRequested.connect(self.context)
self.text.textChanged.connect(self.changed)
self.setGeometry(100, 100, 1030, 800)
self.setWindowTitle("Notes Maker")
self.setWindowIcon(QtGui.QIcon("icons/icon.png"))
self.setWindowState(QtCore.Qt.WindowMaximized)
self.show()
self.raise_()
def changed(self):
self.changesSaved = False
def closeEvent(self, event):
if self.changesSaved:
event.accept()
else:
popup = QtGui.QMessageBox(self)
popup.setIcon(QtGui.QMessageBox.Warning)
popup.setText("The document has been modified")
popup.setInformativeText("Do you want to save your changes?")
saveButton = QtGui.QPushButton("Save")
discardButton = QtGui.QPushButton("Discard")
cancelButton = QtGui.QPushButton("Cancel")
popup.addButton(saveButton, QtGui.QMessageBox.YesRole)
popup.addButton(discardButton, QtGui.QMessageBox.NoRole)
popup.addButton(cancelButton, QtGui.QMessageBox.RejectRole)
popup.setDefaultButton(saveButton)
answer = popup.exec_()
if answer == 0: # Save button
self.save()
elif answer == 1: # Discard button
event.accept()
else: # Cancel button
event.ignore()
def rightArrowAction(self):
# Grab the text cursor
cursor = self.text.textCursor()
# Security
if cursor:
# We insert the arrow
cursor.insertText(" -> ")
def leftArrowAction(self):
# Grab the text cursor
cursor = self.text.textCursor()
# Security
if cursor:
# We insert the arrow
cursor.insertText(" <- ")
def context(self, pos):
# Grab the cursor
cursor = self.text.textCursor()
# Grab the current table, if there is one
table = cursor.currentTable()
# Above will return 0 if there is no current table, in which case
# we call the normal context menu. If there is a table, we create
# our own context menu specific to table interaction
if table:
menu = QtGui.QMenu(self)
appendRowAction = QtGui.QAction("Append row", self)
appendRowAction.triggered.connect(lambda: table.appendRows(1))
appendColAction = QtGui.QAction("Append column", self)
appendColAction.triggered.connect(lambda: table.appendColumns(1))
removeRowAction = QtGui.QAction("Remove row", self)
removeRowAction.triggered.connect(self.removeRow)
removeColAction = QtGui.QAction("Remove column", self)
removeColAction.triggered.connect(self.removeCol)
insertRowAction = QtGui.QAction("Insert row", self)
insertRowAction.triggered.connect(self.insertRow)
insertColAction = QtGui.QAction("Insert column", self)
insertColAction.triggered.connect(self.insertCol)
mergeAction = QtGui.QAction("Merge cells", self)
mergeAction.triggered.connect(lambda: table.mergeCells(cursor))
# Only allow merging if there is a selection
if not cursor.hasSelection():
mergeAction.setEnabled(False)
splitAction = QtGui.QAction("Split cells", self)
cell = table.cellAt(cursor)
# Only allow splitting if the current cell is larger
# than a normal cell
if cell.rowSpan() > 1 or cell.columnSpan() > 1:
splitAction.triggered.connect(lambda: table.splitCell(cell.row(), cell.column(), 1, 1))
else:
splitAction.setEnabled(False)
menu.addAction(appendRowAction)
menu.addAction(appendColAction)
menu.addSeparator()
menu.addAction(removeRowAction)
menu.addAction(removeColAction)
menu.addSeparator()
menu.addAction(insertRowAction)
menu.addAction(insertColAction)
menu.addSeparator()
menu.addAction(mergeAction)
menu.addAction(splitAction)
# Convert the widget coordinates into global coordinates
pos = self.mapToGlobal(pos)
            # Add pixels for the toolbar, which is not included in
            # mapToGlobal(), but only if it is currently visible and has not
            # been toggled off by the user
if self.toolbar.isVisible():
pos.setY(pos.y() + 45)
# Move the menu to the new position
menu.move(pos)
menu.show()
else:
event = QtGui.QContextMenuEvent(QtGui.QContextMenuEvent.Mouse, QtCore.QPoint())
self.text.contextMenuEvent(event)
def removeRow(self):
# Grab the cursor
cursor = self.text.textCursor()
# Grab the current table (we assume there is one, since
# this is checked before calling)
table = cursor.currentTable()
# Get the current cell
cell = table.cellAt(cursor)
# Delete the cell's row
table.removeRows(cell.row(), 1)
def removeCol(self):
# Grab the cursor
cursor = self.text.textCursor()
# Grab the current table (we assume there is one, since
# this is checked before calling)
table = cursor.currentTable()
# Get the current cell
cell = table.cellAt(cursor)
# Delete the cell's column
table.removeColumns(cell.column(), 1)
def insertRow(self):
# Grab the cursor
cursor = self.text.textCursor()
# Grab the current table (we assume there is one, since
# this is checked before calling)
table = cursor.currentTable()
# Get the current cell
cell = table.cellAt(cursor)
# Insert a new row at the cell's position
table.insertRows(cell.row(), 1)
def insertCol(self):
# Grab the cursor
cursor = self.text.textCursor()
# Grab the current table (we assume there is one, since
# this is checked before calling)
table = cursor.currentTable()
# Get the current cell
cell = table.cellAt(cursor)
# Insert a new row at the cell's position
table.insertColumns(cell.column(), 1)
def toggleToolbar(self):
state = self.toolbar.isVisible()
# Set the visibility to its inverse
self.toolbar.setVisible(not state)
def toggleStatusbar(self):
state = self.statusbar.isVisible()
# Set the visibility to its inverse
self.statusbar.setVisible(not state)
def new(self):
spawn = Main()
spawn.show()
def open(self):
# Get filename and show only .nmkr files
self.filename = QtGui.QFileDialog.getOpenFileName(self, "Open File", ".", "(*.nmkr)")
if self.filename:
with open(self.filename, "r") as file:
self.text.setText(file.read())
def save(self):
# Only open dialog if there is no filename yet
if not self.filename:
self.filename = QtGui.QFileDialog.getSaveFileName(self, "Save File")
if self.filename:
# Append extension if not there yet
if not self.filename.endsWith(".nmkr"):
self.filename += ".nmkr"
# We just store the contents of the text file along with the
# format in html, which Qt does in a very nice way for us
with open(self.filename, "w") as file:
file.write(self.text.toHtml())
self.changesSaved = True
def preview(self):
# Open preview dialog
preview = QtGui.QPrintPreviewDialog()
# If a print is requested, open print dialog
preview.paintRequested.connect(lambda p: self.text.print_(p))
preview.exec_()
def printHandler(self):
# Open printing dialog
dialog = QtGui.QPrintDialog()
if dialog.exec_() == QtGui.QDialog.Accepted:
self.text.document().print_(dialog.printer())
def cursorPosition(self):
cursor = self.text.textCursor()
# Mortals like 1-indexed things
line = cursor.blockNumber() + 1
col = cursor.columnNumber()
self.statusbar.showMessage("Line: {} | Column: {}".format(line, col))
def wordCount(self):
wc = wordcount.WordCount(self)
wc.getText()
wc.show()
def insertImage(self):
# Get image file name
filename = QtGui.QFileDialog.getOpenFileName(self, "Insert image", ".", "Images (*.png *.xpm *.jpg *.bmp *.gif)")
if filename:
# Create image object
image = QtGui.QImage(filename)
# Error if unloadable
if image.isNull():
popup = QtGui.QMessageBox(QtGui.QMessageBox.Critical,
"Image load error",
"Could not load image file!",
QtGui.QMessageBox.Ok,
self)
popup.show()
else:
cursor = self.text.textCursor()
cursor.insertImage(image, filename)
def fontColorChanged(self):
# Get a color from the text dialog
color = QtGui.QColorDialog.getColor()
# Set it as the new text color
self.text.setTextColor(color)
def highlight(self):
color = QtGui.QColorDialog.getColor()
self.text.setTextBackgroundColor(color)
def bold(self):
if self.text.fontWeight() == QtGui.QFont.Bold:
self.text.setFontWeight(QtGui.QFont.Normal)
else:
self.text.setFontWeight(QtGui.QFont.Bold)
def italic(self):
state = self.text.fontItalic()
self.text.setFontItalic(not state)
def underline(self):
state = self.text.fontUnderline()
self.text.setFontUnderline(not state)
def strike(self):
# Grab the text's format
fmt = self.text.currentCharFormat()
# Set the fontStrikeOut property to its opposite
fmt.setFontStrikeOut(not fmt.fontStrikeOut())
# And set the next char format
self.text.setCurrentCharFormat(fmt)
def superScript(self):
# Grab the current format
fmt = self.text.currentCharFormat()
# And get the vertical alignment property
align = fmt.verticalAlignment()
# Toggle the state
if align == QtGui.QTextCharFormat.AlignNormal:
fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignSuperScript)
else:
fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal)
# Set the new format
self.text.setCurrentCharFormat(fmt)
def subScript(self):
# Grab the current format
fmt = self.text.currentCharFormat()
# And get the vertical alignment property
align = fmt.verticalAlignment()
# Toggle the state
if align == QtGui.QTextCharFormat.AlignNormal:
fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignSubScript)
else:
fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal)
# Set the new format
self.text.setCurrentCharFormat(fmt)
def alignLeft(self):
self.text.setAlignment(Qt.AlignLeft)
def alignRight(self):
self.text.setAlignment(Qt.AlignRight)
def alignCenter(self):
self.text.setAlignment(Qt.AlignCenter)
def alignJustify(self):
self.text.setAlignment(Qt.AlignJustify)
def indentAction(self):
# Grab the cursor
cursor = self.text.textCursor()
if cursor.hasSelection():
# Store the current line/block number
temp = cursor.blockNumber()
# Move to the selection's end
cursor.setPosition(cursor.anchor())
# Calculate range of selection
diff = cursor.blockNumber() - temp
direction = QtGui.QTextCursor.Up if diff > 0 else QtGui.QTextCursor.Down
# Iterate over lines (diff absolute value)
for n in range(abs(diff) + 1):
# Move to start of each line
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
# Insert tabbing
cursor.insertText("\t")
# And move back up
cursor.movePosition(direction)
# If there is no selection, just insert a tab
else:
cursor.insertText("\t")
def handleDedent(self, cursor):
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
# Grab the current line
line = cursor.block().text()
# If the line starts with a tab character, delete it
if line.startsWith("\t"):
# Delete next character
cursor.deleteChar()
# Otherwise, delete all spaces until a non-space character is met
else:
for char in line[:8]:
if char != " ":
break
cursor.deleteChar()
def dedentAction(self):
cursor = self.text.textCursor()
if cursor.hasSelection():
# Store the current line/block number
temp = cursor.blockNumber()
# Move to the selection's last line
cursor.setPosition(cursor.anchor())
# Calculate range of selection
diff = cursor.blockNumber() - temp
direction = QtGui.QTextCursor.Up if diff > 0 else QtGui.QTextCursor.Down
# Iterate over lines
for n in range(abs(diff) + 1):
self.handleDedent(cursor)
# Move up
cursor.movePosition(direction)
else:
self.handleDedent(cursor)
def bulletList(self):
cursor = self.text.textCursor()
# Insert bulleted list
cursor.insertList(QtGui.QTextListFormat.ListDisc)
def numberList(self):
cursor = self.text.textCursor()
# Insert list with numbers
cursor.insertList(QtGui.QTextListFormat.ListDecimal)
def main():
app = QtGui.QApplication(sys.argv)
main = Main()
main.show()
sys.exit(app.exec_())
if __name__ == "__main__":
# Remove line to keep default style from platform
QtGui.QApplication.setStyle(QtGui.QStyleFactory.create("cleanlooks"))
if QtGui.QApplication.style():
QtGui.QApplication.setPalette(QtGui.QApplication.style().standardPalette())
main()
| mit | 7,842,574,968,400,065,000 | 29.938032 | 121 | 0.631765 | false |
kubernetes-incubator/external-storage | repo-infra/defs/gcs_uploader.py | 6 | 2889 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import atexit
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
def _workspace_status_dict(root):
d = {}
for f in ("stable-status.txt", "volatile-status.txt"):
with open(os.path.join(root, f)) as info_file:
for info_line in info_file:
info_line = info_line.strip("\n")
key, value = info_line.split(" ")
d[key] = value
return d
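# Example (sketch): the status files read above hold one "KEY value" pair per
# line, and the resulting dict is used to expand {placeholders} in manifest
# destinations and GCS paths. The key name below is illustrative only.
def _example_status_expansion():
    status = {"BUILD_SCM_REVISION": "deadbeef"}
    return "devel/{BUILD_SCM_REVISION}".format(**status)  # -> "devel/deadbeef"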
def main(argv):
scratch = tempfile.mkdtemp(prefix="bazel-gcs.")
atexit.register(lambda: shutil.rmtree(scratch))
workspace_status = _workspace_status_dict(argv.root)
with open(argv.manifest) as manifest:
for artifact in manifest:
artifact = artifact.strip("\n")
src_file, dest_dir = artifact.split("\t")
dest_dir = dest_dir.format(**workspace_status)
scratch_dest_dir = os.path.join(scratch, dest_dir)
try:
os.makedirs(scratch_dest_dir)
except (OSError):
# skip directory already exists errors
pass
src = os.path.join(argv.root, src_file)
dest = os.path.join(scratch_dest_dir, os.path.basename(src_file))
os.symlink(src, dest)
ret = 0
uploaded_paths = []
for gcs_path in argv.gcs_paths:
gcs_path = gcs_path.format(**workspace_status)
local_path = None
if gcs_path.startswith("file://"):
local_path = gcs_path[len("file://"):]
elif "://" not in gcs_path:
local_path = gcs_path
if local_path and not os.path.exists(local_path):
os.makedirs(local_path)
ret |= subprocess.call(["gsutil", "-m", "rsync", "-C", "-r", scratch, gcs_path])
uploaded_paths.append(gcs_path)
print "Uploaded to %s" % " ".join(uploaded_paths)
sys.exit(ret)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Upload build targets to GCS.')
parser.add_argument("--manifest", required=True, help="path to manifest of targets")
parser.add_argument("--root", required=True, help="path to root of workspace")
parser.add_argument("gcs_paths", nargs="+", help="path in gcs to push targets")
main(parser.parse_args())
| apache-2.0 | -6,298,574,388,053,577,000 | 34.666667 | 88 | 0.628937 | false |
zapier/django-pipeline | tests/tests/test_compressor.py | 1 | 9034 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
try:
from mock import patch, MagicMock
except ImportError:
from unittest.mock import patch, MagicMock # noqa
from django.test import TestCase
from pipeline.compressors import Compressor, TEMPLATE_FUNC
from pipeline.compressors.yuglify import YuglifyCompressor
from pipeline.compressors.uglifyjs import UglifyJSCompressor
from pipeline.exceptions import CompressorError
from tests.utils import _
class CompressorTest(TestCase):
def setUp(self):
self.maxDiff = None
self.compressor = Compressor()
def test_js_compressor_class(self):
self.assertEqual(self.compressor.js_compressor, YuglifyCompressor)
def test_css_compressor_class(self):
self.assertEqual(self.compressor.css_compressor, YuglifyCompressor)
def test_concatenate_and_rewrite(self):
css = self.compressor.concatenate_and_rewrite([
_('pipeline/css/first.css'),
_('pipeline/css/second.css')
], 'css/screen.css')
self.assertEqual(""".concat {\n display: none;\n}\n\n.concatenate {\n display: block;\n}\n""", css)
def test_concatenate(self):
js = self.compressor.concatenate([
_('pipeline/js/first.js'),
_('pipeline/js/second.js')
])
self.assertEqual("""function concat() {\n console.log(arguments);\n}\n\nfunction cat() {\n console.log("hello world");\n}\n""", js)
@patch.object(base64, 'b64encode')
def test_encoded_content(self, mock):
self.compressor.encoded_content(_('pipeline/images/arrow.png'))
self.assertTrue(mock.called)
mock.reset_mock()
self.compressor.encoded_content(_('pipeline/images/arrow.png'))
self.assertFalse(mock.called)
def test_relative_path(self):
relative_path = self.compressor.relative_path("images/sprite.png", 'css/screen.css')
self.assertEqual(relative_path, '../images/sprite.png')
def test_base_path(self):
base_path = self.compressor.base_path([
_('js/templates/form.jst'), _('js/templates/field.jst')
])
self.assertEqual(base_path, _('js/templates'))
def test_absolute_path(self):
absolute_path = self.compressor.absolute_path('../../images/sprite.png',
'css/plugins/')
self.assertEqual(absolute_path, 'images/sprite.png')
absolute_path = self.compressor.absolute_path('/images/sprite.png',
'css/plugins/')
self.assertEqual(absolute_path, '/images/sprite.png')
def test_template_name(self):
name = self.compressor.template_name('templates/photo/detail.jst',
'templates/')
self.assertEqual(name, 'photo_detail')
name = self.compressor.template_name('templates/photo_edit.jst', '')
self.assertEqual(name, 'photo_edit')
name = self.compressor.template_name('templates\photo\detail.jst',
'templates\\')
self.assertEqual(name, 'photo_detail')
def test_compile_templates(self):
templates = self.compressor.compile_templates([_('pipeline/templates/photo/list.jst')])
self.assertEqual(templates, """window.JST = window.JST || {};\n%s\nwindow.JST[\'list\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%>\\n </div>\\n</div>\');\n""" % TEMPLATE_FUNC)
templates = self.compressor.compile_templates([
_('pipeline/templates/video/detail.jst'),
_('pipeline/templates/photo/detail.jst')
])
self.assertEqual(templates, """window.JST = window.JST || {};\n%s\nwindow.JST[\'video_detail\'] = template(\'<div class="video">\\n <video src="<%%= src %%>" />\\n <div class="caption">\\n <%%= description %%>\\n </div>\\n</div>\');\nwindow.JST[\'photo_detail\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%> by <%%= author %%>\\n </div>\\n</div>\');\n""" % TEMPLATE_FUNC)
def test_embeddable(self):
self.assertFalse(self.compressor.embeddable(_('pipeline/images/sprite.png'), None))
self.assertFalse(self.compressor.embeddable(_('pipeline/images/arrow.png'), 'datauri'))
self.assertTrue(self.compressor.embeddable(_('pipeline/images/embed/arrow.png'), 'datauri'))
self.assertFalse(self.compressor.embeddable(_('pipeline/images/arrow.dat'), 'datauri'))
def test_construct_asset_path(self):
asset_path = self.compressor.construct_asset_path("../../images/sprite.png",
"css/plugins/gallery.css", "css/gallery.css")
self.assertEqual(asset_path, "../images/sprite.png")
asset_path = self.compressor.construct_asset_path("/images/sprite.png",
"css/plugins/gallery.css", "css/gallery.css")
self.assertEqual(asset_path, "/images/sprite.png")
def test_compress_js(self):
with patch.object(self.compressor.js_compressor, 'compress_js') as mock_method:
paths = []
mock_method.return_value = 'asdf'
(js, source_map) = self.compressor.compress_js(paths)
self.assertEqual(js, 'asdf')
self.assertEqual(source_map, '')
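            # With no input paths, only Pipeline's anonymous-function wrapper
            # should reach the underlying compressor.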
mock_method.assert_called_with(u'(function() { }).call(this);')
@patch('pipeline.compressors.yuglify.YuglifyCompressor')
def test_compress_js_with_source_map(self, mock_constructor):
mock_js_compressor = MagicMock()
mock_constructor.return_value = mock_js_compressor
mock_js_compressor.compress_js_with_source_map.return_value = ['code', 'map']
paths = [
_('pipeline/js/first.js'),
_('pipeline/js/second.js')
]
(js, source_map) = self.compressor.compress_js(paths, source_map_filename='map.js')
self.assertEqual(js, 'code')
self.assertEqual(source_map, 'map')
call = mock_js_compressor.compress_js_with_source_map.call_args_list[0]
call_args = call[0]
self.assertRegexpMatches(call_args[0][0], 'first.js')
self.assertRegexpMatches(call_args[0][1], 'second.js')
self.assertEquals(call_args[1], 'map.js')
self.assertEquals(call_args[2], '/static/')
self.assertEquals(call_args[3], 'tests/static/')
# Uncomment if you need a fully working version
# May also need to tweak pipeline/conf/settings.py to point to real uglify binary
# @patch('pipeline.compressors.yuglify.YuglifyCompressor')
# def test_compress_js_with_source_map_real(self, mock_constructor):
# mock_constructor.return_value = UglifyJSCompressor(False)
# paths = [
# _('pipeline/js/first.js'),
# _('pipeline/js/second.js')
# ]
# (js, source_map) = self.compressor.compress_js(paths, source_map_filename='wakawaka.js')
# self.assertRegexpMatches(js, 'function concat.*function cat')
# self.assertRegexpMatches(js, '@ sourceMappingURL=/static/wakawaka.js') # Bunch of newlines..easier to do 2 asserts
# self.assertTrue(len(source_map) > 0)
@patch('pipeline.compressors.yuglify.YuglifyCompressor')
def test_compress_js_with_source_map_on_non_compatible_compressor(self, mock_constructor):
mock_js_compressor = MagicMock()
mock_constructor.return_value = mock_js_compressor
del mock_js_compressor.compress_js_with_source_map
with self.assertRaisesRegexp(CompressorError, 'cannot make source maps'):
self.compressor.compress_js([], source_map_filename='map.js')
@patch('pipeline.compressors.yuglify.YuglifyCompressor')
def test_compress_js_with_source_map_and_templates(self, mock_constructor):
mock_js_compressor = MagicMock()
mock_constructor.return_value = mock_js_compressor
with self.assertRaisesRegexp(CompressorError, 'Templates cannot be part of a group'):
self.compressor.compress_js([], source_map_filename='map.js', templates=['foo.jst'])
def test_url_rewrite(self):
output = self.compressor.concatenate_and_rewrite([
_('pipeline/css/urls.css'),
], 'css/screen.css')
self.assertEqual("""@font-face {
font-family: 'Pipeline';
src: url(../pipeline/fonts/pipeline.eot);
src: url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype');
src: local('☺'), url(../pipeline/fonts/pipeline.woff) format('woff'), url(../pipeline/fonts/pipeline.ttf) format('truetype'), url(../pipeline/fonts/pipeline.svg#IyfZbseF) format('svg');
font-weight: normal;
font-style: normal;
}
.relative-url {
background-image: url(../pipeline/images/sprite-buttons.png);
}
.relative-url-querystring {
background-image: url(../pipeline/images/sprite-buttons.png?v=1.0#foo=bar);
}
.absolute-url {
background-image: url(/images/sprite-buttons.png);
}
.absolute-full-url {
background-image: url(http://localhost/images/sprite-buttons.png);
}
.no-protocol-url {
background-image: url(//images/sprite-buttons.png);
}""", output)
| mit | -8,613,514,646,227,856,000 | 46.041667 | 444 | 0.649469 | false |
caisq/tensorflow | tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py | 6 | 47713 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for boosted_trees training kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.kernels.boosted_trees import boosted_trees_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
class UpdateTreeEnsembleOpTest(test_util.TensorFlowTestCase):
"""Tests for growing tree ensemble from split candidates."""
def testGrowWithEmptyEnsemble(self):
"""Test growing an empty ensemble."""
with self.test_session() as session:
# Create empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_ids = [0, 2, 6]
# Prepare feature inputs.
# Note that features 1 & 3 have the same gain but different splits.
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([7.62], dtype=np.float32)
feature1_thresholds = np.array([52], dtype=np.int32)
feature1_left_node_contribs = np.array([[-4.375]], dtype=np.float32)
feature1_right_node_contribs = np.array([[7.143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([0.63], dtype=np.float32)
feature2_thresholds = np.array([23], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24]], dtype=np.float32)
# Feature split with the highest gain.
feature3_nodes = np.array([0], dtype=np.int32)
feature3_gains = np.array([7.65], dtype=np.float32)
feature3_thresholds = np.array([7], dtype=np.int32)
feature3_left_node_contribs = np.array([[-4.89]], dtype=np.float32)
feature3_right_node_contribs = np.array([[5.3]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# Tree will be finalized now, since we will reach depth 1.
max_depth=1,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
# Note that since the tree is finalized, we added a new dummy tree.
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 6
threshold: 7
left_id: 1
right_id: 2
}
metadata {
gain: 7.65
}
}
nodes {
leaf {
scalar: -0.489
}
}
nodes {
leaf {
scalar: 0.53
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
def testBiasCenteringOnEmptyEnsemble(self):
"""Test growing with bias centering on an empty ensemble."""
with self.test_session() as session:
# Create empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
gradients = np.array([[5.]], dtype=np.float32)
hessians = np.array([[24.]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.center_bias(
tree_ensemble_handle,
mean_gradients=gradients,
mean_hessians=hessians,
l1=0.0,
l2=1.0
)
session.run(grow_op)
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
leaf {
scalar: -0.2
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 0
is_finalized: false
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
def testGrowExistingEnsembleTreeNotFinalized(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
# feature 1 only has a candidate for node 1, feature 2 has candidates
# for both nodes and feature 3 only has a candidate for node 2.
feature_ids = [0, 1, 0]
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([0.63, 2.7], dtype=np.float32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([1.7], dtype=np.float32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# tree is going to be finalized now, since we reach depth 2.
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
# Expect the split for node 1 to be chosen from feature 1 and
# the split for node 2 to be chosen from feature 2.
# The grown tree should be finalized as max tree depth is 2 and we have
# grown 2 layers.
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
bucketized_split {
threshold: 21
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
scalar: 0.714
}
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 7
left_id: 5
right_id: 6
}
metadata {
gain: 2.7
original_leaf {
scalar: -0.4375
}
}
}
nodes {
leaf {
scalar: 0.114
}
}
nodes {
leaf {
scalar: 0.879
}
}
nodes {
leaf {
scalar: -0.5875
}
}
nodes {
leaf {
scalar: -0.2075
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
def testGrowExistingEnsembleTreeFinalized(self):
"""Test growing an existing ensemble with the last tree finalized."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
feature_ids = [75]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
learning_rate=0.1,
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
# Expect a new tree added, with a split on feature 75
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 75
threshold: 21
left_id: 1
right_id: 2
}
metadata {
gain: -1.4
}
}
nodes {
leaf {
scalar: -0.6
}
}
nodes {
leaf {
scalar: 0.165
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
def testPrePruning(self):
"""Test growing an existing ensemble with pre-pruning."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
tree_weights: 0.1
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
# For node 1, the best split is on feature 2 (gain -0.63), but the gain
# is negative so node 1 will not be split.
# For node 2, the best split is on feature 3, gain is positive.
feature_ids = [0, 1, 0]
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([-0.63, 2.7], dtype=np.float32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([2.8], dtype=np.float32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.PRE_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
# Expect the split for node 1 to be chosen from feature 1 and
# the split for node 2 to be chosen from feature 2.
# The grown tree should not be finalized as max tree depth is 3 and
# it's only grown 2 layers.
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 3
left_id: 3
right_id: 4
}
metadata {
gain: 2.8
original_leaf {
scalar: -4.375
}
}
}
nodes {
leaf {
scalar: -4.45
}
}
nodes {
leaf {
scalar: -4.182
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: false
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 3
last_layer_node_end: 5
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
def testMetadataWhenCantSplitDueToEmptySplits(self):
"""Test that the metadata is updated even though we can't split."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
# feature 1 only has a candidate for node 1, feature 2 has candidates
# for both nodes and feature 3 only has a candidate for node 2.
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
max_depth=2,
# No splits are available.
feature_ids=[],
node_ids=[],
gains=[],
thresholds=[],
left_node_contribs=[],
right_node_contribs=[])
session.run(grow_op)
# Expect no new splits created, but attempted (global) stats updated. Meta
# data for this tree should not be updated (we didn't succeed building a
# layer. Node ranges don't change.
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
def testMetadataWhenCantSplitDuePrePruning(self):
"""Test metadata is updated correctly when no split due to prepruning."""
with self.test_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
feature_ids = [0, 1, 0]
# All the gains are negative.
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([-0.63, -2.7], dtype=np.float32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([-2.8], dtype=np.float32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.PRE_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
# Expect that no new split was created because all the gains were negative
# Global metadata should be updated, tree metadata should not be updated.
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
def testPostPruningOfSomeNodes(self):
"""Test growing an ensemble with post-pruning."""
with self.test_session() as session:
# Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs.
      # Second feature has a larger (but still negative) gain.
feature_ids = [0, 1]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.3], dtype=np.float32)
feature1_thresholds = np.array([7], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.013]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([-0.2], dtype=np.float32)
feature2_thresholds = np.array([33], dtype=np.int32)
feature2_left_node_contribs = np.array([[0.01]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
])
session.run(grow_op)
# Expect the split from second features to be chosen despite the negative
# gain.
# No pruning happened just yet.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
leaf {
scalar: 0.0143
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
# Prepare the second layer.
# Note that node 1 gain is negative and node 2 gain is positive.
feature_ids = [3]
feature1_nodes = np.array([1, 2], dtype=np.int32)
feature1_gains = np.array([-0.2, 0.5], dtype=np.float32)
feature1_thresholds = np.array([7, 5], dtype=np.int32)
feature1_left_node_contribs = np.array(
[[0.07], [0.041]], dtype=np.float32)
feature1_right_node_contribs = np.array(
[[0.083], [0.064]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
# After adding this layer, the tree will not be finalized
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id:1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 7
left_id: 3
right_id: 4
}
metadata {
gain: -0.2
original_leaf {
scalar: 0.01
}
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 5
left_id: 5
right_id: 6
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.08
}
}
nodes {
leaf {
scalar: 0.093
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 2
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 3
last_layer_node_end: 7
}
"""
self.assertEqual(new_stamp, 2)
self.assertProtoEquals(expected_result, res_ensemble)
# Now split the leaf 3, again with negative gain. After this layer, the
# tree will be finalized, and post-pruning happens. The leafs 3,4,7,8 will
# be pruned out.
# Prepare the third layer.
feature_ids = [92]
feature1_nodes = np.array([3], dtype=np.int32)
feature1_gains = np.array([-0.45], dtype=np.float32)
feature1_thresholds = np.array([11], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.15]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.5]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
# After adding this layer, the tree will be finalized
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
      # Note that nodes 3, 4, 7 and 8 got deleted, so the metadata stores their
      # ids mapped to their parent node 1, with the respective change in logits.
expected_result = """
trees {
nodes {
bucketized_split {
feature_id:1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
trees {
nodes {
leaf {
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
}
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 3
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 3)
self.assertProtoEquals(expected_result, res_ensemble)
def testPostPruningOfAllNodes(self):
"""Test growing an ensemble with post-pruning, with all nodes are pruned."""
with self.test_session() as session:
      # Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs. All have negative gains.
feature_ids = [0, 1]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.3], dtype=np.float32)
feature1_thresholds = np.array([7], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.013]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([-0.62], dtype=np.float32)
feature2_thresholds = np.array([33], dtype=np.int32)
feature2_left_node_contribs = np.array([[0.01]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
])
session.run(grow_op)
# Expect the split from feature 2 to be chosen despite the negative gain.
# The grown tree should not be finalized as max tree depth is 2 so no
# pruning occurs.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.62
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
leaf {
scalar: 0.0143
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
# Prepare inputs.
# All have negative gain.
feature_ids = [3]
feature1_nodes = np.array([1, 2], dtype=np.int32)
feature1_gains = np.array([-0.2, -0.5], dtype=np.float32)
feature1_thresholds = np.array([77, 79], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.023], [0.3]], dtype=np.float32)
feature1_right_node_contribs = np.array(
[[0.012343], [24]], dtype=np.float32)
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
# Expect the split from feature 1 to be chosen despite the negative gain.
# The grown tree should be finalized. Since all nodes have negative gain,
# the whole tree is pruned.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
# Expect the ensemble to be empty as post-pruning will prune
# the entire finalized tree.
self.assertEqual(new_stamp, 2)
self.assertProtoEquals(
"""
trees {
nodes {
leaf {
}
}
}
trees {
nodes {
leaf {
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata{
num_layers_grown: 2
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.01
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.0143
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.033
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.022343
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.3143
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -24.014299
}
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 0
last_layer_node_end: 1
}
""", res_ensemble)
def testPostPruningChangesNothing(self):
"""Test growing an ensemble with post-pruning with all gains >0."""
with self.test_session() as session:
# Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs.
      # Both gains are positive here; the first feature's gain is larger.
feature_ids = [3, 4]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([7.62], dtype=np.float32)
feature1_thresholds = np.array([52], dtype=np.int32)
feature1_left_node_contribs = np.array([[-4.375]], dtype=np.float32)
feature1_right_node_contribs = np.array([[7.143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([0.63], dtype=np.float32)
feature2_thresholds = np.array([23], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=1,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
])
session.run(grow_op)
# Expect the split from the first feature to be chosen.
# Pruning got triggered but changed nothing.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 3
threshold: 52
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: -4.375
}
}
nodes {
leaf {
scalar: 7.143
}
}
}
trees {
nodes {
leaf {
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
if __name__ == '__main__':
googletest.main()
| apache-2.0 | -2,084,426,581,368,099,300 | 30.002599 | 80 | 0.53346 | false |
jfrygeo/solutions-geoprocessing-toolbox | suitability/toolboxes/scripts/NAMDownload.py | 2 | 8637 | # coding: utf-8
'''
------------------------------------------------------------------------------
Copyright 2016 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------
Name: NAMDownload.py
Description: Downloads the most up to date data from the NOAA site by getting the present date.
Script works as follows:
Gets the present date and time in UTC
Uses the OPeNDAP to NetCDF tool from Multidimension Supplemental Tools
Downloads the specified variables into NetCDF format files and saves them in the relative location, based on where the script file is located.
The present data is removed from the Mosaic Dataset
The new data is then loaded into the Mosaic Dataset
History:
9/21/2015 - ab - original coding
6/10/2016 - mf - Updates for dimension and formatting
9/12/2016 - mf - fix for Python3 not liking leading zeros
'''
#Import modules
#import arceditor
import arcpy
import os
import sys
import traceback
import datetime
from arcpy import env
from datetime import datetime
from datetime import time
from datetime import timedelta
#Gets the current directory where the script is sitting so that everything else can work off relative paths.
currentFolder = os.path.dirname(__file__)
topFolder = os.path.dirname(currentFolder)
#Names of folders to be added to topFolder generated above
gdb = "Geodatabase"
NetCDFData = "NetCDFdata"
tls = "Tools"
env.workspace = os.path.join(topFolder, gdb, r"MAOWdata.gdb")
env.scratchWorkspace = env.workspace
#Declaration of variables used later
opVariables = "rh2m;tcdcclm;tmpsfc;hgtclb;vissfc;ugrd10m;vgrd10m;ugrdmwl;vgrdmwl;snodsfc;gustsfc;apcpsfc"
windVariables = "ugrd10m;vgrd10m"
geoExtent = "-126 32 -114 43"
timeDimension = "time '2016-01-01 00:00:00' '2016-12-31 00:00:00'"
# Processing flags
REMOVE_EXISTING_RASTERS = True
DEBUG = True # Extra messaging while debugging
def makeOutputFilePath(topFolder, NetCDFData, stringDateNow, paramFN):
'''Set output file paths for op weather and wind'''
opDataFileName = "nam%s%s.nc" % (stringDateNow, paramFN)
outputOpDataFile = os.path.join(topFolder, NetCDFData, opDataFileName)
windDataFileName = "nam%s%sWind.nc" % (stringDateNow, paramFN)
outputWindDataFile = os.path.join(topFolder, NetCDFData, windDataFileName)
return [outputOpDataFile, outputWindDataFile]
def makeSourceURLPath(stringDateNow, paramDL):
'''make the URL to the source forecast data'''
return r"http://nomads.ncep.noaa.gov/dods/nam/nam%s/nam%s" % (stringDateNow, paramDL)
def download(stringDateNow, stringTimeNow, paramFN, paramDL):
'''Download NetCDF data and add to mosaic dataset'''
if DEBUG: print ("datetime to use: %s, %s" % (stringDateNow, stringTimeNow))
#Import required Multidimensional tools
tbxMST = os.path.join(topFolder, tls, r"MultidimensionSupplementalTools\Multidimension Supplemental Tools.pyt")
if DEBUG: print ("Importing %s" % tbxMST)
arcpy.ImportToolbox(tbxMST)
# Get target NetCDF data file names
outputOpDataFile, outputWindDataFile = makeOutputFilePath(topFolder, NetCDFData, stringDateNow, paramFN)
if os.path.exists(outputOpDataFile):
print("removing existing %s" % outputOpDataFile)
os.remove(outputOpDataFile)
if os.path.exists(outputWindDataFile):
print("removing existing %s" % outputWindDataFile)
os.remove(outputWindDataFile)
# Get source URL path
in_url = makeSourceURLPath(stringDateNow, paramDL)
#Run OPeNDAP to NetCDF tool
if DEBUG:
print("in_url: %s" % in_url)
print("variable: %s" % opVariables)
print("dimension: %s" % timeDimension )
print ("OPeNDAP Tool run for Operational Weather variables...")
arcpy.OPeNDAPtoNetCDF_mds(in_url, opVariables, outputOpDataFile, geoExtent, timeDimension, "BY_VALUE")
#Run OPeNDAP to NetCDF tool
print ("OPeNDAP Tool run for Wind variables...")
arcpy.OPeNDAPtoNetCDF_mds(in_url, windVariables, outputWindDataFile, geoExtent, timeDimension, "BY_VALUE")
targetOpDataMosaic = os.path.join(topFolder, gdb, r"OperationalWeather.gdb\OperationalData")
targetWindDataMosaic = os.path.join(topFolder, gdb, r"OperationalWeather.gdb\OperationalWind")
# Remove Rasters From Mosaic Dataset
if REMOVE_EXISTING_RASTERS:
print ("Removing existing rasters from Operational Weather...")
arcpy.RemoveRastersFromMosaicDataset_management(targetOpDataMosaic, "OBJECTID >=0", "NO_BOUNDARY", "NO_MARK_OVERVIEW_ITEMS",
"NO_DELETE_OVERVIEW_IMAGES", "NO_DELETE_ITEM_CACHE", "REMOVE_MOSAICDATASET_ITEMS",
"NO_CELL_SIZES")
print ("Removing existing rasters from Wind...")
arcpy.RemoveRastersFromMosaicDataset_management(targetWindDataMosaic, "OBJECTID >= 0", "UPDATE_BOUNDARY", "MARK_OVERVIEW_ITEMS",
"DELETE_OVERVIEW_IMAGES", "DELETE_ITEM_CACHE", "REMOVE_MOSAICDATASET_ITEMS",
"UPDATE_CELL_SIZES")
# Add Rasters To Mosaic Dataset
print ("Adding new rasters from Operational Weather...")
arcpy.AddRastersToMosaicDataset_management(targetOpDataMosaic, "NetCDF", outputOpDataFile, "UPDATE_CELL_SIZES", "UPDATE_BOUNDARY",
"NO_OVERVIEWS", "", "0", "1500", "", "*.nc", "SUBFOLDERS", "ALLOW_DUPLICATES",
"NO_PYRAMIDS", "NO_STATISTICS", "NO_THUMBNAILS", "", "NO_FORCE_SPATIAL_REFERENCE")
print ("Adding new rasters from Wind...")
arcpy.AddRastersToMosaicDataset_management(targetWindDataMosaic, "NetCDF", outputWindDataFile, "UPDATE_CELL_SIZES", "UPDATE_BOUNDARY",
"NO_OVERVIEWS", "", "0", "1500", "", "*.nc", "SUBFOLDERS", "ALLOW_DUPLICATES",
"NO_PYRAMIDS", "NO_STATISTICS", "NO_THUMBNAILS", "", "NO_FORCE_SPATIAL_REFERENCE")
return
def main():
'''Decide which time period to download'''
try:
now_time = time(int(datetime.utcnow().strftime("%H")), int(datetime.utcnow().strftime("%M")), int(datetime.utcnow().strftime("%S")))
print("UTC time is (now_time): %s" % now_time)
patternDate = '%Y%m%d'
patternTime = '%H:%M:%S'
stringDateNow = datetime.utcnow().strftime(patternDate)
stringTimeNow = datetime.utcnow().strftime(patternTime)
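        # Map the current UTC time to the newest NAM cycle expected to be
        # available; the windows below assume a run is published roughly
        # 2h50m-3h after its nominal cycle time (00z, 06z, 12z or 18z).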
if now_time >= time(2,50,00) and now_time < time(8,50,00):
print("Going to download 1hr_00z...")
download(stringDateNow, stringTimeNow,"1hr00z", "1hr_00z")
elif now_time >= time(8,50,00) and now_time < time(14,50,00):
print("Going to download 1hr_06z...")
download(stringDateNow, stringTimeNow,"1hr06z", "1hr_06z")
elif now_time >= time(14,50,00) and now_time < time(21,00,00):
print("Going to download 1hr_12z...")
download(stringDateNow, stringTimeNow,"1hr12z", "1hr_12z")
elif (now_time >= time(21,00,00) and now_time <= time(23,59,59)):
print("Going to download 1hr_18z...")
download(stringDateNow, stringTimeNow,"1hr18z", "1hr_18z")
elif (now_time >= time(00,00,00) and now_time <= time(2,49,59)):
# Get yesterday's forecast, because today's isn't
# published yet:
stringDateNow = (datetime.utcnow() - timedelta(days=1)).strftime(patternDate)
print("Going to download 1hr_18z for %s..." % stringDateNow)
download(stringDateNow, stringTimeNow,"1hr18z", "1hr_18z")
print("Done.")
except:
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
pymsg = "ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
print(pymsg + "\n")
sys.exit(1)
# MAIN =============================================
if __name__ == "__main__":
main()
| apache-2.0 | -1,590,416,166,516,263,000 | 45.686486 | 146 | 0.646521 | false |
doug-fish/horizon | openstack_dashboard/contrib/sahara/content/data_processing/job_binaries/tests.py | 12 | 4368 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard.contrib.sahara import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:data_processing.job_binaries:index')
DETAILS_URL = reverse(
'horizon:project:data_processing.job_binaries:details', args=['id'])
class DataProcessingJobBinaryTests(test.TestCase):
@test.create_stubs({api.sahara: ('job_binary_list',)})
def test_index(self):
api.sahara.job_binary_list(IsA(http.HttpRequest)) \
.AndReturn(self.job_binaries.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(
res, 'project/data_processing.job_binaries/job_binaries.html')
self.assertContains(res, 'Job Binaries')
self.assertContains(res, 'Name')
self.assertContains(res, 'example.pig')
@test.create_stubs({api.sahara: ('job_binary_get',)})
def test_details(self):
api.sahara.job_binary_get(IsA(http.HttpRequest), IsA(unicode)) \
.MultipleTimes().AndReturn(self.job_binaries.first())
self.mox.ReplayAll()
res = self.client.get(DETAILS_URL)
self.assertTemplateUsed(
res, 'project/data_processing.job_binaries/details.html')
self.assertContains(res, 'Job Binary Details')
@test.create_stubs({api.sahara: ('job_binary_list',
'job_binary_get',
'job_binary_internal_delete',
'job_binary_delete',)})
def test_delete(self):
jb_list = (api.sahara.job_binary_list(IsA(http.HttpRequest))
.AndReturn(self.job_binaries.list()))
api.sahara.job_binary_get(IsA(http.HttpRequest), IsA(unicode)) \
.AndReturn(self.job_binaries.list()[0])
api.sahara.job_binary_delete(IsA(http.HttpRequest), jb_list[0].id)
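        # Job binaries stored in Sahara's internal DB use URLs of the form
        # "internal-db://<id>", so the id after "//" is what the
        # internal-delete call expects.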
int_id = jb_list[0].url.split("//")[1]
api.sahara.job_binary_internal_delete(IsA(http.HttpRequest), int_id)
self.mox.ReplayAll()
form_data = {"action": "job_binaries__delete__%s" % jb_list[0].id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.sahara: ('job_binary_get',
'job_binary_get_file')})
def test_download(self):
jb = api.sahara.job_binary_get(IsA(http.HttpRequest), IsA(unicode)) \
.AndReturn(self.job_binaries.list()[0])
api.sahara.job_binary_get_file(IsA(http.HttpRequest), jb.id) \
.AndReturn("TEST FILE CONTENT")
self.mox.ReplayAll()
context = {'job_binary_id': jb.id}
url = reverse('horizon:project:data_processing.job_binaries:download',
kwargs={'job_binary_id': jb.id})
res = self.client.get(url, context)
self.assertTrue(res.has_header('content-disposition'))
@test.create_stubs({api.sahara: ('job_binary_get',
'job_binary_get_file')})
def test_download_with_spaces(self):
jb = api.sahara.job_binary_get(IsA(http.HttpRequest), IsA(unicode)) \
.AndReturn(self.job_binaries.list()[1])
api.sahara.job_binary_get_file(IsA(http.HttpRequest), jb.id) \
.AndReturn("MORE TEST FILE CONTENT")
self.mox.ReplayAll()
context = {'job_binary_id': jb.id}
url = reverse('horizon:project:data_processing.job_binaries:download',
kwargs={'job_binary_id': jb.id})
res = self.client.get(url, context)
self.assertEqual(
res.get('Content-Disposition'),
'attachment; filename="%s"' % jb.name
)
| apache-2.0 | 5,623,169,444,867,991,000 | 43.571429 | 78 | 0.626145 | false |
indianajohn/ycmd | ycmd/tests/javascript/subcommands_test.py | 1 | 13462 | # Copyright (C) 2015 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from nose.tools import eq_
from hamcrest import ( assert_that,
contains,
contains_inanyorder,
has_entries )
from .javascript_handlers_test import Javascript_Handlers_test
from ycmd.utils import ReadFile
from pprint import pformat
import http.client
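# ycmd line and column numbers are 1-based; these helpers build hamcrest
# matchers for locations and fix-it chunks in that convention.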
def LocationMatcher( filepath, line_num, column_num ):
return has_entries( {
'line_num': line_num,
'column_num': column_num,
'filepath': filepath
} )
def ChunkMatcher( replacement_text, start, end ):
return has_entries( {
'replacement_text': replacement_text,
'range': has_entries( {
'start': start,
'end': end
} )
} )
class Javascript_Subcommands_test( Javascript_Handlers_test ):
def _RunTest( self, test ):
contents = ReadFile( test[ 'request' ][ 'filepath' ] )
def CombineRequest( request, data ):
kw = request
request.update( data )
return self._BuildRequest( **kw )
# Because we aren't testing this command, we *always* ignore errors. This
# is mainly because we (may) want to test scenarios where the completer
# throws an exception and the easiest way to do that is to throw from
# within the FlagsForFile function.
self._app.post_json( '/event_notification',
CombineRequest( test[ 'request' ], {
'event_name': 'FileReadyToParse',
'contents': contents,
} ),
expect_errors = True )
# We also ignore errors here, but then we check the response code
# ourself. This is to allow testing of requests returning errors.
response = self._app.post_json(
'/run_completer_command',
CombineRequest( test[ 'request' ], {
'completer_target': 'filetype_default',
'contents': contents,
'filetype': 'javascript',
'command_arguments': ( [ test[ 'request' ][ 'command' ] ]
+ test[ 'request' ].get( 'arguments', [] ) )
} ),
expect_errors = True
)
print( 'completer response: {0}'.format( pformat( response.json ) ) )
eq_( response.status_code, test[ 'expect' ][ 'response' ] )
assert_that( response.json, test[ 'expect' ][ 'data' ] )
def DefinedSubcommands_test( self ):
subcommands_data = self._BuildRequest( completer_target = 'javascript' )
self._WaitUntilTernServerReady()
eq_( sorted( [ 'GoToDefinition',
'GoTo',
'GetDoc',
'GetType',
'StartServer',
'StopServer',
'GoToReferences',
'RefactorRename' ] ),
self._app.post_json( '/defined_subcommands',
subcommands_data ).json )
def GoToDefinition_test( self ):
self._RunTest( {
'description': 'GoToDefinition works within file',
'request': {
'command': 'GoToDefinition',
'line_num': 13,
'column_num': 25,
'filepath': self._PathToTestFile( 'simple_test.js' ),
},
'expect': {
'response': http.client.OK,
'data': has_entries( {
'filepath': self._PathToTestFile( 'simple_test.js' ),
'line_num': 1,
'column_num': 5,
} )
}
} )
def GoTo_test( self ):
self._RunTest( {
'description': 'GoTo works the same as GoToDefinition within file',
'request': {
'command': 'GoTo',
'line_num': 13,
'column_num': 25,
'filepath': self._PathToTestFile( 'simple_test.js' ),
},
'expect': {
'response': http.client.OK,
'data': has_entries( {
'filepath': self._PathToTestFile( 'simple_test.js' ),
'line_num': 1,
'column_num': 5,
} )
}
} )
def GetDoc_test( self ):
self._RunTest( {
'description': 'GetDoc works within file',
'request': {
'command': 'GetDoc',
'line_num': 7,
'column_num': 16,
'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
},
'expect': {
'response': http.client.OK,
'data': has_entries( {
'detailed_info': (
'Name: mine_bitcoin\n'
'Type: fn(how_much: ?) -> number\n\n'
'This function takes a number and invests it in bitcoin. It '
'returns\nthe expected value (in notional currency) after 1 year.'
)
} )
}
} )
def GetType_test( self ):
self._RunTest( {
'description': 'GetType works within file',
'request': {
'command': 'GetType',
'line_num': 11,
'column_num': 14,
'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
},
'expect': {
'response': http.client.OK,
'data': has_entries( {
'message': 'number'
} )
}
} )
def GoToReferences_test( self ):
self._RunTest( {
'description': 'GoToReferences works within file',
'request': {
'command': 'GoToReferences',
'line_num': 17,
'column_num': 29,
'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
},
'expect': {
'response': http.client.OK,
'data': contains_inanyorder(
has_entries( {
'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
'line_num': 17,
'column_num': 29,
} ),
has_entries( {
'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
'line_num': 12,
'column_num': 9,
} )
)
}
} )
  def GetDocWithNoIdentifier_test( self ):
self._RunTest( {
'description': 'GetDoc works when no identifier',
'request': {
'command': 'GetDoc',
'filepath': self._PathToTestFile( 'simple_test.js' ),
'line_num': 12,
'column_num': 1,
},
'expect': {
'response': http.client.INTERNAL_SERVER_ERROR,
'data': self._ErrorMatcher( RuntimeError, 'TernError: No type found '
'at the given position.' ),
}
} )
def RefactorRename_Simple_test( self ):
filepath = self._PathToTestFile( 'simple_test.js' )
self._RunTest( {
'description': 'RefactorRename works within a single scope/file',
'request': {
'command': 'RefactorRename',
'arguments': [ 'test' ],
'filepath': filepath,
'line_num': 15,
'column_num': 32,
},
'expect': {
'response': http.client.OK,
'data': {
'fixits': contains( has_entries( {
'chunks': contains(
ChunkMatcher( 'test',
LocationMatcher( filepath, 1, 5 ),
LocationMatcher( filepath, 1, 22 ) ),
ChunkMatcher( 'test',
LocationMatcher( filepath, 13, 25 ),
LocationMatcher( filepath, 13, 42 ) ),
ChunkMatcher( 'test',
LocationMatcher( filepath, 14, 24 ),
LocationMatcher( filepath, 14, 41 ) ),
ChunkMatcher( 'test',
LocationMatcher( filepath, 15, 24 ),
LocationMatcher( filepath, 15, 41 ) ),
ChunkMatcher( 'test',
LocationMatcher( filepath, 21, 7 ),
LocationMatcher( filepath, 21, 24 ) ),
# On the same line, ensuring offsets are as expected (as
# unmodified source, similar to clang)
ChunkMatcher( 'test',
LocationMatcher( filepath, 21, 28 ),
LocationMatcher( filepath, 21, 45 ) ),
) ,
'location': LocationMatcher( filepath, 15, 32 )
} ) )
}
}
} )
def RefactorRename_MultipleFiles_test( self ):
file1 = self._PathToTestFile( 'file1.js' )
file2 = self._PathToTestFile( 'file2.js' )
file3 = self._PathToTestFile( 'file3.js' )
self._RunTest( {
'description': 'RefactorRename works across files',
'request': {
'command': 'RefactorRename',
'arguments': [ 'a-quite-long-string' ],
'filepath': file1,
'line_num': 3,
'column_num': 14,
},
'expect': {
'response': http.client.OK,
'data': {
'fixits': contains( has_entries( {
'chunks': contains(
ChunkMatcher(
'a-quite-long-string',
LocationMatcher( file1, 1, 5 ),
LocationMatcher( file1, 1, 11 ) ),
ChunkMatcher(
'a-quite-long-string',
LocationMatcher( file1, 3, 14 ),
LocationMatcher( file1, 3, 19 ) ),
ChunkMatcher(
'a-quite-long-string',
LocationMatcher( file2, 2, 14 ),
LocationMatcher( file2, 2, 19 ) ),
ChunkMatcher(
'a-quite-long-string',
LocationMatcher( file3, 3, 12 ),
LocationMatcher( file3, 3, 17 ) )
) ,
'location': LocationMatcher( file1, 3, 14 )
} ) )
}
}
} )
def RefactorRename_MultipleFiles_OnFileReadyToParse_test( self ):
file1 = self._PathToTestFile( 'file1.js' )
file2 = self._PathToTestFile( 'file2.js' )
file3 = self._PathToTestFile( 'file3.js' )
# This test is roughly the same as the previous one, except here file4.js is
# pushed into the Tern engine via 'opening it in the editor' (i.e.
# FileReadyToParse event). The first 3 are loaded into the tern server
# because they are listed in the .tern-project file's loadEagerly option.
file4 = self._PathToTestFile( 'file4.js' )
self._app.post_json( '/event_notification',
self._BuildRequest( **{
'filetype': 'javascript',
'event_name': 'FileReadyToParse',
'contents': ReadFile( file4 ),
'filepath': file4,
} ),
expect_errors = False )
self._RunTest( {
'description': 'FileReadyToParse loads files into tern server',
'request': {
'command': 'RefactorRename',
'arguments': [ 'a-quite-long-string' ],
'filepath': file1,
'line_num': 3,
'column_num': 14,
},
'expect': {
'response': http.client.OK,
'data': {
'fixits': contains( has_entries( {
'chunks': contains(
ChunkMatcher(
'a-quite-long-string',
LocationMatcher( file1, 1, 5 ),
LocationMatcher( file1, 1, 11 ) ),
ChunkMatcher(
'a-quite-long-string',
LocationMatcher( file1, 3, 14 ),
LocationMatcher( file1, 3, 19 ) ),
ChunkMatcher(
'a-quite-long-string',
LocationMatcher( file2, 2, 14 ),
LocationMatcher( file2, 2, 19 ) ),
ChunkMatcher(
'a-quite-long-string',
LocationMatcher( file3, 3, 12 ),
LocationMatcher( file3, 3, 17 ) ),
ChunkMatcher(
'a-quite-long-string',
LocationMatcher( file4, 4, 22 ),
LocationMatcher( file4, 4, 28 ) )
) ,
'location': LocationMatcher( file1, 3, 14 )
} ) )
}
}
} )
def RefactorRename_Missing_New_Name_test( self ):
self._RunTest( {
'description': 'FixItRename raises an error without new name',
'request': {
'command': 'FixItRename',
'line_num': 17,
'column_num': 29,
'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
},
'expect': {
'response': http.client.INTERNAL_SERVER_ERROR,
'data': {
'exception': self._ErrorMatcher(
ValueError,
'Please specify a new name to rename it to.\n'
'Usage: RefactorRename <new name>' ),
},
}
} )
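# ---------------------------------------------------------------------------
# The CombineRequest helper used by the tests above simply layers a per-test
# payload on top of shared request defaults before posting it to the ycmd
# handlers. The function below is a standalone, illustrative sketch of that
# merge pattern only; it is not part of the ycmd API, and the field names in
# the usage comment are just the ones the tests above already use.
# ---------------------------------------------------------------------------
def _CombineRequestSketch( defaults, overrides ):
  combined = dict( defaults )
  combined.update( overrides )
  return combined

# Usage sketch:
#   _CombineRequestSketch( { 'filetype': 'javascript', 'line_num': 1 },
#                          { 'command': 'GoToDefinition', 'line_num': 13 } )
#   == { 'filetype': 'javascript', 'command': 'GoToDefinition', 'line_num': 13 }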
| gpl-3.0 | -2,018,946,543,884,635,600 | 32.157635 | 80 | 0.515228 | false |
zouyapeng/horizon-newtouch | horizon/test/tests/workflows.py | 7 | 10953 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import forms
from django import http
from horizon import exceptions
from horizon.test import helpers as test
from horizon import workflows
PROJECT_ID = "a23lkjre389fwenj"
INSTANCE_ID = "sdlkjhf9832roiw"
def local_callback_func(request, context):
return "one"
def other_callback_func(request, context):
return "two"
def extra_callback_func(request, context):
return "extra"
class TestActionOne(workflows.Action):
project_id = forms.ChoiceField(label="Project")
user_id = forms.ChoiceField(label="User")
class Meta:
name = "Test Action One"
slug = "test_action_one"
def populate_project_id_choices(self, request, context):
return [(PROJECT_ID, "test_project")]
def populate_user_id_choices(self, request, context):
return [(request.user.id, request.user.username)]
def handle(self, request, context):
return {"foo": "bar"}
class TestActionTwo(workflows.Action):
instance_id = forms.CharField(label="Instance")
class Meta:
name = "Test Action Two"
slug = "test_action_two"
class TestActionThree(workflows.Action):
extra = forms.CharField(widget=forms.widgets.Textarea)
class Meta:
name = "Test Action Three"
slug = "test_action_three"
class AdminAction(workflows.Action):
admin_id = forms.CharField(label="Admin")
class Meta:
name = "Admin Action"
slug = "admin_action"
permissions = ("horizon.test",)
class TestStepOne(workflows.Step):
action_class = TestActionOne
contributes = ("project_id", "user_id")
class TestStepTwo(workflows.Step):
action_class = TestActionTwo
depends_on = ("project_id",)
contributes = ("instance_id",)
connections = {"project_id": (local_callback_func,
"horizon.test.tests.workflows.other_callback_func")}
class TestExtraStep(workflows.Step):
action_class = TestActionThree
depends_on = ("project_id",)
contributes = ("extra_data",)
connections = {"project_id": (extra_callback_func,)}
after = TestStepOne
before = TestStepTwo
class AdminStep(workflows.Step):
action_class = AdminAction
contributes = ("admin_id",)
after = TestStepOne
before = TestStepTwo
class TestWorkflow(workflows.Workflow):
slug = "test_workflow"
default_steps = (TestStepOne, TestStepTwo)
class TestWorkflowView(workflows.WorkflowView):
workflow_class = TestWorkflow
template_name = "workflow.html"
class TestFullscreenWorkflow(workflows.Workflow):
slug = 'test_fullscreen_workflow'
default_steps = (TestStepOne, TestStepTwo)
fullscreen = True
class TestFullscreenWorkflowView(workflows.WorkflowView):
workflow_class = TestFullscreenWorkflow
template_name = "workflow.html"
class WorkflowsTests(test.TestCase):
def setUp(self):
super(WorkflowsTests, self).setUp()
def tearDown(self):
super(WorkflowsTests, self).tearDown()
self._reset_workflow()
def _reset_workflow(self):
TestWorkflow._cls_registry = set([])
def test_workflow_construction(self):
TestWorkflow.register(TestExtraStep)
flow = TestWorkflow(self.request)
self.assertQuerysetEqual(flow.steps,
['<TestStepOne: test_action_one>',
'<TestExtraStep: test_action_three>',
'<TestStepTwo: test_action_two>'])
self.assertEqual(set(['project_id']), flow.depends_on)
def test_step_construction(self):
step_one = TestStepOne(TestWorkflow(self.request))
# Action slug is moved from Meta by metaclass, and
# Step inherits slug from action.
self.assertEqual(TestActionOne.name, step_one.name)
self.assertEqual(TestActionOne.slug, step_one.slug)
# Handlers should be empty since there are no connections.
self.assertEqual(step_one._handlers, {})
step_two = TestStepTwo(TestWorkflow(self.request))
# Handlers should be populated since we do have connections.
self.assertEqual([local_callback_func, other_callback_func],
step_two._handlers["project_id"])
def test_step_invalid_connections_handlers_not_list_or_tuple(self):
class InvalidStepA(TestStepTwo):
connections = {'project_id': {}}
class InvalidStepB(TestStepTwo):
connections = {'project_id': ''}
with self.assertRaises(TypeError):
InvalidStepA(TestWorkflow(self.request))
with self.assertRaises(TypeError):
InvalidStepB(TestWorkflow(self.request))
def test_step_invalid_connection_handler_not_string_or_callable(self):
class InvalidStepA(TestStepTwo):
connections = {'project_id': (None,)}
class InvalidStepB(TestStepTwo):
connections = {'project_id': (0,)}
with self.assertRaises(TypeError):
InvalidStepA(TestWorkflow(self.request))
with self.assertRaises(TypeError):
InvalidStepB(TestWorkflow(self.request))
def test_step_invalid_callback(self):
# This should raise an exception
class InvalidStep(TestStepTwo):
connections = {"project_id": ('local_callback_func',)}
with self.assertRaises(ValueError):
InvalidStep(TestWorkflow(self.request))
def test_connection_handlers_called(self):
TestWorkflow.register(TestExtraStep)
flow = TestWorkflow(self.request)
# This should set the value without any errors, but trigger nothing
flow.context['does_not_exist'] = False
self.assertEqual(False, flow.context['does_not_exist'])
# The order here is relevant. Note that we inserted "extra" between
# steps one and two, and one has no handlers, so we should see
# a response from extra, then one from each of step two's handlers.
val = flow.context.set('project_id', PROJECT_ID)
self.assertEqual([('test_action_three', 'extra'),
('test_action_two', 'one'),
('test_action_two', 'two')], val)
def test_workflow_validation(self):
flow = TestWorkflow(self.request)
# Missing items fail validation.
with self.assertRaises(exceptions.WorkflowValidationError):
flow.is_valid()
# All required items pass validation.
seed = {"project_id": PROJECT_ID,
"user_id": self.user.id,
"instance_id": INSTANCE_ID}
req = self.factory.post("/", seed)
req.user = self.user
flow = TestWorkflow(req, context_seed={"project_id": PROJECT_ID})
for step in flow.steps:
if not step.action.is_valid():
self.fail("Step %s was unexpectedly invalid: %s"
% (step.slug, step.action.errors))
self.assertTrue(flow.is_valid())
# Additional items shouldn't affect validation
flow.context.set("extra_data", "foo")
self.assertTrue(flow.is_valid())
def test_workflow_finalization(self):
flow = TestWorkflow(self.request)
self.assertTrue(flow.finalize())
def test_workflow_view(self):
view = TestWorkflowView.as_view()
req = self.factory.get("/")
res = view(req)
self.assertEqual(200, res.status_code)
def test_workflow_registration(self):
req = self.factory.get("/foo")
flow = TestWorkflow(req)
self.assertQuerysetEqual(flow.steps,
['<TestStepOne: test_action_one>',
'<TestStepTwo: test_action_two>'])
TestWorkflow.register(TestExtraStep)
flow = TestWorkflow(req)
self.assertQuerysetEqual(flow.steps,
['<TestStepOne: test_action_one>',
'<TestExtraStep: test_action_three>',
'<TestStepTwo: test_action_two>'])
def test_workflow_render(self):
TestWorkflow.register(TestExtraStep)
req = self.factory.get("/foo")
flow = TestWorkflow(req)
output = http.HttpResponse(flow.render())
self.assertContains(output, unicode(flow.name))
self.assertContains(output, unicode(TestActionOne.name))
self.assertContains(output, unicode(TestActionTwo.name))
self.assertContains(output, unicode(TestActionThree.name))
def test_has_permissions(self):
self.assertQuerysetEqual(TestWorkflow._cls_registry, [])
TestWorkflow.register(AdminStep)
flow = TestWorkflow(self.request)
step = AdminStep(flow)
self.assertItemsEqual(step.permissions,
("horizon.test",))
self.assertQuerysetEqual(flow.steps,
['<TestStepOne: test_action_one>',
'<TestStepTwo: test_action_two>'])
self.set_permissions(['test'])
self.request.user = self.user
flow = TestWorkflow(self.request)
self.assertQuerysetEqual(flow.steps,
['<TestStepOne: test_action_one>',
'<AdminStep: admin_action>',
'<TestStepTwo: test_action_two>'])
def test_entry_point(self):
req = self.factory.get("/foo")
flow = TestWorkflow(req)
self.assertEqual("test_action_one", flow.get_entry_point())
flow = TestWorkflow(req, entry_point="test_action_two")
self.assertEqual("test_action_two", flow.get_entry_point())
def test_fullscreenworkflow_view(self):
view = TestFullscreenWorkflowView.as_view()
req = self.factory.get("/")
req.is_ajax = lambda: True
res = view(req)
output = res.render()
self.assertRegexpMatches(str(output),
'class="[^"]*\\bfullscreen\\b[^"]*"')
def test_notfullscreenworkflow_view(self):
view = TestWorkflowView.as_view()
req = self.factory.get("/")
req.is_ajax = lambda: True
res = view(req)
output = res.render()
self.assertNotRegexpMatches(str(output),
'class="[^"]*\\bfullscreen\\b[^"]*"')
| apache-2.0 | 8,271,650,053,066,978,000 | 33.443396 | 78 | 0.621017 | false |
nwjs/chromium.src | third_party/blink/web_tests/external/wpt/webdriver/tests/set_timeouts/user_prompts.py | 42 | 2147 | # META: timeout=long
import pytest
from tests.support.asserts import assert_success
def set_timeouts(session, timeouts):
return session.transport.send(
"POST", "session/{session_id}/timeouts".format(**vars(session)),
timeouts)
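# For reference: the helper above issues the WebDriver "Set Timeouts" command,
# whose body is a JSON object keyed by timeout name. The fixture below sends
# {"script": 100}; the W3C spec also defines "pageLoad" and "implicit" keys.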
@pytest.fixture
def check_user_prompt_not_closed(session, create_dialog):
def check_user_prompt_not_closed(dialog_type):
create_dialog(dialog_type, text=dialog_type)
response = set_timeouts(session, {"script": 100})
assert_success(response)
assert session.alert.text == dialog_type
session.alert.dismiss()
assert session.timeouts.script == 100
return check_user_prompt_not_closed
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_accept(check_user_prompt_not_closed, dialog_type):
check_user_prompt_not_closed(dialog_type)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_accept_and_notify(check_user_prompt_not_closed, dialog_type):
check_user_prompt_not_closed(dialog_type)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_dismiss(check_user_prompt_not_closed, dialog_type):
check_user_prompt_not_closed(dialog_type)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_dismiss_and_notify(check_user_prompt_not_closed, dialog_type):
check_user_prompt_not_closed(dialog_type)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed, dialog_type):
check_user_prompt_not_closed(dialog_type)
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_default(check_user_prompt_not_closed, dialog_type):
check_user_prompt_not_closed(dialog_type)
| bsd-3-clause | 2,597,034,688,995,659,000 | 33.629032 | 76 | 0.722869 | false |
mikewiebe-ansible/ansible | test/units/modules/network/fortios/test_fortios_wireless_controller_wtp_profile.py | 20 | 17499 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_wireless_controller_wtp_profile
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_wireless_controller_wtp_profile.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_wireless_controller_wtp_profile_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_wtp_profile': {
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'allowaccess': 'telnet',
'ap-country': 'NA',
'ble-profile': 'test_value_5',
'comment': 'Comment.',
'control-message-offload': 'ebp-frame',
'dtls-in-kernel': 'enable',
'dtls-policy': 'clear-text',
'energy-efficient-ethernet': 'enable',
'ext-info-enable': 'enable',
'handoff-roaming': 'enable',
'handoff-rssi': '13',
'handoff-sta-thresh': '14',
'ip-fragment-preventing': 'tcp-mss-adjust',
'led-state': 'enable',
'lldp': 'enable',
'login-passwd': 'test_value_18',
'login-passwd-change': 'yes',
'max-clients': '20',
'name': 'default_name_21',
'poe-mode': 'auto',
'split-tunneling-acl-local-ap-subnet': 'enable',
'split-tunneling-acl-path': 'tunnel',
'tun-mtu-downlink': '25',
'tun-mtu-uplink': '26',
'wan-port-mode': 'wan-lan'
}
set_method_mock.assert_called_with('wireless-controller', 'wtp-profile', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_wireless_controller_wtp_profile_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_wtp_profile': {
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'allowaccess': 'telnet',
'ap-country': 'NA',
'ble-profile': 'test_value_5',
'comment': 'Comment.',
'control-message-offload': 'ebp-frame',
'dtls-in-kernel': 'enable',
'dtls-policy': 'clear-text',
'energy-efficient-ethernet': 'enable',
'ext-info-enable': 'enable',
'handoff-roaming': 'enable',
'handoff-rssi': '13',
'handoff-sta-thresh': '14',
'ip-fragment-preventing': 'tcp-mss-adjust',
'led-state': 'enable',
'lldp': 'enable',
'login-passwd': 'test_value_18',
'login-passwd-change': 'yes',
'max-clients': '20',
'name': 'default_name_21',
'poe-mode': 'auto',
'split-tunneling-acl-local-ap-subnet': 'enable',
'split-tunneling-acl-path': 'tunnel',
'tun-mtu-downlink': '25',
'tun-mtu-uplink': '26',
'wan-port-mode': 'wan-lan'
}
set_method_mock.assert_called_with('wireless-controller', 'wtp-profile', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_wireless_controller_wtp_profile_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'wireless_controller_wtp_profile': {
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('wireless-controller', 'wtp-profile', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_wireless_controller_wtp_profile_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'wireless_controller_wtp_profile': {
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('wireless-controller', 'wtp-profile', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_wireless_controller_wtp_profile_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_wtp_profile': {
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'allowaccess': 'telnet',
'ap-country': 'NA',
'ble-profile': 'test_value_5',
'comment': 'Comment.',
'control-message-offload': 'ebp-frame',
'dtls-in-kernel': 'enable',
'dtls-policy': 'clear-text',
'energy-efficient-ethernet': 'enable',
'ext-info-enable': 'enable',
'handoff-roaming': 'enable',
'handoff-rssi': '13',
'handoff-sta-thresh': '14',
'ip-fragment-preventing': 'tcp-mss-adjust',
'led-state': 'enable',
'lldp': 'enable',
'login-passwd': 'test_value_18',
'login-passwd-change': 'yes',
'max-clients': '20',
'name': 'default_name_21',
'poe-mode': 'auto',
'split-tunneling-acl-local-ap-subnet': 'enable',
'split-tunneling-acl-path': 'tunnel',
'tun-mtu-downlink': '25',
'tun-mtu-uplink': '26',
'wan-port-mode': 'wan-lan'
}
set_method_mock.assert_called_with('wireless-controller', 'wtp-profile', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_wireless_controller_wtp_profile_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_wtp_profile': {
'random_attribute_not_valid': 'tag',
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'allowaccess': 'telnet',
'ap-country': 'NA',
'ble-profile': 'test_value_5',
'comment': 'Comment.',
'control-message-offload': 'ebp-frame',
'dtls-in-kernel': 'enable',
'dtls-policy': 'clear-text',
'energy-efficient-ethernet': 'enable',
'ext-info-enable': 'enable',
'handoff-roaming': 'enable',
'handoff-rssi': '13',
'handoff-sta-thresh': '14',
'ip-fragment-preventing': 'tcp-mss-adjust',
'led-state': 'enable',
'lldp': 'enable',
'login-passwd': 'test_value_18',
'login-passwd-change': 'yes',
'max-clients': '20',
'name': 'default_name_21',
'poe-mode': 'auto',
'split-tunneling-acl-local-ap-subnet': 'enable',
'split-tunneling-acl-path': 'tunnel',
'tun-mtu-downlink': '25',
'tun-mtu-uplink': '26',
'wan-port-mode': 'wan-lan'
}
set_method_mock.assert_called_with('wireless-controller', 'wtp-profile', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
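# ---------------------------------------------------------------------------
# Note on the expected_data blocks above: the module under test converts the
# Ansible-style underscore keys (e.g. 'ap_country') into the hyphenated names
# the FortiOS API expects (e.g. 'ap-country'). The function below is only an
# illustrative sketch of that key conversion, written for readers of this
# file; it is not the implementation used by the module itself.
# ---------------------------------------------------------------------------
def underscore_to_hyphen_sketch(data):
    if isinstance(data, dict):
        return {key.replace('_', '-'): underscore_to_hyphen_sketch(value)
                for key, value in data.items()}
    if isinstance(data, list):
        return [underscore_to_hyphen_sketch(item) for item in data]
    return data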
| gpl-3.0 | 6,349,056,281,766,405,000 | 38.861048 | 142 | 0.566318 | false |
formath/mxnet | example/bi-lstm-sort/lstm_sort.py | 25 | 3168 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme
# pylint: disable=superfluous-parens, no-member, invalid-name
import sys
sys.path.insert(0, "../../python")
import numpy as np
import mxnet as mx
from lstm import bi_lstm_unroll
from sort_io import BucketSentenceIter, default_build_vocab
def Perplexity(label, pred):
label = label.T.reshape((-1,))
loss = 0.
for i in range(pred.shape[0]):
loss += -np.log(max(1e-10, pred[i][int(label[i])]))
return np.exp(loss / label.size)
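# Illustration of the metric above (a sketch only, not used by the training
# script): perplexity is exp(mean negative log-likelihood) of the true labels.
def _perplexity_example():
    # one sequence of two steps; each row of pred is a softmax output
    label = np.array([[0, 1]])
    pred = np.array([[0.25, 0.75],
                     [0.90, 0.10]])
    # picks pred[0][0] = 0.25 and pred[1][1] = 0.10, so this returns
    # exp((-log(0.25) - log(0.10)) / 2), roughly 6.32
    return Perplexity(label, pred)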
if __name__ == '__main__':
batch_size = 100
buckets = []
num_hidden = 300
num_embed = 512
num_lstm_layer = 2
num_epoch = 1
learning_rate = 0.1
momentum = 0.9
contexts = [mx.context.gpu(i) for i in range(1)]
vocab = default_build_vocab("./data/sort.train.txt")
def sym_gen(seq_len):
return bi_lstm_unroll(seq_len, len(vocab),
num_hidden=num_hidden, num_embed=num_embed,
num_label=len(vocab))
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_states = init_c + init_h
data_train = BucketSentenceIter("./data/sort.train.txt", vocab,
buckets, batch_size, init_states)
data_val = BucketSentenceIter("./data/sort.valid.txt", vocab,
buckets, batch_size, init_states)
if len(buckets) == 1:
symbol = sym_gen(buckets[0])
else:
symbol = sym_gen
model = mx.model.FeedForward(ctx=contexts,
symbol=symbol,
num_epoch=num_epoch,
learning_rate=learning_rate,
momentum=momentum,
wd=0.00001,
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
import logging
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
model.fit(X=data_train, eval_data=data_val,
eval_metric = mx.metric.np(Perplexity),
batch_end_callback=mx.callback.Speedometer(batch_size, 50),)
model.save("sort")
| apache-2.0 | 5,515,932,103,077,509,000 | 36.270588 | 114 | 0.618056 | false |
eBay/restcommander | play-1.2.4/python/Lib/wsgiref/simple_server.py | 58 | 4794 | """BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21)
This is both an example of how WSGI can be implemented, and a basis for running
simple web applications on a local machine, such as might be done when testing
or debugging an application. It has not been reviewed for security issues,
however, and we strongly recommend that you use a "real" web server for
production use.
For example usage, see the 'if __name__=="__main__"' block at the end of the
module. See also the BaseHTTPServer module docs for other API information.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urllib, sys
from wsgiref.handlers import SimpleHandler
__version__ = "0.1"
__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class ServerHandler(SimpleHandler):
server_software = software_version
def close(self):
try:
self.request_handler.log_request(
self.status.split(' ',1)[0], self.bytes_sent
)
finally:
SimpleHandler.close(self)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
HTTPServer.server_bind(self)
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def demo_app(environ,start_response):
from StringIO import StringIO
stdout = StringIO()
print >>stdout, "Hello world!"
print >>stdout
h = environ.items(); h.sort()
for k,v in h:
print >>stdout, k,'=', repr(v)
start_response("200 OK", [('Content-Type','text/plain')])
return [stdout.getvalue()]
def make_server(
host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
"""Create a new WSGI server listening on `host` and `port` for `app`"""
server = server_class((host, port), handler_class)
server.set_app(app)
return server
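# A minimal usage sketch of make_server (illustrative only; the '' host and
# port 8080 are arbitrary). The __main__ block below does the same thing with
# demo_app.
def _make_server_example():
    def hello_app(environ, start_response):
        start_response("200 OK", [('Content-Type', 'text/plain')])
        return ["Hello from make_server!\n"]
    httpd = make_server('', 8080, hello_app)
    httpd.handle_request() # serve one request, then return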
if __name__ == '__main__':
httpd = make_server('', 8000, demo_app)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
import webbrowser
webbrowser.open('http://localhost:8000/xyz?abc')
httpd.handle_request() # serve one request, then exit
#
| apache-2.0 | -7,448,858,720,460,817,000 | 22.385366 | 79 | 0.601377 | false |
ToxicFrog/lancow | madcow/util/google.py | 3 | 4713 | # Copyright (C) 2007, 2008 Christopher Jones
#
# This file is part of Madcow.
#
# Madcow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Madcow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Madcow. If not, see <http://www.gnu.org/licenses/>.
"""Google interface"""
from urlparse import urljoin
import urllib2
import re
from BeautifulSoup import BeautifulSoup
from madcow.util import strip_html, superscript
from madcow.util.http import UserAgent
from madcow.util.text import encode, decode
__version__ = '0.3'
__author__ = 'cj_ <[email protected]>'
class NonRedirectResponse(Exception):
"""Raised when google doesn't return a redirect"""
class Response(object):
def __init__(self, data=u''):
self.data = data
def read(self, *args, **kwargs):
return self.data
class NoRedirects(urllib2.HTTPRedirectHandler):
"""Override auto-follow of redirects"""
def redirect_request(self, *args, **kwargs):
pass
class NoErrors(urllib2.HTTPDefaultErrorHandler):
"""Don't allow urllib to throw an error on 30x code"""
def http_error_default(self, req, fp, code, msg, headers):
return Response(data=dict(headers.items())[u'location'])
class Google(object):
baseurl = u'http://www.google.com/'
search = urljoin(baseurl, u'/search')
luckyopts = {u'hl': u'en', u'btnI': u'I', u'aq': u'f', u'safe': u'off'}
calcopts = {u'hl': u'en', u'safe': u'off', u'c2coff': 1, u'btnG': u'Search'}
reConversionDetected = re.compile(u'More about (calculator|currency)')
reConversionResult = re.compile(u'<h2 class=r.*?>.*?<b>(.*?)<\/b><\/h2>')
sup_re = re.compile(r'(<sup>.*?</sup>)', re.I | re.DOTALL)
clock_re = re.compile(r'/images/icons/onebox/clock')
sun_re = re.compile(r'/images/icons/onebox/weather_sun')
whitespace_re = re.compile(r'\s{2,}')
def __init__(self):
self.ua = UserAgent(handlers=[NoRedirects, NoErrors], agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.215 Safari/535.1")
def lucky(self, query):
"""Return I'm Feeling Lucky URL for given query"""
opts = dict(self.luckyopts.items())
opts[u'q'] = query
result = self.ua.open(self.search, opts=opts, referer=self.baseurl,
size=1024)
if not result.startswith(u'http'):
raise NonRedirectResponse
return result
calc_re = re.compile(r'calculator')
white_re = re.compile(r'\s+')
def calculator(self, query):
"""Try to use google calculator for given query"""
opts = dict(self.calcopts)
opts[u'q'] = query
doc = self.ua.open(self.search, opts=opts)
soup = BeautifulSoup(doc)
response = soup.find('img', src=self.calc_re).parent.findNext('h2').renderContents()
response = ' '.join(response.splitlines())
response = decode(response, 'utf-8')
        # turn superscripts into utf8
parts = []
for part in self.sup_re.split(response):
if self.sup_re.match(part):
part = superscript(part)
parts.append(part)
response = u''.join(parts)
response = self.white_re.sub(' ', strip_html(response).strip())
return response
def sunrise_sunset(self, query, location):
"""Ask google for the sunrise or sunset from location"""
soup = BeautifulSoup(self.ua.open(self.search, {'q': '%s in %s' % (query, location)}))
image = soup.find('img', src=self.sun_re)
row1 = image.findNext('td')
row2 = row1.findNext('td')
result = strip_html(u'%s (%s)' % (self.decode(row1), self.decode(row2)))
return self.whitespace_re.sub(u' ', result.strip())
def clock(self, query):
"""Use google to look up time in a given location"""
try:
doc = self.ua.open(self.search, {'q': 'time in %s' % query})
soup = BeautifulSoup(doc)
table = soup.find('div', 'obcontainer')
time = table.find('td', style='font-size:medium')
return strip_html(self.decode(time).strip())
except:
raise
@staticmethod
def decode(node):
return decode(node.renderContents())
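# Usage sketch (not part of the original module): the Google helper above
# exposes lucky(), calculator(), clock() and sunrise_sunset(). These methods
# hit the live google.com endpoints, so the example below is guarded by a
# __main__ check and is purely illustrative; the queries are arbitrary.
if __name__ == '__main__':
    g = Google()
    print g.calculator(u'6*7')
    print g.lucky(u'madcow irc bot')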
| gpl-3.0 | 6,125,836,033,747,347,000 | 34.43609 | 188 | 0.631021 | false |
PowerShellEmpire/Empire | lib/modules/powershell/exploitation/exploit_jenkins.py | 10 | 3379 | import base64
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Exploit-Jenkins',
'Author': ['@luxcupitor'],
'Description': ("Run command on unauthenticated Jenkins Script consoles."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'Pass a command to run. If windows, you may have to prepend "cmd /c ".'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Rhost' : {
'Description' : 'Specify the host to exploit.',
'Required' : True,
'Value' : ''
},
'Port' : {
'Description' : 'Specify the port to use.',
'Required' : True,
'Value' : '8080'
},
'Cmd' : {
'Description' : 'command to run on remote jenkins script console.',
'Required' : True,
'Value' : 'whoami'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/exploitation/Exploit-Jenkins.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "\nExploit-Jenkins"
scriptEnd += " -Rhost "+str(self.options['Rhost']['Value'])
scriptEnd += " -Port "+str(self.options['Port']['Value'])
command = str(self.options['Cmd']['Value'])
# if the command contains spaces, wrap it in quotes before passing to ps script
if " " in command:
scriptEnd += " -Cmd \"" + command + "\""
else:
scriptEnd += " -Cmd " + command
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
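# For illustration only: with the default options above (Port 8080, Cmd
# 'whoami') and Rhost set to e.g. '10.0.0.5', the suffix assembled by
# generate() ends with a call of the form
#   Exploit-Jenkins -Rhost 10.0.0.5 -Port 8080 -Cmd whoami
# and commands containing spaces are wrapped in double quotes, as handled
# above.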
| bsd-3-clause | 5,114,936,303,125,955,000 | 32.79 | 127 | 0.509026 | false |
yakky/django | tests/backends/tests.py | 16 | 48369 | # -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import datetime
import re
import threading
import unittest
import warnings
from decimal import Decimal, Rounded
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.postgresql import version as pg_version
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper, format_number
from django.db.models import Avg, StdDev, Sum, Variance
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, mock, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.six.moves import range
from . import models
class DummyBackendTest(SimpleTestCase):
def test_no_databases(self):
"""
        Test that an empty DATABASES setting defaults to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
with self.assertRaises(ImproperlyConfigured):
conns[DEFAULT_DB_ALIAS].ensure_connection()
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
from django.db.backends.oracle.base import convert_unicode
with connection.cursor() as cursor:
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!')])
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
with connection.cursor() as cursor:
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join(six.text_type(x) for x in range(4000))
cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
cursor.execute('SELECT text FROM ltext')
row = cursor.fetchone()
self.assertEqual(long_str, row[0].read())
cursor.execute('DROP TABLE ltext')
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.ensure_connection()
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
with connection.cursor() as cursor:
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
cursor.execute(query)
self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
longMessage = True
def test_autoincrement(self):
"""
Check that auto_increment fields are created with the AUTOINCREMENT
keyword in order to be monotonically increasing. Refs #10164.
"""
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(models.Square)
statements = editor.collected_sql
match = re.search('"id" ([^,]+),', statements[0])
self.assertIsNotNone(match)
self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
match.group(1), "Wrong SQL used to create an auto-increment "
"column on SQLite")
def test_aggregation(self):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate,
**{'complex': aggregate('last_modified') + aggregate('last_modified')})
def test_memory_db_test_name(self):
"""
A named in-memory db should be allowed where supported.
"""
from django.db.backends.sqlite3.base import DatabaseWrapper
settings_dict = {
'TEST': {
'NAME': 'file:memorydb_test?mode=memory&cache=shared',
}
}
wrapper = DatabaseWrapper(settings_dict)
creation = wrapper.creation
if creation.connection.features.can_share_in_memory_db:
expected = creation.connection.settings_dict['TEST']['NAME']
self.assertEqual(creation._get_test_db_name(), expected)
else:
msg = (
"Using a shared memory database with `mode=memory` in the "
"database name is not supported in your environment, "
"use `:memory:` instead."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
creation._get_test_db_name()
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 9.3 beta4", 90300)
self.assert_parses("PostgreSQL 9.3", 90300)
self.assert_parses("EnterpriseDB 9.3", 90300)
self.assert_parses("PostgreSQL 9.3.6", 90306)
self.assert_parses("PostgreSQL 9.4beta1", 90400)
self.assert_parses(
"PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC "
"i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)",
90301
)
def test_nodb_connection(self):
"""
        Test that the _nodb_connection property falls back to the default connection
database when access to the 'postgres' database is not granted.
"""
def mocked_connect(self):
if self.settings_dict['NAME'] is None:
raise DatabaseError()
return ''
nodb_conn = connection._nodb_connection
self.assertIsNone(nodb_conn.settings_dict['NAME'])
# Now assume the 'postgres' db isn't available
with warnings.catch_warnings(record=True) as w:
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.connect',
side_effect=mocked_connect, autospec=True):
nodb_conn = connection._nodb_connection
self.assertIsNotNone(nodb_conn.settings_dict['NAME'])
self.assertEqual(nodb_conn.settings_dict['NAME'], connection.settings_dict['NAME'])
# Check a RuntimeWarning has been emitted
self.assertEqual(len(w), 1)
self.assertEqual(w[0].message.__class__, RuntimeWarning)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 9.3"]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 90300)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
new_connection = connection.copy()
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Invalidate timezone name cache, because the setting_changed
# handler cannot know about new_connection.
del new_connection.timezone_name
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
with self.settings(TIME_ZONE=new_tz):
new_connection.set_autocommit(False)
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
new_connection = connection.copy()
new_connection.settings_dict['AUTOCOMMIT'] = False
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def test_connect_isolation_level(self):
"""
Regression test for #18130 and #24318.
"""
from psycopg2.extensions import (
ISOLATION_LEVEL_READ_COMMITTED as read_committed,
ISOLATION_LEVEL_SERIALIZABLE as serializable,
)
# Since this is a django.test.TestCase, a transaction is in progress
# and the isolation level isn't reported as 0. This test assumes that
# PostgreSQL is configured with the default isolation level.
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(connection.connection.isolation_level, read_committed)
new_connection = connection.copy()
new_connection.settings_dict['OPTIONS']['isolation_level'] = serializable
try:
# Start a transaction so the isolation level isn't reported as 0.
new_connection.set_autocommit(False)
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(new_connection.connection.isolation_level, serializable)
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute("SELECT %s", (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ["awef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ["ᄲawef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_lookup_cast(self):
from django.db.backends.postgresql.operations import DatabaseOperations
do = DatabaseOperations(connection=None)
for lookup in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
self.assertIn('::text', do.lookup_cast(lookup))
def test_correct_extraction_psycopg2_version(self):
from django.db.backends.postgresql.base import psycopg2_version
version_path = 'django.db.backends.postgresql.base.Database.__version__'
with mock.patch(version_path, '2.6.9'):
self.assertEqual(psycopg2_version(), (2, 6, 9))
with mock.patch(version_path, '2.5.dev0'):
self.assertEqual(psycopg2_version(), (2, 5))
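# The assertions in PostgreSQLTests.test_parsing above rely on PostgreSQL's
# numeric version convention: major * 10000 + minor * 100 + micro, so that
# "9.3.6" maps to 90306. The helper below is only an illustrative sketch of
# that convention, not the backend's actual parser.
def _pg_version_number_sketch(major, minor, micro=0):
    return major * 10000 + minor * 100 + micro
# e.g. _pg_version_number_sketch(9, 3, 6) == 90306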
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
Test the custom ``django_date_trunc method``, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
Test the custom ``django_date_extract method``, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
connection.ops.last_executed_query(cursor, '', ())
def test_debug_sql(self):
list(models.Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
        Test that last_executed_query() returns a Unicode string
"""
data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'], query)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_parameter_quoting_on_sqlite(self):
        # The implementation of last_executed_query() isn't optimal. It's
# worth testing that parameters are quoted. See #14091.
query = "SELECT %s"
params = ["\"'\\"]
connection.cursor().execute(query, params)
# Note that the single quote is repeated
substituted = "SELECT '\"''\\'"
self.assertEqual(connection.queries[-1]['sql'], substituted)
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following tests would be good ones to run on all
# backends, but they break MySQL hard. Until #13711 is fixed, they can't be
# run everywhere (although they would be an effective test of #13711).
class LongNameTest(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
    check that it does. Refs #8901.
"""
available_apps = ['backends']
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""
An m2m save of a model with a long name and a long m2m field name
doesn't error (#8901).
"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""
Sequence resetting as part of a flush with model with long name and
long pk name doesn't error (#8901).
"""
        # A full flush is expensive for the test suite, so we dig into the
        # internals to generate the likely offending SQL and run it manually.
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly roll back and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is an sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
# '%s' escaping support for sqlite3 #13648
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
        # response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TransactionTestCase):
available_apps = ['backends']
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
# Test cursor.executemany #4896
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
# Test executemany with params=[] does nothing #4765
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
# Test executemany accepts iterators #10320
args = iter((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 5)
args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
# fetchone, fetchmany, fetchall return strings as unicode objects #6254
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
Test that DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Test that cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
# Both InterfaceError and ProgrammingError seem to be used when
            # accessing a closed cursor (psycopg2 has InterfaceError, the rest
            # seem to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
# There isn't a generic way to test that cursors are closed, but
        # psycopg2 offers us a way to check that via the closed attribute.
# So, run only on psycopg2 for that reason.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
Test that is_usable() doesn't crash when the database disconnects.
Regression for #21553.
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
with connection.cursor() as cursor:
reset_queries()
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
reset_queries()
self.assertEqual(0, len(connection.queries))
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
Test that the backend doesn't store an unlimited number of queries.
Regression for #12581.
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connection = connection.copy()
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(3, len(new_connection.queries))
self.assertEqual(1, len(w))
self.assertEqual(str(w[0].message), "Limit for query logging "
"exceeded, only the last 3 queries will be returned.")
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# When a constraint violation does occur, the tests verify that the raised
# exception is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
a2 = models.Article(headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30)
self.assertRaises(IntegrityError, a2.save)
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = models.Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
# Create another article
r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
models.Article.objects.create(headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy)
# Retrieve the second article from the DB
a2 = models.Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
self.assertRaises(IntegrityError, a2.save)
def test_disable_constraint_checks_manually(self):
"""
        When constraint checks are disabled, it should be possible to write
        bad data without IntegrityErrors being raised.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
        When constraint checks are disabled (using the context manager), it
        should be possible to write bad data without IntegrityErrors being raised.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
available_apps = ['backends']
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
        # Check that each created connection got a different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
    Zero as id for AutoField should raise an exception in MySQL, because MySQL
    does not allow zero as an autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
def test_can_reference_existent(self):
obj = models.Object.objects.create()
ref = models.ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = models.ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(models.Object.objects.filter(id=12345).exists())
ref = models.ObjectReference.objects.create(obj_id=12345)
ref_new = models.ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(models.Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = models.Object.objects.create()
obj.related_objects.create()
self.assertEqual(models.Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = models.Object._meta.get_field("related_objects").remote_field.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(SimpleTestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
equal('0.1234567890', None, 0,
'0')
equal('1234567890.1234567890', None, 0,
'1234567890')
equal('1234567890.1234567890', None, 2,
'1234567890.12')
equal('0.1234', 5, None,
'0.1234')
equal('123.12', 5, None,
'123.12')
with self.assertRaises(Rounded):
equal('0.1234567890', 5, None,
'0.12346')
with self.assertRaises(Rounded):
equal('1234567890.1234', 5, None,
'1234600000')
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite specific test.')
@skipUnlessDBFeature('can_share_in_memory_db')
class TestSqliteThreadSharing(TransactionTestCase):
available_apps = ['backends']
def test_database_sharing_in_threads(self):
def create_object():
models.Object.objects.create()
create_object()
thread = threading.Thread(target=create_object)
thread.start()
thread.join()
self.assertEqual(models.Object.objects.count(), 2)
| bsd-3-clause | -6,892,357,699,733,083,000 | 38.90429 | 119 | 0.617319 | false |
fernandog/Medusa | ext/dateutil/rrule.py | 9 | 64642 | # -*- coding: utf-8 -*-
"""
The rrule module offers a small, complete, and very fast, implementation of
the recurrence rules documented in the
`iCalendar RFC <https://tools.ietf.org/html/rfc5545>`_,
including support for caching of results.
"""
import itertools
import datetime
import calendar
import re
import sys
try:
from math import gcd
except ImportError:
from fractions import gcd
from six import advance_iterator, integer_types
from six.moves import _thread, range
import heapq
from ._common import weekday as weekdaybase
from .tz import tzutc, tzlocal
# For warning about deprecation of until and count
from warnings import warn
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = list(range(7))
# Imported on demand.
easter = None
parser = None
class weekday(weekdaybase):
"""
This version of weekday does not allow n = 0.
"""
def __init__(self, wkday, n=None):
if n == 0:
raise ValueError("Can't create weekday with n==0")
super(weekday, self).__init__(wkday, n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
def _invalidates_cache(f):
"""
Decorator for rruleset methods which may invalidate the
cached length.
"""
def inner_func(self, *args, **kwargs):
rv = f(self, *args, **kwargs)
self._invalidate_cache()
return rv
return inner_func
class rrulebase(object):
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = _thread.allocate_lock()
self._invalidate_cache()
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _invalidate_cache(self):
if self._cache is not None:
self._cache = []
self._cache_complete = False
self._cache_gen = self._iter()
if self._cache_lock.locked():
self._cache_lock.release()
self._len = None
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(advance_iterator(gen))
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
    # __len__() introduces a large performance penalty.
def count(self):
""" Returns the number of recurrences in this set. It will have go
trough the whole recurrence, if this hasn't been done before. """
if self._len is None:
for x in self:
pass
return self._len
def before(self, dt, inc=False):
""" Returns the last recurrence before the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
""" Returns the first recurrence after the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def xafter(self, dt, count=None, inc=False):
"""
Generator which yields up to `count` recurrences after the given
datetime instance, equivalent to `after`.
:param dt:
The datetime at which to start generating recurrences.
:param count:
The maximum number of recurrences to generate. If `None` (default),
dates are generated until the recurrence rule is exhausted.
:param inc:
If `dt` is an instance of the rule and `inc` is `True`, it is
included in the output.
:yields: Yields a sequence of `datetime` objects.
"""
if self._cache_complete:
gen = self._cache
else:
gen = self
# Select the comparison function
if inc:
comp = lambda dc, dtc: dc >= dtc
else:
comp = lambda dc, dtc: dc > dtc
# Generate dates
n = 0
for d in gen:
if comp(d, dt):
if count is not None:
n += 1
if n > count:
break
yield d
def between(self, after, before, inc=False, count=1):
""" Returns all the occurrences of the rrule between after and before.
The inc keyword defines what happens if after and/or before are
themselves occurrences. With inc=True, they will be included in the
list, if they are found in the recurrence set. """
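        # Illustrative sketch (``r`` below is any rrule/rruleset instance,
        # assumed for the example):
        #     r.between(datetime.datetime(2012, 1, 1), datetime.datetime(2012, 2, 1))
        # returns the occurrences strictly inside that interval, while passing
        # inc=True also keeps the endpoints when they are themselves occurrences.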
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
class rrule(rrulebase):
"""
That's the base of the rrule operation. It accepts all the keywords
defined in the RFC as its constructor parameters (except byday,
which was renamed to byweekday) and more. The constructor prototype is::
rrule(freq)
Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
or SECONDLY.
.. note::
Per RFC section 3.3.10, recurrence instances falling on invalid dates
and times are ignored rather than coerced:
Recurrence rules may generate recurrence instances with an invalid
date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
on a day where the local time is moved forward by an hour at 1:00
AM). Such recurrence instances MUST be ignored and MUST NOT be
counted as part of the recurrence set.
This can lead to possibly surprising behavior when, for example, the
start date occurs at the end of the month:
>>> from dateutil.rrule import rrule, MONTHLY
>>> from datetime import datetime
>>> start_date = datetime(2014, 12, 31)
>>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
... # doctest: +NORMALIZE_WHITESPACE
[datetime.datetime(2014, 12, 31, 0, 0),
datetime.datetime(2015, 1, 31, 0, 0),
datetime.datetime(2015, 3, 31, 0, 0),
datetime.datetime(2015, 5, 31, 0, 0)]
Additionally, it supports the following keyword arguments:
:param cache:
If given, it must be a boolean value specifying to enable or disable
caching of results. If you will use the same rrule instance multiple
times, enabling caching will improve the performance considerably.
:param dtstart:
The recurrence start. Besides being the base for the recurrence,
missing parameters in the final recurrence instances will also be
extracted from this date. If not given, datetime.now() will be used
instead.
:param interval:
The interval between each freq iteration. For example, when using
YEARLY, an interval of 2 means once every two years, but with HOURLY,
it means once every two hours. The default interval is 1.
:param wkst:
The week start day. Must be one of the MO, TU, WE constants, or an
integer, specifying the first day of the week. This will affect
        recurrences based on weekly periods. The default week start is taken
        from calendar.firstweekday(), and may be modified by
calendar.setfirstweekday().
:param count:
How many occurrences will be generated.
.. note::
As of version 2.5.0, the use of the ``until`` keyword together
with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10.
:param until:
If given, this must be a datetime instance, that will specify the
limit of the recurrence. The last recurrence in the rule is the greatest
datetime that is less than or equal to the value specified in the
``until`` parameter.
.. note::
As of version 2.5.0, the use of the ``until`` keyword together
with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10.
:param bysetpos:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each given integer will specify an occurrence
number, corresponding to the nth occurrence of the rule inside the
frequency period. For example, a bysetpos of -1 if combined with a
MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
result in the last work day of every month.
:param bymonth:
If given, it must be either an integer, or a sequence of integers,
meaning the months to apply the recurrence to.
:param bymonthday:
If given, it must be either an integer, or a sequence of integers,
meaning the month days to apply the recurrence to.
:param byyearday:
If given, it must be either an integer, or a sequence of integers,
meaning the year days to apply the recurrence to.
:param byweekno:
If given, it must be either an integer, or a sequence of integers,
meaning the week numbers to apply the recurrence to. Week numbers
have the meaning described in ISO8601, that is, the first week of
the year is that containing at least four days of the new year.
:param byweekday:
If given, it must be either an integer (0 == MO), a sequence of
integers, one of the weekday constants (MO, TU, etc), or a sequence
of these constants. When given, these variables will define the
weekdays where the recurrence will be applied. It's also possible to
use an argument n for the weekday instances, which will mean the nth
occurrence of this weekday in the period. For example, with MONTHLY,
or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
first friday of the month where the recurrence happens. Notice that in
the RFC documentation, this is specified as BYDAY, but was renamed to
avoid the ambiguity of that keyword.
:param byhour:
If given, it must be either an integer, or a sequence of integers,
meaning the hours to apply the recurrence to.
:param byminute:
If given, it must be either an integer, or a sequence of integers,
meaning the minutes to apply the recurrence to.
:param bysecond:
If given, it must be either an integer, or a sequence of integers,
meaning the seconds to apply the recurrence to.
:param byeaster:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each integer will define an offset from the
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
Sunday itself. This is an extension to the RFC specification.
"""
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
super(rrule, self).__init__(cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
# Cache the original byxxx rules, if they are provided, as the _byxxx
# attributes do not necessarily map to the inputs, and this can be
# a problem in generating the strings. Only store things if they've
# been supplied (the string retrieval will just use .get())
self._original_rule = {}
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if self._dtstart and self._until:
if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None):
# According to RFC5545 Section 3.3.10:
# https://tools.ietf.org/html/rfc5545#section-3.3.10
#
# > If the "DTSTART" property is specified as a date with UTC
# > time or a date with local time and time zone reference,
# > then the UNTIL rule part MUST be specified as a date with
# > UTC time.
raise ValueError(
'RRULE UNTIL values must be specified in UTC when DTSTART '
'is timezone-aware'
)
if count is not None and until:
warn("Using both 'count' and 'until' is inconsistent with RFC 5545"
" and has been deprecated in dateutil. Future versions will "
"raise an error.", DeprecationWarning)
if wkst is None:
self._wkst = calendar.firstweekday()
elif isinstance(wkst, integer_types):
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif isinstance(bysetpos, integer_types):
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if self._bysetpos:
self._original_rule['bysetpos'] = self._bysetpos
if (byweekno is None and byyearday is None and bymonthday is None and
byweekday is None and byeaster is None):
if freq == YEARLY:
if bymonth is None:
bymonth = dtstart.month
self._original_rule['bymonth'] = None
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == MONTHLY:
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == WEEKLY:
byweekday = dtstart.weekday()
self._original_rule['byweekday'] = None
# bymonth
if bymonth is None:
self._bymonth = None
else:
if isinstance(bymonth, integer_types):
bymonth = (bymonth,)
self._bymonth = tuple(sorted(set(bymonth)))
if 'bymonth' not in self._original_rule:
self._original_rule['bymonth'] = self._bymonth
# byyearday
if byyearday is None:
self._byyearday = None
else:
if isinstance(byyearday, integer_types):
byyearday = (byyearday,)
self._byyearday = tuple(sorted(set(byyearday)))
self._original_rule['byyearday'] = self._byyearday
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if isinstance(byeaster, integer_types):
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(sorted(byeaster))
self._original_rule['byeaster'] = self._byeaster
else:
self._byeaster = None
# bymonthday
if bymonthday is None:
self._bymonthday = ()
self._bynmonthday = ()
else:
if isinstance(bymonthday, integer_types):
bymonthday = (bymonthday,)
bymonthday = set(bymonthday) # Ensure it's unique
self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
# Storing positive numbers first, then negative numbers
if 'bymonthday' not in self._original_rule:
self._original_rule['bymonthday'] = tuple(
itertools.chain(self._bymonthday, self._bynmonthday))
# byweekno
if byweekno is None:
self._byweekno = None
else:
if isinstance(byweekno, integer_types):
byweekno = (byweekno,)
self._byweekno = tuple(sorted(set(byweekno)))
self._original_rule['byweekno'] = self._byweekno
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
else:
# If it's one of the valid non-sequence types, convert to a
# single-element sequence before the iterator that builds the
# byweekday set.
if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
byweekday = (byweekday,)
self._byweekday = set()
self._bynweekday = set()
for wday in byweekday:
if isinstance(wday, integer_types):
self._byweekday.add(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.add(wday.weekday)
else:
self._bynweekday.add((wday.weekday, wday.n))
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
if self._byweekday is not None:
self._byweekday = tuple(sorted(self._byweekday))
orig_byweekday = [weekday(x) for x in self._byweekday]
else:
orig_byweekday = ()
if self._bynweekday is not None:
self._bynweekday = tuple(sorted(self._bynweekday))
orig_bynweekday = [weekday(*x) for x in self._bynweekday]
else:
orig_bynweekday = ()
if 'byweekday' not in self._original_rule:
self._original_rule['byweekday'] = tuple(itertools.chain(
orig_byweekday, orig_bynweekday))
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = {dtstart.hour}
else:
self._byhour = None
else:
if isinstance(byhour, integer_types):
byhour = (byhour,)
if freq == HOURLY:
self._byhour = self.__construct_byset(start=dtstart.hour,
byxxx=byhour,
base=24)
else:
self._byhour = set(byhour)
self._byhour = tuple(sorted(self._byhour))
self._original_rule['byhour'] = self._byhour
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = {dtstart.minute}
else:
self._byminute = None
else:
if isinstance(byminute, integer_types):
byminute = (byminute,)
if freq == MINUTELY:
self._byminute = self.__construct_byset(start=dtstart.minute,
byxxx=byminute,
base=60)
else:
self._byminute = set(byminute)
self._byminute = tuple(sorted(self._byminute))
self._original_rule['byminute'] = self._byminute
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = ((dtstart.second,))
else:
self._bysecond = None
else:
if isinstance(bysecond, integer_types):
bysecond = (bysecond,)
self._bysecond = set(bysecond)
if freq == SECONDLY:
self._bysecond = self.__construct_byset(start=dtstart.second,
byxxx=bysecond,
base=60)
else:
self._bysecond = set(bysecond)
self._bysecond = tuple(sorted(self._bysecond))
self._original_rule['bysecond'] = self._bysecond
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def __str__(self):
"""
Output a string that would generate this RRULE if passed to rrulestr.
This is mostly compatible with RFC5545, except for the
dateutil-specific extension BYEASTER.
"""
output = []
h, m, s = [None] * 3
if self._dtstart:
output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
h, m, s = self._dtstart.timetuple()[3:6]
parts = ['FREQ=' + FREQNAMES[self._freq]]
if self._interval != 1:
parts.append('INTERVAL=' + str(self._interval))
if self._wkst:
parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
if self._count is not None:
parts.append('COUNT=' + str(self._count))
if self._until:
parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
if self._original_rule.get('byweekday') is not None:
# The str() method on weekday objects doesn't generate
# RFC5545-compliant strings, so we should modify that.
original_rule = dict(self._original_rule)
wday_strings = []
for wday in original_rule['byweekday']:
if wday.n:
wday_strings.append('{n:+d}{wday}'.format(
n=wday.n,
wday=repr(wday)[0:2]))
else:
wday_strings.append(repr(wday))
original_rule['byweekday'] = wday_strings
else:
original_rule = self._original_rule
partfmt = '{name}={vals}'
for name, key in [('BYSETPOS', 'bysetpos'),
('BYMONTH', 'bymonth'),
('BYMONTHDAY', 'bymonthday'),
('BYYEARDAY', 'byyearday'),
('BYWEEKNO', 'byweekno'),
('BYDAY', 'byweekday'),
('BYHOUR', 'byhour'),
('BYMINUTE', 'byminute'),
('BYSECOND', 'bysecond'),
('BYEASTER', 'byeaster')]:
value = original_rule.get(key)
if value:
parts.append(partfmt.format(name=name, vals=(','.join(str(v)
for v in value))))
output.append('RRULE:' + ';'.join(parts))
return '\n'.join(output)
def replace(self, **kwargs):
"""Return new rrule with same attributes except for those attributes given new
values by whichever keyword arguments are specified."""
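        # Illustrative sketch (``rr`` is assumed to be an existing rrule):
        #     rr2 = rr.replace(interval=2)
        # returns a new rule identical to ``rr`` except for the interval;
        # ``rr`` itself is left unchanged.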
new_kwargs = {"interval": self._interval,
"count": self._count,
"dtstart": self._dtstart,
"freq": self._freq,
"until": self._until,
"wkst": self._wkst,
"cache": False if self._cache is None else True }
new_kwargs.update(self._original_rule)
new_kwargs.update(kwargs)
return rrule(**new_kwargs)
def _iter(self):
year, month, day, hour, minute, second, weekday, yearday, _ = \
self._dtstart.timetuple()
# Some local variables to speed things up a bit
freq = self._freq
interval = self._interval
wkst = self._wkst
until = self._until
bymonth = self._bymonth
byweekno = self._byweekno
byyearday = self._byyearday
byweekday = self._byweekday
byeaster = self._byeaster
bymonthday = self._bymonthday
bynmonthday = self._bynmonthday
bysetpos = self._bysetpos
byhour = self._byhour
byminute = self._byminute
bysecond = self._bysecond
ii = _iterinfo(self)
ii.rebuild(year, month)
getdayset = {YEARLY: ii.ydayset,
MONTHLY: ii.mdayset,
WEEKLY: ii.wdayset,
DAILY: ii.ddayset,
HOURLY: ii.ddayset,
MINUTELY: ii.ddayset,
SECONDLY: ii.ddayset}[freq]
if freq < HOURLY:
timeset = self._timeset
else:
gettimeset = {HOURLY: ii.htimeset,
MINUTELY: ii.mtimeset,
SECONDLY: ii.stimeset}[freq]
if ((freq >= HOURLY and
self._byhour and hour not in self._byhour) or
(freq >= MINUTELY and
self._byminute and minute not in self._byminute) or
(freq >= SECONDLY and
self._bysecond and second not in self._bysecond)):
timeset = ()
else:
timeset = gettimeset(hour, minute, second)
total = 0
count = self._count
while True:
# Get dayset with the right frequency
dayset, start, end = getdayset(year, month, day)
# Do the "hard" work ;-)
filtered = False
for i in dayset[start:end]:
if ((bymonth and ii.mmask[i] not in bymonth) or
(byweekno and not ii.wnomask[i]) or
(byweekday and ii.wdaymask[i] not in byweekday) or
(ii.nwdaymask and not ii.nwdaymask[i]) or
(byeaster and not ii.eastermask[i]) or
((bymonthday or bynmonthday) and
ii.mdaymask[i] not in bymonthday and
ii.nmdaymask[i] not in bynmonthday) or
(byyearday and
((i < ii.yearlen and i+1 not in byyearday and
-ii.yearlen+i not in byyearday) or
(i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
-ii.nextyearlen+i-ii.yearlen not in byyearday)))):
dayset[i] = None
filtered = True
# Output results
if bysetpos and timeset:
poslist = []
for pos in bysetpos:
if pos < 0:
daypos, timepos = divmod(pos, len(timeset))
else:
daypos, timepos = divmod(pos-1, len(timeset))
try:
i = [x for x in dayset[start:end]
if x is not None][daypos]
time = timeset[timepos]
except IndexError:
pass
else:
date = datetime.date.fromordinal(ii.yearordinal+i)
res = datetime.datetime.combine(date, time)
if res not in poslist:
poslist.append(res)
poslist.sort()
for res in poslist:
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
if count is not None:
count -= 1
if count < 0:
self._len = total
return
total += 1
yield res
else:
for i in dayset[start:end]:
if i is not None:
date = datetime.date.fromordinal(ii.yearordinal + i)
for time in timeset:
res = datetime.datetime.combine(date, time)
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
if count is not None:
count -= 1
if count < 0:
self._len = total
return
total += 1
yield res
# Handle frequency and interval
fixday = False
if freq == YEARLY:
year += interval
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == MONTHLY:
month += interval
if month > 12:
div, mod = divmod(month, 12)
month = mod
year += div
if month == 0:
month = 12
year -= 1
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == WEEKLY:
if wkst > weekday:
day += -(weekday+1+(6-wkst))+self._interval*7
else:
day += -(weekday-wkst)+self._interval*7
weekday = wkst
fixday = True
elif freq == DAILY:
day += interval
fixday = True
elif freq == HOURLY:
if filtered:
# Jump to one iteration before next day
hour += ((23-hour)//interval)*interval
if byhour:
ndays, hour = self.__mod_distance(value=hour,
byxxx=self._byhour,
base=24)
else:
ndays, hour = divmod(hour+interval, 24)
if ndays:
day += ndays
fixday = True
timeset = gettimeset(hour, minute, second)
elif freq == MINUTELY:
if filtered:
# Jump to one iteration before next day
minute += ((1439-(hour*60+minute))//interval)*interval
valid = False
rep_rate = (24*60)
for j in range(rep_rate // gcd(interval, rep_rate)):
if byminute:
nhours, minute = \
self.__mod_distance(value=minute,
byxxx=self._byminute,
base=60)
else:
nhours, minute = divmod(minute+interval, 60)
div, hour = divmod(hour+nhours, 24)
if div:
day += div
fixday = True
filtered = False
if not byhour or hour in byhour:
valid = True
break
if not valid:
raise ValueError('Invalid combination of interval and ' +
'byhour resulting in empty rule.')
timeset = gettimeset(hour, minute, second)
elif freq == SECONDLY:
if filtered:
# Jump to one iteration before next day
second += (((86399 - (hour * 3600 + minute * 60 + second))
// interval) * interval)
rep_rate = (24 * 3600)
valid = False
for j in range(0, rep_rate // gcd(interval, rep_rate)):
if bysecond:
nminutes, second = \
self.__mod_distance(value=second,
byxxx=self._bysecond,
base=60)
else:
nminutes, second = divmod(second+interval, 60)
div, minute = divmod(minute+nminutes, 60)
if div:
hour += div
div, hour = divmod(hour, 24)
if div:
day += div
fixday = True
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute) and
(not bysecond or second in bysecond)):
valid = True
break
if not valid:
raise ValueError('Invalid combination of interval, ' +
'byhour and byminute resulting in empty' +
' rule.')
timeset = gettimeset(hour, minute, second)
if fixday and day > 28:
daysinmonth = calendar.monthrange(year, month)[1]
if day > daysinmonth:
while day > daysinmonth:
day -= daysinmonth
month += 1
if month == 13:
month = 1
year += 1
if year > datetime.MAXYEAR:
self._len = total
return
daysinmonth = calendar.monthrange(year, month)[1]
ii.rebuild(year, month)
def __construct_byset(self, start, byxxx, base):
"""
If a `BYXXX` sequence is passed to the constructor at the same level as
`FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
specifications which cannot be reached given some starting conditions.
This occurs whenever the interval is not coprime with the base of a
given unit and the difference between the starting position and the
        target value is not divisible by the greatest common divisor of
        the interval and the base. For example, with a FREQ of hourly
starting at 17:00 and an interval of 4, the only valid values for
BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
coprime.
:param start:
Specifies the starting position.
:param byxxx:
An iterable containing the list of allowed values.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
        This does not preserve the type of the iterable, returning a set
        instead; since the values should be unique and the order is
        irrelevant, this will also speed up later lookups.
In the event of an empty set, raises a :exception:`ValueError`, as this
results in an empty rrule.
"""
cset = set()
# Support a single byxxx value.
if isinstance(byxxx, integer_types):
byxxx = (byxxx, )
for num in byxxx:
i_gcd = gcd(self._interval, base)
# Use divmod rather than % because we need to wrap negative nums.
if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
cset.add(num)
if len(cset) == 0:
raise ValueError("Invalid rrule byxxx generates an empty set.")
return cset
def __mod_distance(self, value, byxxx, base):
"""
Calculates the next value in a sequence where the `FREQ` parameter is
specified along with a `BYXXX` parameter at the same "level"
(e.g. `HOURLY` specified with `BYHOUR`).
:param value:
The old value of the component.
:param byxxx:
The `BYXXX` set, which should have been generated by
`rrule._construct_byset`, or something else which checks that a
valid rule is present.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
If a valid value is not found after `base` iterations (the maximum
number before the sequence would start to repeat), this raises a
:exception:`ValueError`, as no valid values were found.
This returns a tuple of `divmod(n*interval, base)`, where `n` is the
smallest number of `interval` repetitions until the next specified
value in `byxxx` is found.
"""
accumulator = 0
for ii in range(1, base + 1):
# Using divmod() over % to account for negative intervals
div, value = divmod(value + self._interval, base)
accumulator += div
if value in byxxx:
return (accumulator, value)
class _iterinfo(object):
__slots__ = ["rrule", "lastyear", "lastmonth",
"yearlen", "nextyearlen", "yearordinal", "yearweekday",
"mmask", "mrange", "mdaymask", "nmdaymask",
"wdaymask", "wnomask", "nwdaymask", "eastermask"]
def __init__(self, rrule):
for attr in self.__slots__:
setattr(self, attr, None)
self.rrule = rrule
def rebuild(self, year, month):
# Every mask is 7 days longer to handle cross-year weekly periods.
rr = self.rrule
if year != self.lastyear:
self.yearlen = 365 + calendar.isleap(year)
self.nextyearlen = 365 + calendar.isleap(year + 1)
firstyday = datetime.date(year, 1, 1)
self.yearordinal = firstyday.toordinal()
self.yearweekday = firstyday.weekday()
wday = datetime.date(year, 1, 1).weekday()
if self.yearlen == 365:
self.mmask = M365MASK
self.mdaymask = MDAY365MASK
self.nmdaymask = NMDAY365MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M365RANGE
else:
self.mmask = M366MASK
self.mdaymask = MDAY366MASK
self.nmdaymask = NMDAY366MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M366RANGE
if not rr._byweekno:
self.wnomask = None
else:
self.wnomask = [0]*(self.yearlen+7)
# no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
if no1wkst >= 4:
no1wkst = 0
# Number of days in the year, plus the days we got
# from last year.
wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
else:
# Number of days in the year, minus the days we
# left in last year.
wyearlen = self.yearlen-no1wkst
div, mod = divmod(wyearlen, 7)
numweeks = div+mod//4
for n in rr._byweekno:
if n < 0:
n += numweeks+1
if not (0 < n <= numweeks):
continue
if n > 1:
i = no1wkst+(n-1)*7
if no1wkst != firstwkst:
i -= 7-firstwkst
else:
i = no1wkst
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if 1 in rr._byweekno:
# Check week number 1 of next year as well
# TODO: Check -numweeks for next year.
i = no1wkst+numweeks*7
if no1wkst != firstwkst:
i -= 7-firstwkst
if i < self.yearlen:
# If week starts in next year, we
# don't care about it.
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if no1wkst:
# Check last week number of last year as
# well. If no1wkst is 0, either the year
# started on week start, or week number 1
# got days from last year, so there are no
# days from last year's last week number in
# this year.
if -1 not in rr._byweekno:
lyearweekday = datetime.date(year-1, 1, 1).weekday()
lno1wkst = (7-lyearweekday+rr._wkst) % 7
lyearlen = 365+calendar.isleap(year-1)
if lno1wkst >= 4:
lno1wkst = 0
lnumweeks = 52+(lyearlen +
(lyearweekday-rr._wkst) % 7) % 7//4
else:
lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
else:
lnumweeks = -1
if lnumweeks in rr._byweekno:
for i in range(no1wkst):
self.wnomask[i] = 1
if (rr._bynweekday and (month != self.lastmonth or
year != self.lastyear)):
ranges = []
if rr._freq == YEARLY:
if rr._bymonth:
for month in rr._bymonth:
ranges.append(self.mrange[month-1:month+1])
else:
ranges = [(0, self.yearlen)]
elif rr._freq == MONTHLY:
ranges = [self.mrange[month-1:month+1]]
if ranges:
# Weekly frequency won't get here, so we may not
# care about cross-year weekly periods.
self.nwdaymask = [0]*self.yearlen
for first, last in ranges:
last -= 1
for wday, n in rr._bynweekday:
if n < 0:
i = last+(n+1)*7
i -= (self.wdaymask[i]-wday) % 7
else:
i = first+(n-1)*7
i += (7-self.wdaymask[i]+wday) % 7
if first <= i <= last:
self.nwdaymask[i] = 1
if rr._byeaster:
self.eastermask = [0]*(self.yearlen+7)
eyday = easter.easter(year).toordinal()-self.yearordinal
for offset in rr._byeaster:
self.eastermask[eyday+offset] = 1
self.lastyear = year
self.lastmonth = month
def ydayset(self, year, month, day):
return list(range(self.yearlen)), 0, self.yearlen
def mdayset(self, year, month, day):
dset = [None]*self.yearlen
start, end = self.mrange[month-1:month+1]
for i in range(start, end):
dset[i] = i
return dset, start, end
def wdayset(self, year, month, day):
# We need to handle cross-year weeks here.
dset = [None]*(self.yearlen+7)
i = datetime.date(year, month, day).toordinal()-self.yearordinal
start = i
for j in range(7):
dset[i] = i
i += 1
# if (not (0 <= i < self.yearlen) or
# self.wdaymask[i] == self.rrule._wkst):
# This will cross the year boundary, if necessary.
if self.wdaymask[i] == self.rrule._wkst:
break
return dset, start, i
def ddayset(self, year, month, day):
dset = [None] * self.yearlen
i = datetime.date(year, month, day).toordinal() - self.yearordinal
dset[i] = i
return dset, i, i + 1
def htimeset(self, hour, minute, second):
tset = []
rr = self.rrule
for minute in rr._byminute:
for second in rr._bysecond:
tset.append(datetime.time(hour, minute, second,
tzinfo=rr._tzinfo))
tset.sort()
return tset
def mtimeset(self, hour, minute, second):
tset = []
rr = self.rrule
for second in rr._bysecond:
tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
tset.sort()
return tset
def stimeset(self, hour, minute, second):
return (datetime.time(hour, minute, second,
tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
""" The rruleset type allows more complex recurrence setups, mixing
multiple rules, dates, exclusion rules, and exclusion dates. The type
constructor takes the following keyword arguments:
:param cache: If True, caching of results will be enabled, improving
performance of multiple queries considerably. """
class _genitem(object):
def __init__(self, genlist, gen):
try:
self.dt = advance_iterator(gen)
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def __next__(self):
try:
self.dt = advance_iterator(self.gen)
except StopIteration:
if self.genlist[0] is self:
heapq.heappop(self.genlist)
else:
self.genlist.remove(self)
heapq.heapify(self.genlist)
next = __next__
def __lt__(self, other):
return self.dt < other.dt
def __gt__(self, other):
return self.dt > other.dt
def __eq__(self, other):
return self.dt == other.dt
def __ne__(self, other):
return self.dt != other.dt
def __init__(self, cache=False):
super(rruleset, self).__init__(cache)
self._rrule = []
self._rdate = []
self._exrule = []
self._exdate = []
@_invalidates_cache
def rrule(self, rrule):
""" Include the given :py:class:`rrule` instance in the recurrence set
generation. """
self._rrule.append(rrule)
@_invalidates_cache
def rdate(self, rdate):
""" Include the given :py:class:`datetime` instance in the recurrence
set generation. """
self._rdate.append(rdate)
@_invalidates_cache
def exrule(self, exrule):
""" Include the given rrule instance in the recurrence set exclusion
list. Dates which are part of the given recurrence rules will not
be generated, even if some inclusive rrule or rdate matches them.
"""
self._exrule.append(exrule)
@_invalidates_cache
def exdate(self, exdate):
""" Include the given datetime instance in the recurrence set
exclusion list. Dates included that way will not be generated,
even if some inclusive rrule or rdate matches them. """
self._exdate.append(exdate)
def _iter(self):
rlist = []
self._rdate.sort()
self._genitem(rlist, iter(self._rdate))
for gen in [iter(x) for x in self._rrule]:
self._genitem(rlist, gen)
exlist = []
self._exdate.sort()
self._genitem(exlist, iter(self._exdate))
for gen in [iter(x) for x in self._exrule]:
self._genitem(exlist, gen)
lastdt = None
total = 0
heapq.heapify(rlist)
heapq.heapify(exlist)
while rlist:
ritem = rlist[0]
if not lastdt or lastdt != ritem.dt:
while exlist and exlist[0] < ritem:
exitem = exlist[0]
advance_iterator(exitem)
if exlist and exlist[0] is exitem:
heapq.heapreplace(exlist, exitem)
if not exlist or ritem != exlist[0]:
total += 1
yield ritem.dt
lastdt = ritem.dt
advance_iterator(ritem)
if rlist and rlist[0] is ritem:
heapq.heapreplace(rlist, ritem)
self._len = total
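# Illustrative sketch, not part of the original module: typical rruleset usage
# mixing an inclusion rule with an exclusion date (the dates are arbitrary).
#
#     >>> import datetime
#     >>> from dateutil.rrule import rrule, rruleset, DAILY
#     >>> rset = rruleset()
#     >>> rset.rrule(rrule(DAILY, count=3, dtstart=datetime.datetime(2000, 1, 1)))
#     >>> rset.exdate(datetime.datetime(2000, 1, 2))
#     >>> list(rset)
#     [datetime.datetime(2000, 1, 1, 0, 0), datetime.datetime(2000, 1, 3, 0, 0)]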
class _rrulestr(object):
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
"FR": 4, "SA": 5, "SU": 6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError("invalid until date")
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
"""
Two ways to specify this: +1MO or MO(+1)
"""
l = []
for wday in value.split(','):
if '(' in wday:
# If it's of the form TH(+1), etc.
splt = wday.split('(')
w = splt[0]
n = int(splt[1][:-1])
elif len(wday):
# If it's of the form +1MO
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n:
n = int(n)
else:
raise ValueError("Invalid (empty) BYDAY specification.")
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
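    # Illustrative sketch, not part of the original module: both accepted BYDAY
    # spellings parse to the same rule (first Monday of each month here).
    #
    #     >>> import datetime
    #     >>> from dateutil.rrule import rrulestr
    #     >>> d = datetime.datetime(2000, 1, 1)
    #     >>> a = rrulestr('RRULE:FREQ=MONTHLY;COUNT=2;BYDAY=+1MO', dtstart=d)
    #     >>> b = rrulestr('RRULE:FREQ=MONTHLY;COUNT=2;BYDAY=MO(+1)', dtstart=d)
    #     >>> list(a) == list(b)
    #     True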
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError("unknown parameter name")
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError("unknown parameter '%s'" % name)
except (KeyError, ValueError):
raise ValueError("invalid '%s': %s" % (name, value))
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzids=None,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
TZID_NAMES = dict(map(
lambda x: (x.upper(), x),
re.findall('TZID=(?P<name>[^:]+):', s)
))
s = s.upper()
if not s.strip():
raise ValueError("empty string")
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError("unsupported RRULE parm: "+parm)
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported RDATE parm: "+parm)
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError("unsupported EXRULE parm: "+parm)
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported EXDATE parm: "+parm)
exdatevals.append(value)
elif name == "DTSTART":
                    # RFC 5545 3.8.2.4: The VALUE parameter is optional, but
# may be found only once.
value_found = False
TZID = None
valid_values = {"VALUE=DATE-TIME", "VALUE=DATE"}
for parm in parms:
if parm.startswith("TZID="):
try:
tzkey = TZID_NAMES[parm.split('TZID=')[-1]]
except KeyError:
continue
if tzids is None:
from . import tz
tzlookup = tz.gettz
elif callable(tzids):
tzlookup = tzids
else:
tzlookup = getattr(tzids, 'get', None)
if tzlookup is None:
msg = ('tzids must be a callable, ' +
'mapping, or None, ' +
'not %s' % tzids)
raise ValueError(msg)
TZID = tzlookup(tzkey)
continue
if parm not in valid_values:
raise ValueError("unsupported DTSTART parm: "+parm)
else:
if value_found:
msg = ("Duplicate value parameter found in " +
"DTSTART: " + parm)
raise ValueError(msg)
value_found = True
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
if TZID is not None:
if dtstart.tzinfo is None:
dtstart = dtstart.replace(tzinfo=TZID)
else:
raise ValueError('DTSTART specifies multiple timezones')
else:
raise ValueError("unsupported property: "+name)
if (forceset or len(rrulevals) > 1 or rdatevals
or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
rset = rruleset(cache=cache)
for value in rrulevals:
rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
rset.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
for datestr in value.split(','):
rset.exdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
if compatible and dtstart:
rset.rdate(dtstart)
return rset
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
def __call__(self, s, **kwargs):
return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
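# Illustrative sketch, not part of the original module: parsing a small
# iCalendar-style snippet with the module-level rrulestr instance above.
#
#     >>> s = "DTSTART:20000101T090000\nRRULE:FREQ=DAILY;COUNT=2"
#     >>> list(rrulestr(s))
#     [datetime.datetime(2000, 1, 1, 9, 0), datetime.datetime(2000, 1, 2, 9, 0)]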
# vim:ts=4:sw=4:et
| gpl-3.0 | 3,283,657,446,893,784,000 | 37.730977 | 86 | 0.485087 | false |
ecreall/lagendacommun | lac/views/lac_view_manager/home.py | 1 | 5196 | # -*- coding: utf8 -*-
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import math
import datetime
import pytz
from pyramid.view import view_config
from pyramid.threadlocal import get_current_request
from substanced.objectmap import find_objectmap
from substanced.util import get_oid
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from dace.objectofcollaboration.principal.util import get_current
from dace.util import find_catalog
from pontus.view import BasicView
from pontus.util import merge_dicts
from lac.content.processes.lac_view_manager.behaviors import (
SeeHome)
from lac.content.lac_application import (
CreationCulturelleApplication)
from lac.content.interface import ISmartFolder
from lac.utilities.smart_folder_utility import get_folder_content
from lac.utilities.utils import get_site_folder
from lac.content.smart_folder import SmartFolder
from lac.content.site_configuration import (
DEFAULT_DAYS_VISIBILITY)
from lac.views.filter import find_entities
from lac.views.user_management.login import LoginView
MORE_NB = 3
@view_config(
name='index',
context=CreationCulturelleApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
@view_config(
name='',
context=CreationCulturelleApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class SeeHomeView(BasicView):
title = ''
name = ''
behaviors = [SeeHome]
template = 'lac:views/lac_view_manager/templates/home.pt'
viewid = 'seehome'
wrapper_template = 'daceui:templates/simple_view_wrapper.pt'
container_css_class = 'home'
def login(self):
log_instance = LoginView(self.context, self.request)
return log_instance()
def update(self):
if self.request.POST and 'login_form.submitted' in self.request.POST:
log_result = self.login()
if not isinstance(log_result, dict):
return log_result
self.execute(None)
site = get_site_folder(True)
self.title = site.title
site_id = get_oid(site)
user = get_current()
folders = find_entities(
interfaces=[ISmartFolder],
metadata_filter={'states': ['published']},
force_local_control=True)
my_folders = []
if self.request.user:
my_folders = getattr(user, 'folders', [])
my_folders = [folder for folder in my_folders
if isinstance(folder, SmartFolder) and
not folder.parents and
'private' in folder.state]
folders = [folder for folder in folders
if not folder.parents and
getattr(folder, 'add_as_a_block', False)]
folders.extend(my_folders)
foldersdata = []
old_date = datetime.datetime.now(tz=pytz.UTC) - datetime.timedelta(
days=getattr(site, 'days_visibility', DEFAULT_DAYS_VISIBILITY))
old_date = old_date.replace(tzinfo=pytz.UTC)
lac_catalog = find_catalog('lac')
release_date_index = lac_catalog['release_date']
query = release_date_index.ge(old_date)
content_types = getattr(site, 'home_content_types',
['review', 'cinema_review',
'brief', 'interview'])
for folder in folders:
all_folders = [folder]
all_folders.extend(folder.all_sub_folders('published'))
contents_oids = set()
for sub_folder in all_folders:
result_set = get_folder_content(
sub_folder, user,
sort_on='release_date',
reverse=True,
limit=MORE_NB,
add_query=query,
metadata_filter={'content_types': content_types,
'states': ['published']}
)
contents_oids |= set(result_set.ids)
if contents_oids:
contents_oids = release_date_index.sort(
contents_oids, reverse=True, limit=MORE_NB)
objectmap = find_objectmap(get_current_request().root)
resolver = objectmap.object_for
contents = [resolver(oid) for oid in contents_oids]
foldersdata.append({'folder': folder,
'contents': contents,
'order': folder.get_order(site_id)})
foldersdata = sorted(foldersdata, key=lambda e: e['order'])
result = {}
values = {'folders': foldersdata,
'content_types': content_types,
'row_len': math.ceil(len(foldersdata)/2)}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
result = merge_dicts(self.requirements_copy, result)
return result
DEFAULTMAPPING_ACTIONS_VIEWS.update({SeeHome: SeeHomeView})
| agpl-3.0 | 7,191,274,477,247,623,000 | 36.927007 | 77 | 0.60893 | false |
geekaia/edx-platform | common/djangoapps/track/contexts.py | 5 | 1475 | """Generates common contexts"""
import logging
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys import InvalidKeyError
from util.request import COURSE_REGEX
log = logging.getLogger(__name__)
def course_context_from_url(url):
"""
Extracts the course_context from the given `url` and passes it on to
`course_context_from_course_id()`.
"""
url = url or ''
match = COURSE_REGEX.match(url)
course_id = None
if match:
course_id_string = match.group('course_id')
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id_string)
except InvalidKeyError:
log.warning(
'unable to parse course_id "{course_id}"'.format(
course_id=course_id_string
),
exc_info=True
)
return course_context_from_course_id(course_id)
def course_context_from_course_id(course_id):
"""
Creates a course context from a `course_id`.
Example Returned Context::
{
'course_id': 'org/course/run',
'org_id': 'org'
}
"""
if course_id is None:
return {'course_id': '', 'org_id': ''}
# TODO: Make this accept any CourseKey, and serialize it using .to_string
assert(isinstance(course_id, SlashSeparatedCourseKey))
return {
'course_id': course_id.to_deprecated_string(),
'org_id': course_id.org,
}
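# Illustrative sketch, not part of the original module: the returned context
# for a legacy slash-separated course key, mirroring the docstring above.
#
#     key = SlashSeparatedCourseKey.from_deprecated_string('org/course/run')
#     course_context_from_course_id(key)
#     # -> {'course_id': 'org/course/run', 'org_id': 'org'}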
| agpl-3.0 | 1,627,284,375,638,080,300 | 25.818182 | 88 | 0.60339 | false |
BoPeng/simuPOP | docs/PyOperator.py | 1 | 2258 | #!/usr/bin/env python
#
# $File: PyOperator.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
def dynaMutator(pop, param):
    '''This mutator mutates common loci with a low mutation rate and rare
    loci with a high mutation rate, as an attempt to raise the allele frequency
    of rare loci to a higher level.'''
# unpack parameter
(cutoff, mu1, mu2) = param;
sim.stat(pop, alleleFreq=range(pop.totNumLoci()))
for i in range(pop.totNumLoci()):
# Get the frequency of allele 1 (disease allele)
if pop.dvars().alleleFreq[i][1] < cutoff:
sim.kAlleleMutate(pop, k=2, rates=mu1, loci=[i])
else:
sim.kAlleleMutate(pop, k=2, rates=mu2, loci=[i])
return True
pop = sim.Population(size=10000, loci=[2, 3])
pop.evolve(
initOps=[
sim.InitSex(),
sim.InitGenotype(freq=[.99, .01], loci=[0, 2, 4]),
sim.InitGenotype(freq=[.8, .2], loci=[1, 3])
],
preOps=sim.PyOperator(func=dynaMutator, param=(.2, 1e-2, 1e-5)),
matingScheme=sim.RandomMating(),
postOps=[
sim.Stat(alleleFreq=range(5), step=10),
sim.PyEval(r"' '.join(['%.2f' % alleleFreq[x][1] for x in range(5)]) + '\n'",
step=10),
],
gen = 31
)
| gpl-2.0 | -4,326,466,934,336,378,000 | 34.84127 | 85 | 0.667405 | false |
michaelerule/neurotools | stats/mcint.py | 1 | 5228 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# BEGIN PYTHON 2/3 COMPATIBILITY BOILERPLATE
from __future__ import absolute_import
from __future__ import with_statement
from __future__ import division
from __future__ import nested_scopes
from __future__ import generators
from __future__ import unicode_literals
from __future__ import print_function
from neurotools.system import *
'''
Routines for monte-carlo integration
'''
import numpy as np
import sys
def monte_carlo_expectation(f,maxiter=int(1e6),converge=1e-2,verbose=False):
'''
x = monte_carlo_expectation(f,maxiter,converge)
Evaluate expectation of f using Monte-Carlo integration.
    For simplicity (for now), this casts the return value of f() to a float64 array
Ensure the return value is compatible with this datatype.
This uses the standard error of the mean to check for convergence.
It converges slowly at 1/sqrt(n)
Parameters
----------
f :
function that returns array_like.
maxiter :
maximum number of samples to draw
converge :
maximum absolute error tolerated
Returns
-------
number or array-like:
Estimate of the mean of f
Example
-------
def f():
x = randn(2)+array([9,-9])
return x
    Ex = monte_carlo_expectation(f,verbose=1,maxiter=100000,converge=1e-2)
print('Ex:\n',Ex)
'''
# perform argument validation
maxiter=int(maxiter)
assert(maxiter>0)
assert(converge>0)
# perform checks for numerical accuracy
dtype = np.float64
eps = np.sqrt(np.finfo(dtype).eps)
maxsample = 1./eps
if (maxiter>maxsample):
print('Warning: maximum iterations cannot be computed with acceptable precision')
assert 0
# draw the first sample
nsamp = 1.0
sample = dtype(f()).ravel()
moment_1 = sample
variance = np.mean(sample**2)
#moment_2 = np.outer(sample,sample)
if verbose:
sys.stdout.write('\n\r')
sys.stdout.flush()
# draw samples until we have converged
for i in range(maxiter):
sample = dtype(f()).ravel()
moment_1 = moment_1*(nsamp/(nsamp+1.0))+(1.0/(nsamp+1.0))*sample
variance = variance*(nsamp/(nsamp+1.0))+(1.0/(nsamp+1.0))*np.mean(sample**2)
stderror = np.sqrt((variance-np.mean(moment_1**2))/nsamp)
assert np.isfinite(stderror)
if (stderror<=converge): break
nsamp += 1
if verbose and (nsamp%100==0):
sys.stdout.write(('\r\b'*40)+'Sample %d, error %0.2f'%(nsamp,stderror))
sys.stdout.flush()
if verbose:
sys.stdout.write('\n')
sys.stdout.flush()
return moment_1
def monte_carlo_moments(f,maxiter=int(1e6),converge=1e-2,verbose=False):
'''
x = monte_carlo_expectation(f,maxiter,converge)
Evaluate expectation of f using Monte-Carlo integration.
    For simplicity (for now), this casts the return value of f() to a float64 array
Ensure the return value is compatible with this datatype.
This uses the standard error of the mean to check for convergence.
It converges very slowly (1/sqrt(n)), so don't ask for too much precision.
Parameters
----------
f : function that returns array_like.
maxiter : maximum number of samples to draw
converge : maximum absolute error tolerated
Returns
-------
Estimate of the mean and second moment of f
Example
-------
def f():
x = randn(2)+array([9,-9])
return x
Ex,Exx = monte_carlo_moments(f,verbose=1,maxiter=100000,converge=1e-2)
print('Ex:\n',Ex)
print('Exx:\n',Exx)
'''
# perform argument validation
maxiter=int(maxiter)
assert(maxiter>0)
assert(converge>0)
# perform checks for numerical accuracy
dtype = np.float64
eps = np.sqrt(np.finfo(dtype).eps)
maxsample = 1./eps
if (maxiter>maxsample):
print('Warning: maximum iterations cannot be computed with acceptable precision')
assert 0
# draw the first sample
nsamp = 1.0
sample = dtype(f()).ravel()
moment_1 = sample
moment_2 = np.outer(sample,sample)
if verbose:
sys.stdout.write('\n\r')
sys.stdout.flush()
# draw samples until we have converged
for i in range(maxiter):
sample = dtype(f()).ravel()
moment_1 = moment_1*(nsamp/(nsamp+1.0))+(1.0/(nsamp+1.0))*sample
moment_2 = moment_2*(nsamp/(nsamp+1.0))+(1.0/(nsamp+1.0))*np.outer(sample,sample)
# check error for convergence
covariance = moment_2 - np.outer(moment_1,moment_1)
standard_error = covariance / nsamp
mean_standard_error = np.sqrt(np.trace(standard_error)/sample.shape[0])
assert np.isfinite(mean_standard_error)
if (mean_standard_error<=converge):
break
nsamp += 1
if verbose and (nsamp%100==0):
sys.stdout.write(('\r\b'*40)+'Sample %d, error %0.2f'%(nsamp,mean_standard_error))
sys.stdout.flush()
if verbose:
sys.stdout.write('\n')
sys.stdout.flush()
return moment_1,moment_2
| gpl-3.0 | 453,506,364,964,403,900 | 31.675 | 94 | 0.613236 | false |
kbsezginel/tee_mof | thermof/reldist.py | 1 | 4529 | # Date: February 2017
# Author: Patrick Asinger and Kutay B. Sezginel
"""
Calculate relative distance between interpenetrated frameworks
"""
import os
import math
def reldist(traj_path, end=300000):
segmentation = []
x_axis_data = []
MOF1_pt1_coords = []
MOF1_pt2_coords = []
MOF1_pt3_coords = []
MOF1_pt4_coords = []
MOF1_pt5_coords = []
MOF1_pt6_coords = []
MOF1_pt7_coords = []
MOF1_pt8_coords = []
MOF2_pt1_coords = []
MOF2_pt2_coords = []
MOF2_pt3_coords = []
MOF2_pt4_coords = []
MOF2_pt5_coords = []
MOF2_pt6_coords = []
MOF2_pt7_coords = []
MOF2_pt8_coords = []
equil_end_timestep = end
equil_end_linenum = []
with open(traj_path, 'r') as t:
for l, line in enumerate(t):
if '30 30 30' in line:
MOF1_pt1_initial = l
if '40 30 30' in line:
MOF1_pt2_initial = l
if '30 40 30' in line:
MOF1_pt3_initial = l
if '30 30 40' in line:
MOF1_pt4_initial = l
if '40 40 30' in line:
MOF1_pt5_initial = l
if '40 30 40' in line:
MOF1_pt6_initial = l
if '30 40 40' in line:
MOF1_pt7_initial = l
if '40 40 40' in line:
MOF1_pt8_initial = l
if '35 35 35' in line:
MOF2_pt1_initial = l
if 'Timestep' in line:
segmentation.append(l)
x_axis_data.append(line.split()[2])
if float(line.split()[2]) > equil_end_timestep:
equil_end_linenum.append(l)
with open(traj_path, 'r') as t:
data = t.readlines()
for i in segmentation:
MOF1_pt1_coords.append(data[i+MOF1_pt1_initial-1].split()[1:])
MOF1_pt2_coords.append(data[i+MOF1_pt2_initial-1].split()[1:])
MOF1_pt3_coords.append(data[i+MOF1_pt3_initial-1].split()[1:])
MOF1_pt4_coords.append(data[i+MOF1_pt4_initial-1].split()[1:])
MOF1_pt5_coords.append(data[i+MOF1_pt5_initial-1].split()[1:])
MOF1_pt6_coords.append(data[i+MOF1_pt6_initial-1].split()[1:])
MOF1_pt7_coords.append(data[i+MOF1_pt7_initial-1].split()[1:])
MOF1_pt8_coords.append(data[i+MOF1_pt8_initial-1].split()[1:])
MOF2_pt1_coords.append(data[i+MOF2_pt1_initial-1].split()[1:])
MOF1_xave = []
MOF1_yave = []
MOF1_zave = []
MOF1_center_coords = []
for i in range(len(MOF1_pt1_coords)):
MOF1_xave.append((float(MOF1_pt1_coords[i][0]) + float(MOF1_pt2_coords[i][0]) + float(MOF1_pt3_coords[i][0]) + float(MOF1_pt4_coords[i][0]) + float(MOF1_pt5_coords[i][0]) + float(MOF1_pt6_coords[i][0]) + float(MOF1_pt7_coords[i][0]) + float(MOF1_pt8_coords[i][0])) / 8)
MOF1_yave.append((float(MOF1_pt1_coords[i][1]) + float(MOF1_pt2_coords[i][1]) + float(MOF1_pt3_coords[i][1]) + float(MOF1_pt4_coords[i][1]) + float(MOF1_pt5_coords[i][1]) + float(MOF1_pt6_coords[i][1]) + float(MOF1_pt7_coords[i][1]) + float(MOF1_pt8_coords[i][1])) / 8)
MOF1_zave.append((float(MOF1_pt1_coords[i][2]) + float(MOF1_pt2_coords[i][2]) + float(MOF1_pt3_coords[i][2]) + float(MOF1_pt4_coords[i][2]) + float(MOF1_pt5_coords[i][2]) + float(MOF1_pt6_coords[i][2]) + float(MOF1_pt7_coords[i][2]) + float(MOF1_pt8_coords[i][2])) / 8)
MOF1_center_coords.append([MOF1_xave[i], MOF1_yave[i], MOF1_zave[i]])
distance = []
after_equil_distance = []
for i in range(len(MOF1_pt1_coords)):
        distance.append(((float(MOF1_center_coords[i][0]) - float(MOF2_pt1_coords[i][0])) ** 2 +
                         (float(MOF1_center_coords[i][1]) - float(MOF2_pt1_coords[i][1])) ** 2 +
                         (float(MOF1_center_coords[i][2]) - float(MOF2_pt1_coords[i][2])) ** 2) ** 0.5)
MOF_reldist = []
for i in range(len(MOF1_pt1_coords)):
        MOF_reldist.append([((float(MOF2_pt1_coords[i][0]) - float(MOF1_pt1_coords[i][0])) / (float(MOF1_pt2_coords[i][0]) - float(MOF1_pt1_coords[i][0]))),
                            ((float(MOF2_pt1_coords[i][1]) - float(MOF1_pt1_coords[i][1])) / (float(MOF1_pt3_coords[i][1]) - float(MOF1_pt1_coords[i][1]))),
                            ((float(MOF2_pt1_coords[i][2]) - float(MOF1_pt1_coords[i][2])) / (float(MOF1_pt4_coords[i][2]) - float(MOF1_pt1_coords[i][2])))])
x_coords = [i[0] for i in MOF_reldist]
y_coords = [i[1] for i in MOF_reldist]
z_coords = [i[2] for i in MOF_reldist]
return x_coords, y_coords, z_coords
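# Illustrative sketch, not part of the original module: the trajectory path is
# hypothetical; the function returns per-frame fractional offsets of MOF2's
# reference point within MOF1's reference cube.
#
#     x_rel, y_rel, z_rel = reldist('path/to/trajectory_file', end=300000)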
| mit | 119,960,073,522,521,230 | 45.690722 | 277 | 0.558181 | false |
ronaldbradford/os-demo | oslo_log/examples/syntax_helper.py | 1 | 2797 | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A syntax example with helper debugging of minimum Oslo Logging
This example requires the following package to be installed.
$ pip install oslo.log
Additional Oslo packages installed include oslo.config, oslo.context,
oslo.i18n, oslo.serialization and oslo.utils.
More information about Oslo Logging can be found at:
http://docs.openstack.org/developer/oslo.log/usage.html
"""
from oslo_config import cfg
from oslo_log import log as logging
# Use default Python logging to display running output
import logging as py_logging
LOG = py_logging.getLogger(__name__)
CONF = cfg.CONF
DOMAIN = "demo"
def prepare():
"""Prepare Oslo Logging (2 or 3 steps)
Use of Oslo Logging involves the following:
* logging.register_options
* logging.set_defaults (optional)
* logging.setup
"""
LOG.debug("Prepare Oslo Logging")
LOG.info("Size of configuration options before %d", len(CONF))
# Required step to register common, logging and generic configuration
# variables
logging.register_options(CONF)
LOG.info("Size of configuration options after %d", len(CONF))
LOG.info("List of Oslo Logging configuration options and default values")
LOG.info("=" * 80)
for c in CONF:
LOG.info("%s = %s" % (c, CONF[c]))
LOG.info("=" * 80)
# Optional step to set new defaults if necessary for
# * logging_context_format_string
# * default_log_levels
#
# These variables default to respectively:
#
# import oslo_log
# oslo_log._options.DEFAULT_LOG_LEVELS
# oslo_log._options.log_opts[0].default
#
extra_log_level_defaults = [
'dogpile=INFO',
'routes=INFO'
]
logging.set_defaults(default_log_levels=CONF.default_log_levels +
extra_log_level_defaults)
LOG.info("New list of package log level defaults")
LOG.info("=" * 80)
for pair in CONF.default_log_levels:
LOG.info(pair.partition('='))
LOG.info("=" * 80)
# Required setup based on configuration and domain
logging.setup(CONF, DOMAIN)
if __name__ == '__main__':
py_logging.basicConfig(level=py_logging.DEBUG)
prepare()
LOG.info("Welcome to Oslo Logging")
| apache-2.0 | 1,801,431,667,089,447,000 | 27.835052 | 77 | 0.690025 | false |
crossroadchurch/paul | openlp/core/lib/colorbutton.py | 1 | 3307 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
Provide a custom widget based on QPushButton for the selection of colors
"""
from PyQt4 import QtCore, QtGui
from openlp.core.common import translate
class ColorButton(QtGui.QPushButton):
"""
Subclasses QPushbutton to create a "Color Chooser" button
"""
colorChanged = QtCore.pyqtSignal(str)
def __init__(self, parent=None):
"""
Initialise the ColorButton
"""
super(ColorButton, self).__init__()
self.parent = parent
self.change_color('#ffffff')
self.setToolTip(translate('OpenLP.ColorButton', 'Click to select a color.'))
self.clicked.connect(self.on_clicked)
def change_color(self, color):
"""
Sets the _color variable and the background color.
        :param color: String representation of a hexadecimal color
"""
self._color = color
self.setStyleSheet('background-color: %s' % color)
@property
def color(self):
"""
Property method to return the color variable
        :return: String representation of a hexadecimal color
"""
return self._color
@color.setter
def color(self, color):
"""
Property setter to change the instance color
        :param color: String representation of a hexadecimal color
"""
self.change_color(color)
def on_clicked(self):
"""
Handle the PushButton clicked signal, showing the ColorDialog and validating the input
"""
new_color = QtGui.QColorDialog.getColor(QtGui.QColor(self._color), self.parent)
if new_color.isValid() and self._color != new_color.name():
self.change_color(new_color.name())
self.colorChanged.emit(new_color.name())
| gpl-2.0 | 7,508,550,098,501,231,000 | 39.329268 | 94 | 0.527366 | false |
AKLLaursen/gamgee | blogengine/models.py | 1 | 2183 | from django.db import models
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.utils.text import slugify
from django.db.models.signals import post_save
from django.core.cache import cache
# Category model
class Category(models.Model):
name = models.CharField(max_length = 200)
description = models.TextField()
slug = models.SlugField(max_length = 40, unique = True, blank = True, null = True)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(str(self.name))
super(Category, self).save(*args, **kwargs)
def get_absolute_url(self):
return '/category/{0}/'.format(self.slug)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'categories'
# Tag model
class Tag(models.Model):
name = models.CharField(max_length = 200)
description = models.TextField()
slug = models.SlugField(max_length = 40, unique = True, blank = True, null = True)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(str(self.name))
super(Tag, self).save(*args, **kwargs)
def get_absolute_url(self):
return "/tag/%s/" % (self.slug)
def __str__(self):
return self.name
# Blogpost model
class Post(models.Model):
# Model specifications
title = models.CharField(max_length = 200)
author = models.ForeignKey(User, on_delete = models.CASCADE)
category = models.ForeignKey(Category, blank = True, null = True, on_delete = models.CASCADE)
tags = models.ManyToManyField(Tag, blank = True, null = True)
pub_date = models.DateTimeField()
text = models.TextField()
slug = models.SlugField(max_length = 40, unique = True)
site = models.ForeignKey(Site, on_delete = models.CASCADE)
# Define url for each post. (Possibly change this to have 0 in front of single number elements.)
def get_absolute_url(self):
return '/{0}/{1}/{2}/{3}/'.format(self.pub_date.year, self.pub_date.month, self.pub_date.day, self.slug)
def __str__(self):
return self.title
class Meta:
ordering = ['-pub_date']
# Define signals function
def new_post(sender, instance, created, **kwargs):
cache.clear()
# Set up signals
post_save.connect(new_post, sender = Post)
| mit | -1,252,414,630,109,535,500 | 26.64557 | 106 | 0.706825 | false |
ericmjl/bokeh | tests/unit/bokeh/util/test_hex.py | 1 | 3858 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import numpy as np
# Module under test
import bokeh.util.hex as buh # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
np.random.seed(0)
n = 500
x = 2 + np.random.standard_normal(n)
y = 2 + np.random.standard_normal(n)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_axial_to_cartesian(object):
def test_default_aspect_pointytop(self) -> None:
q = np.array([0, 0, 0, 1, -1, 1, -1])
r = np.array([0, 1, -1, 0, 1, -1, 0])
x, y = buh.axial_to_cartesian(q, r, 1, "pointytop")
sq3 = np.sqrt(3)
assert list(x) == [0, sq3/2, -sq3/2, sq3, -sq3/2, sq3/2, -sq3]
assert list(y) == [-0.0, -1.5, 1.5, -0.0, -1.5, 1.5, -0.0]
def test_default_aspect_flattop(self) -> None:
q = np.array([0, 0, 0, 1, -1, 1, -1])
r = np.array([0, 1, -1, 0, 1, -1, 0])
x, y = buh.axial_to_cartesian(q, r, 1, "flattop")
sq3 = np.sqrt(3)
assert list(x) == [0.0, 0.0, 0.0, 1.5, -1.5, 1.5, -1.5]
assert list(y) == [0, -sq3, sq3, -sq3/2, -sq3/2, sq3/2, sq3/2]
class Test_cartesian_to_axial(object):
def test_default_aspect_pointytop(self) -> None:
x = np.array([0, -2, 2, -1.5, -1.5, 1.5, 1.5])
y = np.array([0, 0, 0, 1.5, -1.5, 1.5, -1.5])
q, r = buh.cartesian_to_axial(x, y, 1, "pointytop")
assert list(zip(q, r)) == [
(0,0), (-1, 0), (1,0), (0,-1), (-1, 1), (1, -1), (0,1)
]
def test_default_aspect_flattop(self) -> None:
x = np.array([0, 0, 0, 1.5, -1.5, 1.5, -1.5])
y = np.array([0, -2, 2, -1.5, -1.5, 1.5, 1.5])
q, r = buh.cartesian_to_axial(x, y, 1, "flattop")
assert list(zip(q, r)) == [
(0,0), (0,1), (0,-1), (1, 0), (-1, 1), (1, -1), (-1,0)
]
class Test_hexbin(object):
# hexbin requires pandas
def test_gaussian_pointytop(self, pd) -> None:
bins = buh.hexbin(x, y, 2)
assert list(bins.q) == [0,0,1,1,1,2,2]
assert list(bins.r) == [-1,0,-2,-1,0,-2,-1]
assert list(bins.counts) == [9,54,1,313,98,3,22]
assert bins.equals(buh.hexbin(x, y, 2, "pointytop"))
def test_gaussian_flattop(self, pd) -> None:
bins = buh.hexbin(x, y, 2, "flattop")
assert list(bins.q) == [0, 0, 1, 1, 1, 2]
assert list(bins.r) == [-1, 0, -2, -1, 0, -2]
assert list(bins.counts) == [95, 57, 14, 324, 8, 2]
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 807,412,369,140,659,100 | 34.394495 | 78 | 0.347589 | false |
dlatk/dlatk | dlatk/sqlitemethods/sqliteMethods.py | 1 | 3334 | """
Sqlite interface based on the sqlite3 package
"""
import sys, time
import sqlite3
from dlatk.dlaConstants import MAX_ATTEMPTS, SQLITE_ERROR_SLEEP, MAX_SQL_PRINT_CHARS, warn
def dbConnect(db):
"""
Creates connection with the database
"""
db = db + ".db"
dbConn = None
attempts = 0
while(1):
try:
dbConn = sqlite3.connect(db)
break
except sqlite3.Error as err:
attempts += 1
warn(" *Sqlite Connect Error on db: %s\n%s\n (%d attempt)"%(db, err, attempts))
time.sleep(SQLITE_ERROR_SLEEP)
if(attempts > MAX_ATTEMPTS):
sys.exit(1)
dbCursor = dbConn.cursor()
return dbConn, dbCursor
def executeWriteMany(db, dbConn, sql, rows, writeCursor=None, warnQuery=False):
"""Executes a write query"""
if warnQuery:
warn("SQL (write many) QUERY: %s"% sql)
if not writeCursor:
writeCursor = dbConn.cursor()
attempts = 0
while(1):
try:
writeCursor.executemany(sql, rows)
dbConn.commit()
break
except sqlite3.Error as err:
attempts += 1
warn(" *Sqlite Corpus DB Error on %s:\n%s (%d attempt)" % (sql, err, attempts))
time.sleep(SQLITE_ERROR_SLEEP)
dbConn, dbCursor = dbConnect(db)
writeCursor = dbConn.cursor()
if(attempts > MAX_ATTEMPTS):
sys.exit(1)
return writeCursor
def executeGetList(db, dbCursor, sql, warnQuery=False):
"""Executes a SELECT query"""
if warnQuery:
warn("SQL Query: %s"% sql[:MAX_SQL_PRINT_CHARS])
attempts = 0
data = ""
while(1):
try:
dbCursor.execute(sql)
data = dbCursor.fetchall()
break
except sqlite3.Error as err:
attempts += 1
warn(" *Sqlite Corpus DB Error on %s:\n%s (%d attempt)" % (sql, err, attempts))
time.sleep(SQLITE_ERROR_SLEEP)
dbConn, dbCursor = dbConnect(db)
writeCursor = dbConn.cursor()
if(attempts > MAX_ATTEMPTS):
sys.exit(1)
return data
def execute(db, dbCursor, sql, warnQuery=True):
"""Executes a given query"""
if warnQuery:
warn("SQL Query: %s" % sql[:MAX_SQL_PRINT_CHARS])
attempts = 0
while(1):
try:
dbCursor.execute(sql)
break
except sqlite3.Error as err:
attempts += 1
warn(" *Sqlite Corpus DB Error on %s:\n%s (%d attempt)" % (sql, err, attempts))
time.sleep(SQLITE_ERROR_SLEEP)
dbConn, dbCursor = dbConnect(db)
if(attempts > MAX_ATTEMPTS):
sys.exit(1)
return True
def tableExists(db, dbCursor, table_name):
"""Checks if table exists"""
sql = """SELECT count(name) FROM sqlite_master WHERE type='table' AND name='%s'"""% table_name
count = executeGetList(db, dbCursor, sql)
if count[0][0] > 0:
return True
else:
return False
def primaryKeyExists(db, dbCursor, table_name, column_name):
"""Checks if primary key exists on a table's column"""
sql = "PRAGMA table_info("+table_name+")"
data = executeGetList(db, dbCursor, sql)
for row in data:
if row[1] == column_name and row[len(row)-1] == 1:
            return True
return False
def indexExists(db, dbCursor, table_name, column_name):
"""Checks if index exists on a table's column"""
sql = "SELECT name, tbl_name, sql FROM sqlite_master WHERE type='index'"
data = executeGetList(db, dbCursor, sql)
for row in data:
db_sql = row[len(row)-1].split(" ")
db_col_name = db_sql[len(db_sql)-1][1:-1]
if row[1] == table_name and db_col_name == column_name:
return True
return False
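# Illustrative sketch, not part of the original module: typical use of the
# helpers above; the database and table names are hypothetical.
#
#     conn, cur = dbConnect('mydb')   # opens (or creates) ./mydb.db
#     execute('mydb', cur, "CREATE TABLE IF NOT EXISTS msgs (id INTEGER)")
#     executeWriteMany('mydb', conn, "INSERT INTO msgs VALUES (?)", [(1,), (2,)])
#     executeGetList('mydb', cur, "SELECT count(*) FROM msgs")   # -> [(2,)]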
| gpl-3.0 | 2,413,575,742,804,356,000 | 26.553719 | 95 | 0.670666 | false |
natefoo/ansible-modules-extras | network/f5/bigip_pool.py | 3 | 19402 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Matt Hite <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_pool
short_description: "Manages F5 BIG-IP LTM pools"
description:
- "Manages F5 BIG-IP LTM pools via iControl SOAP API"
version_added: "1.2"
author: "Matt Hite (@mhite)"
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
choices: []
aliases: []
user:
description:
- BIG-IP username
required: true
default: null
choices: []
aliases: []
password:
description:
- BIG-IP password
required: true
default: null
choices: []
aliases: []
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites. Prior to 2.0, this module would always
validate on python >= 2.7.9 and never validate on python <= 2.7.8
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 2.0
state:
description:
- Pool/pool member state
required: false
default: present
choices: ['present', 'absent']
aliases: []
name:
description:
- Pool name
required: true
default: null
choices: []
aliases: ['pool']
partition:
description:
- Partition of pool/pool member
required: false
default: 'Common'
choices: []
aliases: []
lb_method:
description:
- Load balancing method
version_added: "1.3"
required: False
default: 'round_robin'
choices: ['round_robin', 'ratio_member', 'least_connection_member',
'observed_member', 'predictive_member', 'ratio_node_address',
'least_connection_node_address', 'fastest_node_address',
'observed_node_address', 'predictive_node_address',
'dynamic_ratio', 'fastest_app_response', 'least_sessions',
'dynamic_ratio_member', 'l3_addr', 'unknown',
'weighted_least_connection_member',
'weighted_least_connection_node_address',
'ratio_session', 'ratio_least_connection_member',
'ratio_least_connection_node_address']
aliases: []
monitor_type:
description:
- Monitor rule type when monitors > 1
version_added: "1.3"
required: False
default: null
choices: ['and_list', 'm_of_n']
aliases: []
quorum:
description:
- Monitor quorum value when monitor_type is m_of_n
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
monitors:
description:
- Monitor template name list. Always use the full path to the monitor.
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
slow_ramp_time:
description:
- Sets the ramp-up time (in seconds) to gradually ramp up the load on newly added or freshly detected up pool members
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
service_down_action:
description:
- Sets the action to take when node goes down in pool
version_added: "1.3"
required: False
default: null
choices: ['none', 'reset', 'drop', 'reselect']
aliases: []
host:
description:
- "Pool member IP"
required: False
default: null
choices: []
aliases: ['address']
port:
description:
- "Pool member port"
required: False
default: null
choices: []
aliases: []
'''
EXAMPLES = '''
## playbook task examples:
---
# file bigip-test.yml
# ...
- hosts: localhost
tasks:
- name: Create pool
local_action: >
bigip_pool
server=lb.mydomain.com
user=admin
password=mysecret
state=present
name=matthite-pool
partition=matthite
lb_method=least_connection_member
slow_ramp_time=120
- name: Modify load balancer method
local_action: >
bigip_pool
server=lb.mydomain.com
user=admin
password=mysecret
state=present
name=matthite-pool
partition=matthite
lb_method=round_robin
- hosts: bigip-test
tasks:
- name: Add pool member
local_action: >
bigip_pool
server=lb.mydomain.com
user=admin
password=mysecret
state=present
name=matthite-pool
partition=matthite
host="{{ ansible_default_ipv4["address"] }}"
port=80
- name: Remove pool member from pool
local_action: >
bigip_pool
server=lb.mydomain.com
user=admin
password=mysecret
state=absent
name=matthite-pool
partition=matthite
host="{{ ansible_default_ipv4["address"] }}"
port=80
- hosts: localhost
tasks:
- name: Delete pool
local_action: >
bigip_pool
server=lb.mydomain.com
user=admin
password=mysecret
state=absent
name=matthite-pool
partition=matthite
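# Illustrative only, not from the original module: attaching two health
# monitors with an m_of_n rule; the monitor names below are hypothetical.
- hosts: localhost
  tasks:
    - name: Create pool with monitors
      local_action: >
          bigip_pool
          server=lb.mydomain.com
          user=admin
          password=mysecret
          state=present
          name=matthite-pool
          partition=matthite
          monitor_type=m_of_n
          quorum=1
          monitors=/matthite/mon_http,/matthite/mon_tcp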
'''
def pool_exists(api, pool):
# hack to determine if pool exists
result = False
try:
api.LocalLB.Pool.get_object_status(pool_names=[pool])
result = True
except bigsuds.OperationFailed, e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_pool(api, pool, lb_method):
# create requires lb_method but we don't want to default
# to a value on subsequent runs
if not lb_method:
lb_method = 'round_robin'
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.LocalLB.Pool.create_v2(pool_names=[pool], lb_methods=[lb_method],
members=[[]])
def remove_pool(api, pool):
api.LocalLB.Pool.delete_pool(pool_names=[pool])
def get_lb_method(api, pool):
lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0]
lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
return lb_method
def set_lb_method(api, pool, lb_method):
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method])
def get_monitors(api, pool):
result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule']
monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
quorum = result['quorum']
monitor_templates = result['monitor_templates']
return (monitor_type, quorum, monitor_templates)
def set_monitors(api, pool, monitor_type, quorum, monitor_templates):
monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule}
api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association])
def get_slow_ramp_time(api, pool):
result = api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0]
return result
def set_slow_ramp_time(api, pool, seconds):
api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds])
def get_action_on_service_down(api, pool):
result = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0]
result = result.split("SERVICE_DOWN_ACTION_")[-1].lower()
return result
def set_action_on_service_down(api, pool, action):
action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper()
api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action])
def member_exists(api, pool, address, port):
# hack to determine if member exists
result = False
try:
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.get_member_object_status(pool_names=[pool],
members=[members])
result = True
except bigsuds.OperationFailed, e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def delete_node_address(api, address):
result = False
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
except bigsuds.OperationFailed, e:
if "is referenced by a member of pool" in str(e):
result = False
else:
# genuine exception
raise
return result
def remove_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members])
def add_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members])
def main():
lb_method_choices = ['round_robin', 'ratio_member',
'least_connection_member', 'observed_member',
'predictive_member', 'ratio_node_address',
'least_connection_node_address',
'fastest_node_address', 'observed_node_address',
'predictive_node_address', 'dynamic_ratio',
'fastest_app_response', 'least_sessions',
'dynamic_ratio_member', 'l3_addr', 'unknown',
'weighted_least_connection_member',
'weighted_least_connection_node_address',
'ratio_session', 'ratio_least_connection_member',
'ratio_least_connection_node_address']
monitor_type_choices = ['and_list', 'm_of_n']
service_down_choices = ['none', 'reset', 'drop', 'reselect']
argument_spec=f5_argument_spec();
argument_spec.update(dict(
name = dict(type='str', required=True, aliases=['pool']),
lb_method = dict(type='str', choices=lb_method_choices),
monitor_type = dict(type='str', choices=monitor_type_choices),
quorum = dict(type='int'),
monitors = dict(type='list'),
slow_ramp_time = dict(type='int'),
service_down_action = dict(type='str', choices=service_down_choices),
host = dict(type='str', aliases=['address']),
port = dict(type='int')
)
)
module = AnsibleModule(
argument_spec = argument_spec,
supports_check_mode=True
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
server = module.params['server']
user = module.params['user']
password = module.params['password']
state = module.params['state']
partition = module.params['partition']
validate_certs = module.params['validate_certs']
name = module.params['name']
pool = fq_name(partition,name)
lb_method = module.params['lb_method']
if lb_method:
lb_method = lb_method.lower()
monitor_type = module.params['monitor_type']
if monitor_type:
monitor_type = monitor_type.lower()
quorum = module.params['quorum']
monitors = module.params['monitors']
if monitors:
monitors = []
for monitor in module.params['monitors']:
monitors.append(fq_name(partition, monitor))
slow_ramp_time = module.params['slow_ramp_time']
service_down_action = module.params['service_down_action']
if service_down_action:
service_down_action = service_down_action.lower()
host = module.params['host']
address = fq_name(partition,host)
port = module.params['port']
# sanity check user supplied values
if (host and port is None) or (port is not None and not host):
module.fail_json(msg="both host and port must be supplied")
if port is not None and (0 > port or port > 65535):
module.fail_json(msg="valid ports must be in range 0 - 65535")
if monitors:
if len(monitors) == 1:
# set default required values for single monitor
quorum = 0
monitor_type = 'single'
elif len(monitors) > 1:
if not monitor_type:
module.fail_json(msg="monitor_type required for monitors > 1")
if monitor_type == 'm_of_n' and not quorum:
module.fail_json(msg="quorum value required for monitor_type m_of_n")
if monitor_type != 'm_of_n':
quorum = 0
elif monitor_type:
# no monitors specified but monitor_type exists
module.fail_json(msg="monitor_type require monitors parameter")
elif quorum is not None:
# no monitors specified but quorum exists
module.fail_json(msg="quorum requires monitors parameter")
try:
api = bigip_api(server, user, password, validate_certs)
result = {'changed': False} # default
if state == 'absent':
if host and port and pool:
# member removal takes precedent
if pool_exists(api, pool) and member_exists(api, pool, address, port):
if not module.check_mode:
remove_pool_member(api, pool, address, port)
deleted = delete_node_address(api, address)
result = {'changed': True, 'deleted': deleted}
else:
result = {'changed': True}
elif pool_exists(api, pool):
# no host/port supplied, must be pool removal
if not module.check_mode:
# hack to handle concurrent runs of module
# pool might be gone before we actually remove it
try:
remove_pool(api, pool)
result = {'changed': True}
except bigsuds.OperationFailed, e:
if "was not found" in str(e):
result = {'changed': False}
else:
# genuine exception
raise
else:
# check-mode return value
result = {'changed': True}
elif state == 'present':
update = False
if not pool_exists(api, pool):
# pool does not exist -- need to create it
if not module.check_mode:
# a bit of a hack to handle concurrent runs of this module.
# even though we've checked the pool doesn't exist,
# it may exist by the time we run create_pool().
# this catches the exception and does something smart
# about it!
try:
create_pool(api, pool, lb_method)
result = {'changed': True}
except bigsuds.OperationFailed, e:
if "already exists" in str(e):
update = True
else:
# genuine exception
raise
else:
if monitors:
set_monitors(api, pool, monitor_type, quorum, monitors)
if slow_ramp_time:
set_slow_ramp_time(api, pool, slow_ramp_time)
if service_down_action:
set_action_on_service_down(api, pool, service_down_action)
if host and port:
add_pool_member(api, pool, address, port)
else:
# check-mode return value
result = {'changed': True}
else:
# pool exists -- potentially modify attributes
update = True
if update:
if lb_method and lb_method != get_lb_method(api, pool):
if not module.check_mode:
set_lb_method(api, pool, lb_method)
result = {'changed': True}
if monitors:
t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, pool)
if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
if not module.check_mode:
set_monitors(api, pool, monitor_type, quorum, monitors)
result = {'changed': True}
if slow_ramp_time and slow_ramp_time != get_slow_ramp_time(api, pool):
if not module.check_mode:
set_slow_ramp_time(api, pool, slow_ramp_time)
result = {'changed': True}
if service_down_action and service_down_action != get_action_on_service_down(api, pool):
if not module.check_mode:
set_action_on_service_down(api, pool, service_down_action)
result = {'changed': True}
if (host and port) and not member_exists(api, pool, address, port):
if not module.check_mode:
add_pool_member(api, pool, address, port)
result = {'changed': True}
if (host and port == 0) and not member_exists(api, pool, address, port):
if not module.check_mode:
add_pool_member(api, pool, address, port)
result = {'changed': True}
except Exception, e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
| gpl-3.0 | 2,605,106,136,488,584,700 | 34.996289 | 166 | 0.562777 | false |
TieWei/nova | nova/tests/cells/test_cells_manager.py | 9 | 37403 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellsManager
"""
import copy
import datetime
from oslo.config import cfg
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova import context
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import test
from nova.tests.cells import fakes
from nova.tests import fake_instance_actions
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
FAKE_COMPUTE_NODES = [dict(id=1), dict(id=2)]
FAKE_SERVICES = [dict(id=1, host='host1',
compute_node=[FAKE_COMPUTE_NODES[0]]),
dict(id=2, host='host2',
compute_node=[FAKE_COMPUTE_NODES[1]]),
dict(id=3, host='host3', compute_node=[])]
FAKE_TASK_LOGS = [dict(id=1, host='host1'),
dict(id=2, host='host2')]
class CellsManagerClassTestCase(test.NoDBTestCase):
"""Test case for CellsManager class."""
def setUp(self):
super(CellsManagerClassTestCase, self).setUp()
fakes.init(self)
# pick a child cell to use for tests.
self.our_cell = 'grandchild-cell1'
self.cells_manager = fakes.get_cells_manager(self.our_cell)
self.msg_runner = self.cells_manager.msg_runner
self.state_manager = fakes.get_state_manager(self.our_cell)
self.driver = self.cells_manager.driver
self.ctxt = 'fake_context'
def _get_fake_response(self, raw_response=None, exc=False):
if exc:
return messaging.Response('fake', test.TestingException(),
True)
if raw_response is None:
raw_response = 'fake-response'
return messaging.Response('fake', raw_response, False)
def test_get_cell_info_for_neighbors(self):
self.mox.StubOutWithMock(self.cells_manager.state_manager,
'get_cell_info_for_neighbors')
self.cells_manager.state_manager.get_cell_info_for_neighbors()
self.mox.ReplayAll()
self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
def test_post_start_hook_child_cell(self):
self.mox.StubOutWithMock(self.driver, 'start_consumers')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')
self.driver.start_consumers(self.msg_runner)
context.get_admin_context().AndReturn(self.ctxt)
self.cells_manager._update_our_parents(self.ctxt)
self.mox.ReplayAll()
self.cells_manager.post_start_hook()
def test_post_start_hook_middle_cell(self):
cells_manager = fakes.get_cells_manager('child-cell2')
msg_runner = cells_manager.msg_runner
driver = cells_manager.driver
self.mox.StubOutWithMock(driver, 'start_consumers')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(msg_runner,
'ask_children_for_capabilities')
self.mox.StubOutWithMock(msg_runner,
'ask_children_for_capacities')
driver.start_consumers(msg_runner)
context.get_admin_context().AndReturn(self.ctxt)
msg_runner.ask_children_for_capabilities(self.ctxt)
msg_runner.ask_children_for_capacities(self.ctxt)
self.mox.ReplayAll()
cells_manager.post_start_hook()
def test_update_our_parents(self):
self.mox.StubOutWithMock(self.msg_runner,
'tell_parents_our_capabilities')
self.mox.StubOutWithMock(self.msg_runner,
'tell_parents_our_capacities')
self.msg_runner.tell_parents_our_capabilities(self.ctxt)
self.msg_runner.tell_parents_our_capacities(self.ctxt)
self.mox.ReplayAll()
self.cells_manager._update_our_parents(self.ctxt)
def test_schedule_run_instance(self):
host_sched_kwargs = 'fake_host_sched_kwargs_silently_passed'
self.mox.StubOutWithMock(self.msg_runner, 'schedule_run_instance')
our_cell = self.msg_runner.state_manager.get_my_state()
self.msg_runner.schedule_run_instance(self.ctxt, our_cell,
host_sched_kwargs)
self.mox.ReplayAll()
self.cells_manager.schedule_run_instance(self.ctxt,
host_sched_kwargs=host_sched_kwargs)
def test_build_instances(self):
build_inst_kwargs = {'instances': [1, 2]}
self.mox.StubOutWithMock(self.msg_runner, 'build_instances')
our_cell = self.msg_runner.state_manager.get_my_state()
self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs)
self.mox.ReplayAll()
self.cells_manager.build_instances(self.ctxt,
build_inst_kwargs=build_inst_kwargs)
def test_run_compute_api_method(self):
# Args should just be silently passed through
cell_name = 'fake-cell-name'
method_info = 'fake-method-info'
self.mox.StubOutWithMock(self.msg_runner,
'run_compute_api_method')
fake_response = self._get_fake_response()
self.msg_runner.run_compute_api_method(self.ctxt,
cell_name,
method_info,
True).AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.run_compute_api_method(
self.ctxt, cell_name=cell_name, method_info=method_info,
call=True)
self.assertEqual('fake-response', response)
def test_instance_update_at_top(self):
self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top')
self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.instance_update_at_top(self.ctxt,
instance='fake-instance')
def test_instance_destroy_at_top(self):
self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top')
self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.instance_destroy_at_top(self.ctxt,
instance='fake-instance')
def test_instance_delete_everywhere(self):
self.mox.StubOutWithMock(self.msg_runner,
'instance_delete_everywhere')
self.msg_runner.instance_delete_everywhere(self.ctxt,
'fake-instance',
'fake-type')
self.mox.ReplayAll()
self.cells_manager.instance_delete_everywhere(
self.ctxt, instance='fake-instance',
delete_type='fake-type')
def test_instance_fault_create_at_top(self):
self.mox.StubOutWithMock(self.msg_runner,
'instance_fault_create_at_top')
self.msg_runner.instance_fault_create_at_top(self.ctxt,
'fake-fault')
self.mox.ReplayAll()
self.cells_manager.instance_fault_create_at_top(
self.ctxt, instance_fault='fake-fault')
def test_bw_usage_update_at_top(self):
self.mox.StubOutWithMock(self.msg_runner,
'bw_usage_update_at_top')
self.msg_runner.bw_usage_update_at_top(self.ctxt,
'fake-bw-info')
self.mox.ReplayAll()
self.cells_manager.bw_usage_update_at_top(
self.ctxt, bw_update_info='fake-bw-info')
def test_heal_instances(self):
self.flags(instance_updated_at_threshold=1000,
instance_update_num_instances=2,
group='cells')
fake_context = context.RequestContext('fake', 'fake')
stalled_time = timeutils.utcnow()
updated_since = stalled_time - datetime.timedelta(seconds=1000)
def utcnow():
return stalled_time
call_info = {'get_instances': 0, 'sync_instances': []}
instances = ['instance1', 'instance2', 'instance3']
def get_instances_to_sync(context, **kwargs):
self.assertEqual(context, fake_context)
call_info['shuffle'] = kwargs.get('shuffle')
call_info['project_id'] = kwargs.get('project_id')
call_info['updated_since'] = kwargs.get('updated_since')
call_info['get_instances'] += 1
return iter(instances)
def instance_get_by_uuid(context, uuid):
return instances[int(uuid[-1]) - 1]
def sync_instance(context, instance):
self.assertEqual(context, fake_context)
call_info['sync_instances'].append(instance)
self.stubs.Set(cells_utils, 'get_instances_to_sync',
get_instances_to_sync)
self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid',
instance_get_by_uuid)
self.stubs.Set(self.cells_manager, '_sync_instance',
sync_instance)
self.stubs.Set(timeutils, 'utcnow', utcnow)
self.cells_manager._heal_instances(fake_context)
self.assertEqual(call_info['shuffle'], True)
self.assertEqual(call_info['project_id'], None)
self.assertEqual(call_info['updated_since'], updated_since)
self.assertEqual(call_info['get_instances'], 1)
# Only first 2
self.assertEqual(call_info['sync_instances'],
instances[:2])
call_info['sync_instances'] = []
self.cells_manager._heal_instances(fake_context)
self.assertEqual(call_info['shuffle'], True)
self.assertEqual(call_info['project_id'], None)
self.assertEqual(call_info['updated_since'], updated_since)
self.assertEqual(call_info['get_instances'], 2)
# Now the last 1 and the first 1
self.assertEqual(call_info['sync_instances'],
[instances[-1], instances[0]])
def test_sync_instances(self):
self.mox.StubOutWithMock(self.msg_runner,
'sync_instances')
self.msg_runner.sync_instances(self.ctxt, 'fake-project',
'fake-time', 'fake-deleted')
self.mox.ReplayAll()
self.cells_manager.sync_instances(self.ctxt,
project_id='fake-project',
updated_since='fake-time',
deleted='fake-deleted')
def test_service_get_all(self):
responses = []
expected_response = []
# 3 cells... so 3 responses. Each response is a list of services.
# Manager should turn these into a single list of responses.
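        # e.g. (illustrative) a service dict from cell 'path!to!cell0' comes
        # back annotated with that cell name, and the three per-cell lists are
        # concatenated into one flat expected_response list.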
for i in xrange(3):
cell_name = 'path!to!cell%i' % i
services = []
for service in FAKE_SERVICES:
services.append(copy.deepcopy(service))
expected_service = copy.deepcopy(service)
cells_utils.add_cell_to_service(expected_service, cell_name)
expected_response.append(expected_service)
response = messaging.Response(cell_name, services, False)
responses.append(response)
self.mox.StubOutWithMock(self.msg_runner,
'service_get_all')
self.msg_runner.service_get_all(self.ctxt,
'fake-filters').AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.service_get_all(self.ctxt,
filters='fake-filters')
self.assertEqual(expected_response, response)
def test_service_get_by_compute_host(self):
self.mox.StubOutWithMock(self.msg_runner,
'service_get_by_compute_host')
fake_cell = 'fake-cell'
fake_response = messaging.Response(fake_cell, FAKE_SERVICES[0],
False)
expected_response = copy.deepcopy(FAKE_SERVICES[0])
cells_utils.add_cell_to_service(expected_response, fake_cell)
cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
self.msg_runner.service_get_by_compute_host(self.ctxt,
fake_cell, 'fake-host').AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.service_get_by_compute_host(self.ctxt,
host_name=cell_and_host)
self.assertEqual(expected_response, response)
def test_get_host_uptime(self):
fake_cell = 'parent!fake-cell'
fake_host = 'fake-host'
fake_cell_and_host = cells_utils.cell_with_item(fake_cell, fake_host)
host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
fake_response = messaging.Response(fake_cell, host_uptime, False)
self.mox.StubOutWithMock(self.msg_runner,
'get_host_uptime')
self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\
AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.get_host_uptime(self.ctxt,
fake_cell_and_host)
self.assertEqual(host_uptime, response)
def test_service_update(self):
fake_cell = 'fake-cell'
fake_response = messaging.Response(
fake_cell, FAKE_SERVICES[0], False)
expected_response = copy.deepcopy(FAKE_SERVICES[0])
cells_utils.add_cell_to_service(expected_response, fake_cell)
cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
params_to_update = {'disabled': True}
self.mox.StubOutWithMock(self.msg_runner, 'service_update')
self.msg_runner.service_update(self.ctxt,
fake_cell, 'fake-host', 'nova-api',
params_to_update).AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.service_update(
self.ctxt, host_name=cell_and_host, binary='nova-api',
params_to_update=params_to_update)
self.assertEqual(expected_response, response)
def test_proxy_rpc_to_manager(self):
self.mox.StubOutWithMock(self.msg_runner,
'proxy_rpc_to_manager')
fake_response = self._get_fake_response()
cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
topic = rpc.queue_get_for(self.ctxt, CONF.compute_topic,
cell_and_host)
self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell',
'fake-host', topic, 'fake-rpc-msg',
True, -1).AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.proxy_rpc_to_manager(self.ctxt,
topic=topic, rpc_message='fake-rpc-msg', call=True,
timeout=-1)
self.assertEqual('fake-response', response)
def _build_task_log_responses(self, num):
responses = []
expected_response = []
        # `num` cells... so `num` responses. Each response is a list of task
        # log entries. Manager should turn these into a single list of
        # task log entries.
for i in xrange(num):
cell_name = 'path!to!cell%i' % i
task_logs = []
for task_log in FAKE_TASK_LOGS:
task_logs.append(copy.deepcopy(task_log))
expected_task_log = copy.deepcopy(task_log)
cells_utils.add_cell_to_task_log(expected_task_log,
cell_name)
expected_response.append(expected_task_log)
response = messaging.Response(cell_name, task_logs, False)
responses.append(response)
return expected_response, responses
def test_task_log_get_all(self):
expected_response, responses = self._build_task_log_responses(3)
self.mox.StubOutWithMock(self.msg_runner,
'task_log_get_all')
self.msg_runner.task_log_get_all(self.ctxt, None,
'fake-name', 'fake-begin',
'fake-end', host=None, state=None).AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.task_log_get_all(self.ctxt,
task_name='fake-name',
period_beginning='fake-begin', period_ending='fake-end')
self.assertEqual(expected_response, response)
def test_task_log_get_all_with_filters(self):
expected_response, responses = self._build_task_log_responses(1)
cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
self.mox.StubOutWithMock(self.msg_runner,
'task_log_get_all')
self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
'fake-name', 'fake-begin', 'fake-end', host='fake-host',
state='fake-state').AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.task_log_get_all(self.ctxt,
task_name='fake-name',
period_beginning='fake-begin', period_ending='fake-end',
host=cell_and_host, state='fake-state')
self.assertEqual(expected_response, response)
def test_task_log_get_all_with_cell_but_no_host_filters(self):
expected_response, responses = self._build_task_log_responses(1)
# Host filter only has cell name.
cell_and_host = 'fake-cell'
self.mox.StubOutWithMock(self.msg_runner,
'task_log_get_all')
self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
'fake-name', 'fake-begin', 'fake-end', host=None,
state='fake-state').AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.task_log_get_all(self.ctxt,
task_name='fake-name',
period_beginning='fake-begin', period_ending='fake-end',
host=cell_and_host, state='fake-state')
self.assertEqual(expected_response, response)
def test_compute_node_get_all(self):
responses = []
expected_response = []
# 3 cells... so 3 responses. Each response is a list of computes.
# Manager should turn these into a single list of responses.
for i in xrange(3):
cell_name = 'path!to!cell%i' % i
compute_nodes = []
for compute_node in FAKE_COMPUTE_NODES:
compute_nodes.append(copy.deepcopy(compute_node))
expected_compute_node = copy.deepcopy(compute_node)
cells_utils.add_cell_to_compute_node(expected_compute_node,
cell_name)
expected_response.append(expected_compute_node)
response = messaging.Response(cell_name, compute_nodes, False)
responses.append(response)
self.mox.StubOutWithMock(self.msg_runner,
'compute_node_get_all')
self.msg_runner.compute_node_get_all(self.ctxt,
hypervisor_match='fake-match').AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.compute_node_get_all(self.ctxt,
hypervisor_match='fake-match')
self.assertEqual(expected_response, response)
def test_compute_node_stats(self):
raw_resp1 = {'key1': 1, 'key2': 2}
raw_resp2 = {'key2': 1, 'key3': 2}
raw_resp3 = {'key3': 1, 'key4': 2}
responses = [messaging.Response('cell1', raw_resp1, False),
messaging.Response('cell2', raw_resp2, False),
messaging.Response('cell2', raw_resp3, False)]
expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}
self.mox.StubOutWithMock(self.msg_runner,
'compute_node_stats')
self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.compute_node_stats(self.ctxt)
self.assertEqual(expected_resp, response)
def test_compute_node_get(self):
fake_cell = 'fake-cell'
fake_response = messaging.Response(fake_cell,
FAKE_COMPUTE_NODES[0],
False)
expected_response = copy.deepcopy(FAKE_COMPUTE_NODES[0])
cells_utils.add_cell_to_compute_node(expected_response, fake_cell)
cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')
self.mox.StubOutWithMock(self.msg_runner,
'compute_node_get')
self.msg_runner.compute_node_get(self.ctxt,
'fake-cell', 'fake-id').AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.compute_node_get(self.ctxt,
compute_id=cell_and_id)
self.assertEqual(expected_response, response)
def test_actions_get(self):
fake_uuid = fake_instance_actions.FAKE_UUID
fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
fake_response = messaging.Response('fake-cell', [fake_act], False)
expected_response = [fake_act]
self.mox.StubOutWithMock(self.msg_runner, 'actions_get')
self.msg_runner.actions_get(self.ctxt, 'fake-cell',
'fake-uuid').AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.actions_get(self.ctxt, 'fake-cell',
'fake-uuid')
self.assertEqual(expected_response, response)
def test_action_get_by_request_id(self):
fake_uuid = fake_instance_actions.FAKE_UUID
fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
fake_response = messaging.Response('fake-cell', fake_act, False)
expected_response = fake_act
self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id')
self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell',
'fake-uuid', 'req-fake').AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.action_get_by_request_id(self.ctxt,
'fake-cell',
'fake-uuid',
'req-fake')
self.assertEqual(expected_response, response)
def test_action_events_get(self):
fake_action_id = fake_instance_actions.FAKE_ACTION_ID1
fake_events = fake_instance_actions.FAKE_EVENTS[fake_action_id]
fake_response = messaging.Response('fake-cell', fake_events, False)
expected_response = fake_events
self.mox.StubOutWithMock(self.msg_runner, 'action_events_get')
self.msg_runner.action_events_get(self.ctxt, 'fake-cell',
'fake-action').AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell',
'fake-action')
self.assertEqual(expected_response, response)
def test_consoleauth_delete_tokens(self):
instance_uuid = 'fake-instance-uuid'
self.mox.StubOutWithMock(self.msg_runner,
'consoleauth_delete_tokens')
self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid)
self.mox.ReplayAll()
self.cells_manager.consoleauth_delete_tokens(self.ctxt,
instance_uuid=instance_uuid)
def test_get_capacities(self):
cell_name = 'cell_name'
response = {"ram_free":
{"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}}
self.mox.StubOutWithMock(self.state_manager,
'get_capacities')
self.state_manager.get_capacities(cell_name).AndReturn(response)
self.mox.ReplayAll()
self.assertEqual(response,
self.cells_manager.get_capacities(self.ctxt, cell_name))
def test_validate_console_port(self):
instance_uuid = 'fake-instance-uuid'
cell_name = 'fake-cell-name'
instance = {'cell_name': cell_name}
console_port = 'fake-console-port'
console_type = 'fake-console-type'
self.mox.StubOutWithMock(self.msg_runner,
'validate_console_port')
self.mox.StubOutWithMock(self.cells_manager.db,
'instance_get_by_uuid')
fake_response = self._get_fake_response()
self.cells_manager.db.instance_get_by_uuid(self.ctxt,
instance_uuid).AndReturn(instance)
self.msg_runner.validate_console_port(self.ctxt, cell_name,
instance_uuid, console_port,
console_type).AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.validate_console_port(self.ctxt,
instance_uuid=instance_uuid, console_port=console_port,
console_type=console_type)
self.assertEqual('fake-response', response)
def test_bdm_update_or_create_at_top(self):
self.mox.StubOutWithMock(self.msg_runner,
'bdm_update_or_create_at_top')
self.msg_runner.bdm_update_or_create_at_top(self.ctxt,
'fake-bdm',
create='foo')
self.mox.ReplayAll()
self.cells_manager.bdm_update_or_create_at_top(self.ctxt,
'fake-bdm',
create='foo')
def test_bdm_destroy_at_top(self):
self.mox.StubOutWithMock(self.msg_runner, 'bdm_destroy_at_top')
self.msg_runner.bdm_destroy_at_top(self.ctxt,
'fake_instance_uuid',
device_name='fake_device_name',
volume_id='fake_volume_id')
self.mox.ReplayAll()
self.cells_manager.bdm_destroy_at_top(self.ctxt,
'fake_instance_uuid',
device_name='fake_device_name',
volume_id='fake_volume_id')
def test_get_migrations(self):
filters = {'status': 'confirmed'}
cell1_migrations = [{'id': 123}]
cell2_migrations = [{'id': 456}]
fake_responses = [self._get_fake_response(cell1_migrations),
self._get_fake_response(cell2_migrations)]
self.mox.StubOutWithMock(self.msg_runner,
'get_migrations')
self.msg_runner.get_migrations(self.ctxt, None, False, filters).\
AndReturn(fake_responses)
self.mox.ReplayAll()
response = self.cells_manager.get_migrations(self.ctxt, filters)
self.assertEqual([cell1_migrations[0], cell2_migrations[0]], response)
def test_get_migrations_for_a_given_cell(self):
filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'}
target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name'])
migrations = [{'id': 123}]
fake_responses = [self._get_fake_response(migrations)]
self.mox.StubOutWithMock(self.msg_runner,
'get_migrations')
self.msg_runner.get_migrations(self.ctxt, target_cell, False,
filters).AndReturn(fake_responses)
self.mox.ReplayAll()
response = self.cells_manager.get_migrations(self.ctxt, filters)
self.assertEqual(migrations, response)
def test_instance_update_from_api(self):
self.mox.StubOutWithMock(self.msg_runner,
'instance_update_from_api')
self.msg_runner.instance_update_from_api(self.ctxt,
'fake-instance',
'exp_vm', 'exp_task',
'admin_reset')
self.mox.ReplayAll()
self.cells_manager.instance_update_from_api(
self.ctxt, instance='fake-instance',
expected_vm_state='exp_vm',
expected_task_state='exp_task',
admin_state_reset='admin_reset')
def test_start_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'start_instance')
self.msg_runner.start_instance(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.start_instance(self.ctxt, instance='fake-instance')
def test_stop_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'stop_instance')
self.msg_runner.stop_instance(self.ctxt, 'fake-instance',
do_cast='meow')
self.mox.ReplayAll()
self.cells_manager.stop_instance(self.ctxt,
instance='fake-instance',
do_cast='meow')
def test_cell_create(self):
values = 'values'
response = 'created_cell'
self.mox.StubOutWithMock(self.state_manager,
'cell_create')
self.state_manager.cell_create(self.ctxt, values).\
AndReturn(response)
self.mox.ReplayAll()
self.assertEqual(response,
self.cells_manager.cell_create(self.ctxt, values))
def test_cell_update(self):
cell_name = 'cell_name'
values = 'values'
response = 'updated_cell'
self.mox.StubOutWithMock(self.state_manager,
'cell_update')
self.state_manager.cell_update(self.ctxt, cell_name, values).\
AndReturn(response)
self.mox.ReplayAll()
self.assertEqual(response,
self.cells_manager.cell_update(self.ctxt, cell_name,
values))
def test_cell_delete(self):
cell_name = 'cell_name'
response = 1
self.mox.StubOutWithMock(self.state_manager,
'cell_delete')
self.state_manager.cell_delete(self.ctxt, cell_name).\
AndReturn(response)
self.mox.ReplayAll()
self.assertEqual(response,
self.cells_manager.cell_delete(self.ctxt, cell_name))
def test_cell_get(self):
cell_name = 'cell_name'
response = 'cell_info'
self.mox.StubOutWithMock(self.state_manager,
'cell_get')
self.state_manager.cell_get(self.ctxt, cell_name).\
AndReturn(response)
self.mox.ReplayAll()
self.assertEqual(response,
self.cells_manager.cell_get(self.ctxt, cell_name))
def test_reboot_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance')
self.msg_runner.reboot_instance(self.ctxt, 'fake-instance',
'HARD')
self.mox.ReplayAll()
self.cells_manager.reboot_instance(self.ctxt,
instance='fake-instance',
reboot_type='HARD')
def test_suspend_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance')
self.msg_runner.suspend_instance(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.suspend_instance(self.ctxt,
instance='fake-instance')
def test_resume_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'resume_instance')
self.msg_runner.resume_instance(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.resume_instance(self.ctxt,
instance='fake-instance')
def test_terminate_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance')
self.msg_runner.terminate_instance(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.terminate_instance(self.ctxt,
instance='fake-instance')
def test_soft_delete_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance')
self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.soft_delete_instance(self.ctxt,
instance='fake-instance')
def test_resize_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'resize_instance')
self.msg_runner.resize_instance(self.ctxt, 'fake-instance',
'fake-flavor', 'fake-updates')
self.mox.ReplayAll()
self.cells_manager.resize_instance(
self.ctxt, instance='fake-instance', flavor='fake-flavor',
extra_instance_updates='fake-updates')
def test_live_migrate_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance')
self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance',
'fake-block', 'fake-commit',
'fake-host')
self.mox.ReplayAll()
self.cells_manager.live_migrate_instance(
self.ctxt, instance='fake-instance',
block_migration='fake-block', disk_over_commit='fake-commit',
host_name='fake-host')
def test_revert_resize(self):
self.mox.StubOutWithMock(self.msg_runner, 'revert_resize')
self.msg_runner.revert_resize(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.revert_resize(self.ctxt, instance='fake-instance')
def test_confirm_resize(self):
self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize')
self.msg_runner.confirm_resize(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance')
def test_reset_network(self):
self.mox.StubOutWithMock(self.msg_runner, 'reset_network')
self.msg_runner.reset_network(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.reset_network(self.ctxt, instance='fake-instance')
def test_inject_network_info(self):
self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info')
self.msg_runner.inject_network_info(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.inject_network_info(self.ctxt,
instance='fake-instance')
def test_snapshot_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance')
self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance',
'fake-id')
self.mox.ReplayAll()
self.cells_manager.snapshot_instance(self.ctxt,
instance='fake-instance',
image_id='fake-id')
def test_backup_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'backup_instance')
self.msg_runner.backup_instance(self.ctxt, 'fake-instance',
'fake-id', 'backup-type',
'rotation')
self.mox.ReplayAll()
self.cells_manager.backup_instance(self.ctxt,
instance='fake-instance',
image_id='fake-id',
backup_type='backup-type',
rotation='rotation')
| apache-2.0 | -5,645,561,614,519,444,000 | 45.75375 | 79 | 0.571131 | false |
lukovnikov/SME | Family/rTransE.py | 5 | 18397 | '''
Train and evaluate rTransE: TransE-style embeddings composed additively along
relation paths. The overall module layout (and some leftover naming such as
train_lstm) is adapted from the Theano LSTM sentiment-analysis tutorial.
'''
from collections import OrderedDict
import cPickle as pkl
import random
import sys
import time
import scipy.io
import numpy
import theano
from theano import config
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from aux_func import *
def L2sim(left, right):
return - tensor.sqrt(tensor.sum(tensor.sqr(left - right), axis=1))
def L2norm(left, right):
return tensor.sum(tensor.sqr(left - right), axis=1)
def margincost(pos, neg, marge=1.0):
out = neg - pos + marge
return tensor.sum(out * (out > 0)), out > 0
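# margincost is the usual pairwise ranking hinge loss used by TransE-style
# models: sum_i max(0, marge + score(neg_i) - score(pos_i)). Illustrative
# (hypothetical) values: pos=-1.0, neg=-3.0, marge=1.0 gives
# max(0, 1.0 - 3.0 + 1.0) = 0, so that negative is already ranked far enough
# below the positive; with neg=-1.2 the pair would contribute 0.8 instead.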
def numpy_floatX(data):
return numpy.asarray(data, dtype=config.floatX)
def get_minibatches_idx(n, minibatch_size, shuffle=False):
"""
Used to shuffle the dataset at each iteration.
"""
idx_list = numpy.arange(n, dtype="int32")
if shuffle:
random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
if (minibatch_start != n):
# Make a minibatch out of what is left
minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
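# Illustrative (hypothetical) call: get_minibatches_idx(5, 2) yields three
# index batches [0, 1], [2, 3], [4] paired with their batch number, the last
# one smaller because 5 is not a multiple of the minibatch size.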
def zipp(params, tparams):
"""
When we reload the model.
"""
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
def unzip(zipped):
"""
When we pickle the model.
"""
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
def _p(pp, name):
return '%s_%s' % (pp, name)
def init_params(options):
"""
    Global (not layer-specific) parameters: here just the embedding matrix
    shared by entities and relations.
"""
params = OrderedDict()
# embedding
wb = numpy.sqrt(6. / options['dim_proj'])
Wemb = numpy.random.uniform(low=-wb, high=wb, size=(options['n_words'], options['dim_proj']))
Wemb = Wemb.T / numpy.sqrt(numpy.sum(Wemb ** 2, axis=1))
params['Wemb'] = numpy.asarray(Wemb.T, dtype=config.floatX)
return params
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
raise Warning('%s is not in the archive' % kk)
params[kk] = pp[kk]
return params
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def get_layer(name):
fns = layers[name]
return fns
def rtranse_layer(tparams, state_below, options, prefix='rnn', mask=None):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
assert mask is not None
def _step(m_, x_, h_):
h_t = x_ + h_
h_t = m_[:, None] * h_t + (1. - m_)[:, None] * h_
return h_t
dim_proj = options['dim_proj']
rval, updates = theano.scan(_step,
sequences=[mask, state_below],
outputs_info=tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj),
name=_p(prefix, '_layers'),
n_steps=nsteps)
return rval
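# Note: with a zero initial state and _step simply adding each (masked) input,
# the last row of this scan is the sum of the embeddings fed in, e.g. for a
# hypothetical 2-hop path the encoding is emb(lhs) + emb(rel1) + emb(rel2),
# i.e. the additive composition that rTransE scores against the target entity.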
# Registry mapping encoder names (selected via options['encoder']) to their
# layer functions.
layers = {'rtranse': rtranse_layer}
def sgd(lr, tparams, grads, xP, xN, mask, yP, yN, x, y, maskAlt, cost, weight):
""" Stochastic Gradient Descent
    :note: A more complicated version of sgd than needed. This is
    done like that for consistency with adadelta and rmsprop.
"""
# New set of shared variable that will contain the gradient
# for a mini-batch.
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
# Function that computes gradients for a mini-batch, but do not
# updates the weights.
f_grad_shared = theano.function([xP, xN, mask, yP, yN, x, y, maskAlt, weight], cost, updates=gsup,
name='sgd_f_grad_shared')
pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
# Function that updates the weights from the previously computed
# gradient.
f_update = theano.function([lr], [], updates=pup,
name='sgd_f_update')
return f_grad_shared, f_update
def adadelta(lrateEmb, lrateRNN, tparams, grads, xP, xN, mask, yP, yN, x, y, maskAlt, cost):
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
#They are not updated until the updates parameter is called.
#See http://nbviewer.ipython.org/github/jaberg/IPythonTheanoTutorials/blob/master/ipynb/Theano%20Tutorial%20%28Part%203%20-%20Functions%20and%20Shared%20Variables%29.ipynb
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([xP, xN, mask, yP, yN, x, y, maskAlt], cost, updates=zgup + rg2up,
name='adadelta_f_grad_shared')
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([lrateEmb, lrateRNN], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_f_update')
return f_grad_shared, f_update
def rmsprop(lrateEmb, lrateRNN, tparams, grads, xP, xN, mask, yP, yN, x, y, maskAlt, cost):
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([xP, xN, mask, yP, yN, x, y, maskAlt], cost,
updates=zgup + rgup + rg2up,
name='rmsprop_f_grad_shared')
updir = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_updir' % k)
for k, p in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(tparams.values(), updir_new)]
f_update = theano.function([lrateEmb, lrateRNN], [], updates=updir_new + param_up,
on_unused_input='ignore',
name='rmsprop_f_update')
return f_grad_shared, f_update
def build_model(tparams, options):
trng = RandomStreams(1234)
xP = tensor.matrix('xP', dtype='int64')
yP = tensor.vector('yP', dtype='int64')
xN = tensor.matrix('xN', dtype='int64')
yN = tensor.vector('yN', dtype='int64')
mask = tensor.matrix('mask', dtype=config.floatX)
n_timesteps = mask.shape[0]
n_samples = mask.shape[1]
embP = tparams['Wemb'][xP.flatten()].reshape([n_timesteps,
n_samples,
options['dim_proj']])
embN = tparams['Wemb'][xN.flatten()].reshape([n_timesteps,
n_samples,
options['dim_proj']])
outP = tparams['Wemb'][yP.flatten()].reshape([n_samples,
options['dim_proj']])
outN = tparams['Wemb'][yN.flatten()].reshape([n_samples,
options['dim_proj']])
projP = get_layer(options['encoder'])(tparams, embP, options,
prefix=options['encoder'],
mask=mask)
projN = get_layer(options['encoder'])(tparams, embN, options,
prefix=options['encoder'],
mask=mask)
projP = projP[-1,:,:]
projN = projN[-1,:,:]
simi= L2sim(projP, outP)
similn= L2sim(projN, outP)
simirn= L2sim(projP, outN)
coding_err = L2norm(projP, outP)
costl, outl = margincost(simi, similn, options['margin'])
costr, outr = margincost(simi, simirn, options['margin'])
L2norms_sum = 0
for sym in tparams.keys():
L2norms_sum += tensor.sum(tensor.sqr(tparams[sym]))
cost = costl + costr
cost += options['lambda_pen']*(tensor.sum(coding_err)) + options['penalty_norm']*(L2norms_sum)
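    # Objective assembled above: margin ranking losses for corrupted left and
    # right entities, plus lambda_pen * the coding error of positive triples
    # and penalty_norm * the summed squared L2 norms of all parameters.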
return xP, xN, mask, yP, yN, cost
def autoencoding_errors(tparams, options, cost):
x = tensor.matrix('x', dtype='int64')
y = tensor.vector('y', dtype='int64')
mask = tensor.matrix('mask', dtype=config.floatX)
weight = tensor.vector('weight', dtype=config.floatX)
n_timesteps = mask.shape[0]
n_samples = mask.shape[1]
emb = tparams['Wemb'][x.flatten()].reshape([n_timesteps,
n_samples,
options['dim_proj']])
out = tparams['Wemb'][y.flatten()].reshape([n_samples,
options['dim_proj']])
proj = get_layer(options['encoder'])(tparams, emb, options,
prefix=options['encoder'],
mask=mask)
proj = proj[-1,:,:]
coding_err = L2norm(proj, out)
cost += tensor.sum(options['alpha']*weight*coding_err)
return x, y, mask, weight, cost
def RankLeftFn(tparams, options):
idxr=tensor.scalar('idxr', dtype='int64')
idxo=tensor.scalar('idxo',dtype='int64')
embL = tparams['Wemb'][tensor.arange(options['n_ent'])].reshape([1,
options['n_ent'],
options['dim_proj']])
embO = tparams['Wemb'][idxo].reshape([1, options['dim_proj']])
embO = tensor.tile(embO, (options['n_ent'],1))[None,:,:]
emb=tensor.concatenate([embL, embO])
out = tparams['Wemb'][idxr].reshape([1, options['dim_proj']])
time_steps = emb.shape[0]
n_samples = emb.shape[1]
mask = tensor.alloc(numpy_floatX(1), time_steps, n_samples)
proj = get_layer(options['encoder'])(tparams, emb, options,
prefix=options['encoder'],
mask=mask)
proj = proj[-1,:,:]
simi= L2sim(proj, out)
return theano.function([idxr, idxo], simi)
def RankRightFn(tparams, options):
idxl=tensor.scalar('idxl', dtype='int64')
idxo=tensor.scalar('idxo',dtype='int64')
embL = tparams['Wemb'][idxl].reshape([1, 1, options['dim_proj']])
embO = tparams['Wemb'][idxo].reshape([1, 1, options['dim_proj']])
emb=tensor.concatenate([embL, embO])
emb=tensor.tile(emb, [1, options['n_ent'], 1])
out = tparams['Wemb'][tensor.arange(options['n_ent'])].reshape([options['n_ent'], options['dim_proj']])
time_steps = emb.shape[0]
n_samples = emb.shape[1]
mask = tensor.alloc(numpy_floatX(1), time_steps, n_samples)
proj = get_layer(options['encoder'])(tparams, emb, options,
prefix=options['encoder'],
mask=mask)
proj = proj[-1,:,:]
simi= L2sim(proj, out)
return theano.function([idxl, idxo], simi)
def train_lstm(
        dim_proj=20,  # entity/relation embedding dimension
        max_epochs=201,  # The maximum number of epochs to run
lrate=0.01, # Learning rate for sgd (not used for adadelta and rmsprop)
margin=0.5,
lambda_pen = 0,
penalty_norm=0,
alpha=0,
optimizer=sgd, # sgd, adadelta and rmsprop
encoder='rtranse',
validFreq=10, # Compute the validation error after this number of epochs.
dataset='SN',
datapath='datapath/',
savepath= '',
# Parameter for extra option
reload_model=''
):
numpy.random.seed(1234)
random.seed(1234)
Nsyn=728
Nrel=7
Nent=Nsyn-Nrel
batch_size=200
# Model options
model_options = locals().copy()
model_options['n_words'] = Nsyn
model_options['n_ent'] = Nent
print "model options", model_options
print 'Loading data'
trainl = convert2idx(load_file(datapath + dataset + '-train-lhs.pkl'))
trainr = convert2idx(load_file(datapath + dataset + '-train-rhs.pkl'))
traino = convert2idx(load_file(datapath + dataset + '-train-rel.pkl'))
train_lex, labelsTrain = buildTriplesForward(trainl,trainr,traino)
validl = convert2idx(load_file(datapath + dataset + '-valid-lhs.pkl'))
validr = convert2idx(load_file(datapath + dataset + '-valid-rhs.pkl'))
valido = convert2idx(load_file(datapath + dataset + '-valid-rel.pkl'))
testl = convert2idx(load_file(datapath + dataset + '-test-lhs.pkl'))
testr = convert2idx(load_file(datapath + dataset + '-test-rhs.pkl'))
o = convert2idx(load_file(datapath + dataset + '-test-rel.pkl')[-Nrel:, :])
testo = convert2idx(load_file(datapath + dataset + '-test-rel.pkl'))
alt_paths=cPickle.load(open(datapath+'alt_paths.pkl'))
altrel2idx=cPickle.load(open(datapath+'altrel2idx.pkl'))
alphas=numpy.asarray(cPickle.load(open(datapath+'alphas.pkl')))
true_triples=numpy.concatenate([testl,validl,trainl,testo,valido,traino,testr,validr,trainr]).reshape(3,testl.shape[0]+validl.shape[0]+trainl.shape[0]).T
print 'Building model'
params = init_params(model_options)
tparams = init_tparams(params)
(xP, xN, mask, yP, yN, cost) = build_model(tparams, model_options)
(x, y, maskAlt, weight, costTotal) = autoencoding_errors(tparams, model_options, cost)
f_cost = theano.function([xP, xN, mask, yP, yN, x, y, weight, maskAlt], costTotal, name='f_cost')
grads = tensor.grad(costTotal, wrt=tparams.values())
f_grad = theano.function([xP, xN, mask, yP, yN, x, y, weight, maskAlt], grads, name='f_grad')
lr = tensor.scalar(name='lr')
f_grad_shared, f_update = optimizer(lr, tparams, grads,
xP, xN, mask, yP, yN, x, y, maskAlt, costTotal, weight)
ranklfunc=RankLeftFn(tparams, model_options)
rankrfunc=RankRightFn(tparams, model_options)
print 'Optimization'
    best_MR = numpy.inf
    test_MR = test_HITS5 = None  # set at the first validation pass that improves MR
try:
for eidx in xrange(max_epochs):
print "Epoch %s"%(eidx)
trainln = create_random_arr(len(train_lex), Nent)
trainrn = create_random_arr(len(train_lex), Nent)
train_lex_neg, labelsTrain_neg = buildTriplesNeg(trainln,trainrn,traino)
# Get new shuffled index for the training set.
kf = get_minibatches_idx(len(train_lex), batch_size, shuffle=True)
for _, train_index in kf:
# Select the random examples for this minibatch
x = [train_lex[t]for t in train_index]
y = [labelsTrain[t]for t in train_index]
xP, mask, yP = prepare_data(x, y)
x = [train_lex_neg[t]for t in train_index]
y = [labelsTrain_neg[t]for t in train_index]
xN, mask, yN = prepare_data(x, y)
x, mask2hop, list_idx=build_matrices(alt_paths, xP[0,:], xP[1,:], numpy.asarray(yP), altrel2idx)
x2hop = x[:-1,:]
y2hop = list(x[-1,:])
costTT = f_grad_shared(xP, xN, mask, yP, yN, x2hop, y2hop, mask2hop, alphas[list_idx])
f_update(lrate)
if numpy.mod(eidx, validFreq) == 0:
#VALIDATION PERFORMANCE
resvalid = FilteredRankingScoreIdx(ranklfunc, rankrfunc, validl, validr, valido, true_triples)
MR = numpy.mean(resvalid[0]+resvalid[1])
if MR < best_MR:
best_MR=MR
#TEST PERFORMANCE
restest = FilteredRankingScoreIdx(ranklfunc, rankrfunc, testl, testr, testo, true_triples)
test_MR = numpy.mean(restest[0]+restest[1])
test_HITS5 = numpy.mean(numpy.asarray(restest[0] + restest[1]) <= 5) * 100
#saveto=''
#params = unzip(tparams)
#numpy.savez(saveto, **params)
except KeyboardInterrupt:
print "Training interupted"
print "TEST MR: %s"%(test_MR)
print "TEST HITS@5: %s"%(test_HITS5)
if __name__ == '__main__':
# See function train for all possible parameter and there definition.
train_lstm()
| bsd-3-clause | 6,000,488,881,538,462,000 | 36.090726 | 175 | 0.550361 | false |
yanheven/neutron | neutron/tests/unit/dummy_plugin.py | 9 | 4446 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.common import exceptions
from neutron.db import servicetype_db
from neutron.extensions import servicetype
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import service_base
RESOURCE_NAME = "dummy"
COLLECTION_NAME = "%ss" % RESOURCE_NAME
# Attribute Map for dummy resource
RESOURCE_ATTRIBUTE_MAP = {
COLLECTION_NAME: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'is_visible': True},
'service_type': {'allow_post': True,
'allow_put': False,
'validate': {'type:servicetype_ref': None},
'is_visible': True,
'default': None}
}
}
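# Illustrative (hypothetical) request body accepted by the map above; 'id' is
# generated server-side and so cannot be supplied on POST:
#   {"dummy": {"name": "demo", "tenant_id": "<tenant uuid>",
#              "service_type": "<service type id>"}}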
class Dummy(object):
@classmethod
def get_name(cls):
return "dummy"
@classmethod
def get_alias(cls):
return "dummy"
@classmethod
def get_description(cls):
return "Dummy stuff"
@classmethod
def get_updated(cls):
return "2012-11-20T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Extended Resource for dummy management."""
n_mgr = manager.NeutronManager.get_instance()
dummy_inst = n_mgr.get_service_plugins()['DUMMY']
controller = base.create_resource(
COLLECTION_NAME, RESOURCE_NAME, dummy_inst,
RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME])
return [extensions.ResourceExtension(COLLECTION_NAME,
controller)]
class DummyServicePlugin(service_base.ServicePluginBase):
"""This is a simple plugin for managing instances of a fictional 'dummy'
service. This plugin is provided as a proof-of-concept of how
advanced service might leverage the service type extension.
Ideally, instances of real advanced services, such as load balancing
or VPN will adopt a similar solution.
"""
supported_extension_aliases = ['dummy', servicetype.EXT_ALIAS]
path_prefix = "/dummy_svc"
agent_notifiers = {'dummy': 'dummy_agent_notifier'}
def __init__(self):
self.svctype_mgr = servicetype_db.ServiceTypeManager.get_instance()
self.dummys = {}
def get_plugin_type(self):
return constants.DUMMY
def get_plugin_description(self):
return "Neutron Dummy Service Plugin"
def get_dummys(self, context, filters, fields):
return self.dummys.values()
def get_dummy(self, context, id, fields):
try:
return self.dummys[id]
except KeyError:
raise exceptions.NotFound()
def create_dummy(self, context, dummy):
d = dummy['dummy']
d['id'] = uuidutils.generate_uuid()
self.dummys[d['id']] = d
self.svctype_mgr.increase_service_type_refcount(context,
d['service_type'])
return d
def update_dummy(self, context, id, dummy):
pass
def delete_dummy(self, context, id):
try:
svc_type_id = self.dummys[id]['service_type']
del self.dummys[id]
self.svctype_mgr.decrease_service_type_refcount(context,
svc_type_id)
except KeyError:
raise exceptions.NotFound()
| apache-2.0 | -7,171,095,521,552,108,000 | 32.938931 | 78 | 0.607737 | false |
Em-Pan/swift | swift/proxy/controllers/info.py | 39 | 3768 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import time
from swift.common.utils import public, get_hmac, get_swift_info, json, \
streq_const_time
from swift.proxy.controllers.base import Controller, delay_denial
from swift.common.swob import HTTPOk, HTTPForbidden, HTTPUnauthorized
class InfoController(Controller):
"""WSGI controller for info requests"""
server_type = 'Info'
def __init__(self, app, version, expose_info, disallowed_sections,
admin_key):
Controller.__init__(self, app)
self.expose_info = expose_info
self.disallowed_sections = disallowed_sections
self.admin_key = admin_key
self.allowed_hmac_methods = {
'HEAD': ['HEAD', 'GET'],
'GET': ['GET']}
@public
@delay_denial
def GET(self, req):
return self.GETorHEAD(req)
@public
@delay_denial
def HEAD(self, req):
return self.GETorHEAD(req)
@public
@delay_denial
def OPTIONS(self, req):
return HTTPOk(request=req, headers={'Allow': 'HEAD, GET, OPTIONS'})
    def GETorHEAD(self, req):
        """Handler for HTTP GET/HEAD requests to /info.
        Returns a WSGI-style callable (such as swob.Response).
        :param req: swob.Request object
        """
if not self.expose_info:
return HTTPForbidden(request=req)
admin_request = False
sig = req.params.get('swiftinfo_sig', '')
expires = req.params.get('swiftinfo_expires', '')
if sig != '' or expires != '':
admin_request = True
if not self.admin_key:
return HTTPForbidden(request=req)
try:
expires = int(expires)
except ValueError:
return HTTPUnauthorized(request=req)
if expires < time():
return HTTPUnauthorized(request=req)
valid_sigs = []
for method in self.allowed_hmac_methods[req.method]:
valid_sigs.append(get_hmac(method,
'/info',
expires,
self.admin_key))
# While it's true that any() will short-circuit, this doesn't
# affect the timing-attack resistance since the only way this will
# short-circuit is when a valid signature is passed in.
is_valid_hmac = any(streq_const_time(valid_sig, sig)
for valid_sig in valid_sigs)
if not is_valid_hmac:
return HTTPUnauthorized(request=req)
headers = {}
if 'Origin' in req.headers:
headers['Access-Control-Allow-Origin'] = req.headers['Origin']
headers['Access-Control-Expose-Headers'] = ', '.join(
['x-trans-id'])
info = json.dumps(get_swift_info(
admin=admin_request, disallowed_sections=self.disallowed_sections))
return HTTPOk(request=req,
headers=headers,
body=info,
content_type='application/json; charset=UTF-8')
| apache-2.0 | 7,559,925,865,017,092,000 | 34.54717 | 79 | 0.586253 | false |
pbougue/navitia | source/jormungandr/jormungandr/street_network/utils.py | 1 | 2284 | # encoding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import math
N_DEG_TO_RAD = 0.01745329238
EARTH_RADIUS_IN_METERS = 6372797.560856
def crowfly_distance_between(start_coord, end_coord):
    """Great-circle ("as the crow flies") distance in meters between two
    lat/lon coordinates, computed with the haversine formula."""
    lon_arc = (start_coord.lon - end_coord.lon) * N_DEG_TO_RAD
    lon_h = math.sin(lon_arc * 0.5)
    lon_h *= lon_h
    lat_arc = (start_coord.lat - end_coord.lat) * N_DEG_TO_RAD
    lat_h = math.sin(lat_arc * 0.5)
    lat_h *= lat_h
    tmp = math.cos(start_coord.lat * N_DEG_TO_RAD) * math.cos(end_coord.lat * N_DEG_TO_RAD)
    return EARTH_RADIUS_IN_METERS * 2.0 * math.asin(math.sqrt(lat_h + tmp * lon_h))
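# Quick sanity check (hypothetical values): two coordinates sharing a longitude
# whose latitudes differ by 0.01 degree come out at roughly 1112 m, matching
# the ~111.2 km per degree of latitude implied by EARTH_RADIUS_IN_METERS.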
def make_speed_switcher(req):
from jormungandr.fallback_modes import FallbackModes
return {
FallbackModes.walking.name: req['walking_speed'],
FallbackModes.bike.name: req['bike_speed'],
FallbackModes.car.name: req['car_speed'],
FallbackModes.bss.name: req['bss_speed'],
FallbackModes.ridesharing.name: req['car_no_park_speed'],
FallbackModes.taxi.name: req['taxi_speed'],
}
| agpl-3.0 | -8,004,988,770,288,755,000 | 37.066667 | 91 | 0.707531 | false |
sestrella/ansible | lib/ansible/module_utils/network/junos/facts/interfaces/interfaces.py | 19 | 4091 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The junos interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils._text import to_bytes
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.junos.argspec.interfaces.interfaces import InterfacesArgs
from ansible.module_utils.network.junos.utils.utils import get_resource_config
from ansible.module_utils.six import string_types
try:
from lxml import etree
HAS_LXML = True
except ImportError:
HAS_LXML = False
class InterfacesFacts(object):
""" The junos interfaces fact class
"""
def __init__(self, module, subspec='config', options='options'):
self._module = module
self.argument_spec = InterfacesArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for interfaces
:param connection: the device connection
        :param data: previously collected configuration as an lxml ElementTree
            root instance or a valid xml string
:rtype: dictionary
:returns: facts
"""
if not HAS_LXML:
self._module.fail_json(msg='lxml is not installed.')
if not data:
config_filter = """
<configuration>
<interfaces/>
</configuration>
"""
data = get_resource_config(connection, config_filter=config_filter)
if isinstance(data, string_types):
data = etree.fromstring(to_bytes(data, errors='surrogate_then_replace'))
resources = data.xpath('configuration/interfaces/interface')
objs = []
for resource in resources:
if resource is not None:
obj = self.render_config(self.generated_spec, resource)
if obj:
objs.append(obj)
facts = {}
if objs:
facts['interfaces'] = []
params = utils.validate_config(self.argument_spec, {'config': objs})
for cfg in params['config']:
facts['interfaces'].append(utils.remove_empties(cfg))
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
def render_config(self, spec, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The ElementTree instance of configuration object
:rtype: dictionary
:returns: The generated config
"""
config = deepcopy(spec)
config['name'] = utils.get_xml_conf_arg(conf, 'name')
config['description'] = utils.get_xml_conf_arg(conf, 'description')
mtu = utils.get_xml_conf_arg(conf, 'mtu')
config['mtu'] = int(mtu) if mtu else None
config['speed'] = utils.get_xml_conf_arg(conf, 'speed')
config['duplex'] = utils.get_xml_conf_arg(conf, 'link-mode')
config['hold_time']['down'] = utils.get_xml_conf_arg(conf, 'hold-time/down')
config['hold_time']['up'] = utils.get_xml_conf_arg(conf, 'hold-time/up')
disable = utils.get_xml_conf_arg(conf, 'disable', data='tag')
if disable:
config['enabled'] = False
else:
config['enabled'] = True
return utils.remove_empties(config)
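    # Illustrative (hypothetical) render_config output for a typical interface:
    #   {'name': 'ge-0/0/1', 'description': 'uplink', 'mtu': 1500,
    #    'speed': '1g', 'hold_time': {'down': '200', 'up': '100'},
    #    'enabled': True}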
| gpl-3.0 | 2,844,295,097,034,292,700 | 35.855856 | 91 | 0.620631 | false |
qutebrowser/qutebrowser | qutebrowser/utils/log.py | 1 | 28519 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Loggers and utilities related to logging."""
import os
import sys
import html as pyhtml
import logging
import contextlib
import collections
import copy
import faulthandler
import traceback
import warnings
import json
import inspect
import argparse
from typing import (TYPE_CHECKING, Any, Iterator, Mapping, MutableSequence,
Optional, Set, Tuple, Union)
from PyQt5 import QtCore
# Optional imports
try:
import colorama
except ImportError:
colorama = None
if TYPE_CHECKING:
from qutebrowser.config import config as configmodule
_log_inited = False
_args = None
COLORS = ['black', 'red', 'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
COLOR_ESCAPES = {color: '\033[{}m'.format(i)
for i, color in enumerate(COLORS, start=30)}
RESET_ESCAPE = '\033[0m'
# Log formats to use.
SIMPLE_FMT = ('{green}{asctime:8}{reset} {log_color}{levelname}{reset}: '
'{message}')
EXTENDED_FMT = ('{green}{asctime:8}{reset} '
'{log_color}{levelname:8}{reset} '
'{cyan}{name:10} {module}:{funcName}:{lineno}{reset} '
'{log_color}{message}{reset}')
EXTENDED_FMT_HTML = (
'<tr>'
'<td><pre>%(green)s%(asctime)-8s%(reset)s</pre></td>'
'<td><pre>%(log_color)s%(levelname)-8s%(reset)s</pre></td>'
    '<td><pre>%(cyan)s%(name)-10s</pre></td>'
'<td><pre>%(cyan)s%(module)s:%(funcName)s:%(lineno)s%(reset)s</pre></td>'
'<td><pre>%(log_color)s%(message)s%(reset)s</pre></td>'
'</tr>'
)
DATEFMT = '%H:%M:%S'
LOG_COLORS = {
'VDEBUG': 'white',
'DEBUG': 'white',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
# We first monkey-patch logging to support our VDEBUG level before getting the
# loggers. Based on https://stackoverflow.com/a/13638084
# mypy doesn't know about this, so we need to ignore it.
VDEBUG_LEVEL = 9
logging.addLevelName(VDEBUG_LEVEL, 'VDEBUG')
logging.VDEBUG = VDEBUG_LEVEL # type: ignore[attr-defined]
LOG_LEVELS = {
'VDEBUG': logging.VDEBUG, # type: ignore[attr-defined]
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL,
}
def vdebug(self: logging.Logger,
msg: str,
*args: Any,
**kwargs: Any) -> None:
"""Log with a VDEBUG level.
VDEBUG is used when a debug message is rather verbose, and probably of
little use to the end user or for post-mortem debugging, i.e. the content
probably won't change unless the code changes.
"""
if self.isEnabledFor(VDEBUG_LEVEL):
# pylint: disable=protected-access
self._log(VDEBUG_LEVEL, msg, args, **kwargs)
# pylint: enable=protected-access
logging.Logger.vdebug = vdebug # type: ignore[attr-defined]
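# Illustrative sketch, not part of the original file: how the VDEBUG
# monkey-patch above is used. The logger name below is made up.
def _vdebug_usage_example() -> None:
    demo = logging.getLogger('vdebug-demo')  # hypothetical logger name
    demo.setLevel(VDEBUG_LEVEL)
    # addLevelName() above registered the numeric level under 'VDEBUG'.
    assert logging.getLevelName(VDEBUG_LEVEL) == 'VDEBUG'
    # The patched method only formats and emits if the level is enabled.
    demo.vdebug("very verbose detail: %r", {'answer': 42})  # type: ignore[attr-defined]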
# The different loggers used.
statusbar = logging.getLogger('statusbar')
completion = logging.getLogger('completion')
destroy = logging.getLogger('destroy')
modes = logging.getLogger('modes')
webview = logging.getLogger('webview')
mouse = logging.getLogger('mouse')
misc = logging.getLogger('misc')
url = logging.getLogger('url')
procs = logging.getLogger('procs')
commands = logging.getLogger('commands')
init = logging.getLogger('init')
signals = logging.getLogger('signals')
hints = logging.getLogger('hints')
keyboard = logging.getLogger('keyboard')
downloads = logging.getLogger('downloads')
js = logging.getLogger('js') # Javascript console messages
qt = logging.getLogger('qt') # Warnings produced by Qt
ipc = logging.getLogger('ipc')
shlexer = logging.getLogger('shlexer')
save = logging.getLogger('save')
message = logging.getLogger('message')
config = logging.getLogger('config')
sessions = logging.getLogger('sessions')
webelem = logging.getLogger('webelem')
prompt = logging.getLogger('prompt')
network = logging.getLogger('network')
sql = logging.getLogger('sql')
greasemonkey = logging.getLogger('greasemonkey')
extensions = logging.getLogger('extensions')
LOGGER_NAMES = [
'statusbar', 'completion', 'init', 'url',
'destroy', 'modes', 'webview', 'misc',
'mouse', 'procs', 'hints', 'keyboard',
'commands', 'signals', 'downloads',
'js', 'qt', 'ipc', 'shlexer',
'save', 'message', 'config', 'sessions',
'webelem', 'prompt', 'network', 'sql',
'greasemonkey', 'extensions',
]
ram_handler: Optional['RAMHandler'] = None
console_handler: Optional[logging.Handler] = None
console_filter = None
def stub(suffix: str = '') -> None:
"""Show a STUB: message for the calling function."""
try:
function = inspect.stack()[1][3]
except IndexError: # pragma: no cover
misc.exception("Failed to get stack")
function = '<unknown>'
text = "STUB: {}".format(function)
if suffix:
text = '{} ({})'.format(text, suffix)
misc.warning(text)
def init_log(args: argparse.Namespace) -> None:
"""Init loggers based on the argparse namespace passed."""
level = (args.loglevel or "info").upper()
try:
numeric_level = getattr(logging, level)
except AttributeError:
raise ValueError("Invalid log level: {}".format(args.loglevel))
if numeric_level > logging.DEBUG and args.debug:
numeric_level = logging.DEBUG
console, ram = _init_handlers(numeric_level, args.color, args.force_color,
args.json_logging, args.loglines)
root = logging.getLogger()
global console_filter
if console is not None:
console_filter = LogFilter.parse(args.logfilter)
console.addFilter(console_filter)
root.addHandler(console)
if ram is not None:
root.addHandler(ram)
else:
# If we add no handler, we shouldn't process non visible logs at all
#
# disable blocks the current level (while setHandler shows the current
# level), so -1 to avoid blocking handled messages.
logging.disable(numeric_level - 1)
global _log_inited, _args
_args = args
root.setLevel(logging.NOTSET)
logging.captureWarnings(True)
_init_py_warnings()
QtCore.qInstallMessageHandler(qt_message_handler)
_log_inited = True
@QtCore.pyqtSlot()
def shutdown_log() -> None:
QtCore.qInstallMessageHandler(None)
def _init_py_warnings() -> None:
"""Initialize Python warning handling."""
assert _args is not None
warnings.simplefilter('error' if 'werror' in _args.debug_flags
else 'default')
warnings.filterwarnings('ignore', module='pdb', category=ResourceWarning)
# This happens in many qutebrowser dependencies...
warnings.filterwarnings('ignore', category=DeprecationWarning,
message=r"Using or importing the ABCs from "
r"'collections' instead of from 'collections.abc' "
r"is deprecated.*")
@contextlib.contextmanager
def disable_qt_msghandler() -> Iterator[None]:
"""Contextmanager which temporarily disables the Qt message handler."""
old_handler = QtCore.qInstallMessageHandler(None)
try:
yield
finally:
QtCore.qInstallMessageHandler(old_handler)
@contextlib.contextmanager
def py_warning_filter(action: str = 'ignore', **kwargs: Any) -> Iterator[None]:
"""Contextmanager to temporarily disable certain Python warnings."""
warnings.filterwarnings(action, **kwargs)
yield
if _log_inited:
_init_py_warnings()
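# Illustrative sketch, not part of the original file: silencing one specific
# warning for a single call site via py_warning_filter(). The message
# pattern and warning text are examples.
def _py_warning_filter_example() -> None:
    with py_warning_filter(category=DeprecationWarning,
                           message=r'.*noisy dependency.*'):
        warnings.warn("noisy dependency is deprecated", DeprecationWarning)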
def _init_handlers(
level: int,
color: bool,
force_color: bool,
json_logging: bool,
ram_capacity: int
) -> Tuple[logging.StreamHandler, Optional['RAMHandler']]:
"""Init log handlers.
Args:
level: The numeric logging level.
color: Whether to use color if available.
force_color: Force colored output.
        json_logging: Output log lines in JSON (this disables all colors).
        ram_capacity: Number of log lines to keep in the RAM handler
                      (0 disables the RAM handler entirely).
"""
global ram_handler
global console_handler
console_fmt, ram_fmt, html_fmt, use_colorama = _init_formatters(
level, color, force_color, json_logging)
if sys.stderr is None:
console_handler = None # type: ignore[unreachable]
else:
strip = False if force_color else None
if use_colorama:
stream = colorama.AnsiToWin32(sys.stderr, strip=strip)
else:
stream = sys.stderr
console_handler = logging.StreamHandler(stream)
console_handler.setLevel(level)
console_handler.setFormatter(console_fmt)
if ram_capacity == 0:
ram_handler = None
else:
ram_handler = RAMHandler(capacity=ram_capacity)
ram_handler.setLevel(logging.DEBUG)
ram_handler.setFormatter(ram_fmt)
ram_handler.html_formatter = html_fmt
return console_handler, ram_handler
def get_console_format(level: int) -> str:
"""Get the log format the console logger should use.
Args:
level: The numeric logging level.
Return:
Format of the requested level.
"""
return EXTENDED_FMT if level <= logging.DEBUG else SIMPLE_FMT
def _init_formatters(
level: int,
color: bool,
force_color: bool,
json_logging: bool
) -> Tuple[Union['JSONFormatter', 'ColoredFormatter'],
'ColoredFormatter', 'HTMLFormatter', bool]:
"""Init log formatters.
Args:
level: The numeric logging level.
color: Whether to use color if available.
force_color: Force colored output.
json_logging: Format lines as JSON (disables all color).
Return:
        A (console_formatter, ram_formatter, html_formatter, use_colorama) tuple.
        console_formatter/ram_formatter/html_formatter: logging.Formatter instances.
        use_colorama: Whether to use colorama.
"""
console_fmt = get_console_format(level)
ram_formatter = ColoredFormatter(EXTENDED_FMT, DATEFMT, '{',
use_colors=False)
html_formatter = HTMLFormatter(EXTENDED_FMT_HTML, DATEFMT,
log_colors=LOG_COLORS)
use_colorama = False
if sys.stderr is None:
console_formatter = None # type: ignore[unreachable]
return console_formatter, ram_formatter, html_formatter, use_colorama
if json_logging:
json_formatter = JSONFormatter()
return json_formatter, ram_formatter, html_formatter, use_colorama
color_supported = os.name == 'posix' or colorama
if color_supported and (sys.stderr.isatty() or force_color) and color:
use_colors = True
if colorama and os.name != 'posix':
use_colorama = True
else:
use_colors = False
console_formatter = ColoredFormatter(console_fmt, DATEFMT, '{',
use_colors=use_colors)
return console_formatter, ram_formatter, html_formatter, use_colorama
def change_console_formatter(level: int) -> None:
"""Change console formatter based on level.
Args:
level: The numeric logging level
"""
assert console_handler is not None
old_formatter = console_handler.formatter
if isinstance(old_formatter, ColoredFormatter):
console_fmt = get_console_format(level)
console_formatter = ColoredFormatter(
console_fmt, DATEFMT, '{', use_colors=old_formatter.use_colors)
console_handler.setFormatter(console_formatter)
else:
# Same format for all levels
assert isinstance(old_formatter, JSONFormatter), old_formatter
def qt_message_handler(msg_type: QtCore.QtMsgType,
context: QtCore.QMessageLogContext,
msg: str) -> None:
"""Qt message handler to redirect qWarning etc. to the logging system.
Args:
QtMsgType msg_type: The level of the message.
QMessageLogContext context: The source code location of the message.
msg: The message text.
"""
# Mapping from Qt logging levels to the matching logging module levels.
# Note we map critical to ERROR as it's actually "just" an error, and fatal
# to critical.
qt_to_logging = {
QtCore.QtDebugMsg: logging.DEBUG,
QtCore.QtWarningMsg: logging.WARNING,
QtCore.QtCriticalMsg: logging.ERROR,
QtCore.QtFatalMsg: logging.CRITICAL,
}
try:
qt_to_logging[QtCore.QtInfoMsg] = logging.INFO
except AttributeError:
# Added in Qt 5.5.
# While we don't support Qt < 5.5 anymore, logging still needs to work so that
# the Qt version warning in earlyinit.py does.
pass
# Change levels of some well-known messages to debug so they don't get
# shown to the user.
#
# If a message starts with any text in suppressed_msgs, it's not logged as
# error.
suppressed_msgs = [
# PNGs in Qt with broken color profile
# https://bugreports.qt.io/browse/QTBUG-39788
('libpng warning: iCCP: Not recognizing known sRGB profile that has '
'been edited'),
'libpng warning: iCCP: known incorrect sRGB profile',
# Hopefully harmless warning
'OpenType support missing for script ',
# Error if a QNetworkReply gets two different errors set. Harmless Qt
# bug on some pages.
# https://bugreports.qt.io/browse/QTBUG-30298
('QNetworkReplyImplPrivate::error: Internal problem, this method must '
'only be called once.'),
# Sometimes indicates missing text, but most of the time harmless
'load glyph failed ',
# Harmless, see https://bugreports.qt.io/browse/QTBUG-42479
('content-type missing in HTTP POST, defaulting to '
'application/x-www-form-urlencoded. '
'Use QNetworkRequest::setHeader() to fix this problem.'),
# https://bugreports.qt.io/browse/QTBUG-43118
'Using blocking call!',
# Hopefully harmless
('"Method "GetAll" with signature "s" on interface '
'"org.freedesktop.DBus.Properties" doesn\'t exist'),
('"Method \\"GetAll\\" with signature \\"s\\" on interface '
'\\"org.freedesktop.DBus.Properties\\" doesn\'t exist\\n"'),
'WOFF support requires QtWebKit to be built with zlib support.',
# Weird Enlightment/GTK X extensions
'QXcbWindow: Unhandled client message: "_E_',
'QXcbWindow: Unhandled client message: "_ECORE_',
'QXcbWindow: Unhandled client message: "_GTK_',
# Happens on AppVeyor CI
'SetProcessDpiAwareness failed:',
# https://bugreports.qt.io/browse/QTBUG-49174
('QObject::connect: Cannot connect (null)::stateChanged('
'QNetworkSession::State) to '
'QNetworkReplyHttpImpl::_q_networkSessionStateChanged('
'QNetworkSession::State)'),
# https://bugreports.qt.io/browse/QTBUG-53989
("Image of format '' blocked because it is not considered safe. If "
"you are sure it is safe to do so, you can white-list the format by "
"setting the environment variable QTWEBKIT_IMAGEFORMAT_WHITELIST="),
# Installing Qt from the installer may cause it looking for SSL3 or
# OpenSSL 1.0 which may not be available on the system
"QSslSocket: cannot resolve ",
"QSslSocket: cannot call unresolved function ",
# When enabling debugging with QtWebEngine
("Remote debugging server started successfully. Try pointing a "
"Chromium-based browser to "),
# https://github.com/qutebrowser/qutebrowser/issues/1287
"QXcbClipboard: SelectionRequest too old",
# https://github.com/qutebrowser/qutebrowser/issues/2071
'QXcbWindow: Unhandled client message: ""',
# https://codereview.qt-project.org/176831
"QObject::disconnect: Unexpected null parameter",
# https://bugreports.qt.io/browse/QTBUG-76391
"Attribute Qt::AA_ShareOpenGLContexts must be set before "
"QCoreApplication is created.",
]
# not using utils.is_mac here, because we can't be sure we can successfully
# import the utils module here.
if sys.platform == 'darwin':
suppressed_msgs += [
# https://bugreports.qt.io/browse/QTBUG-47154
('virtual void QSslSocketBackendPrivate::transmit() SSLRead '
'failed with: -9805'),
]
if not msg:
msg = "Logged empty message!"
if any(msg.strip().startswith(pattern) for pattern in suppressed_msgs):
level = logging.DEBUG
else:
level = qt_to_logging[msg_type]
if context.line is None:
lineno = -1 # type: ignore[unreachable]
else:
lineno = context.line
if context.function is None:
func = 'none' # type: ignore[unreachable]
elif ':' in context.function:
func = '"{}"'.format(context.function)
else:
func = context.function
if context.category is None or context.category == 'default':
name = 'qt'
else:
name = 'qt-' + context.category
if msg.splitlines()[0] == ('This application failed to start because it '
'could not find or load the Qt platform plugin '
'"xcb".'):
# Handle this message specially.
msg += ("\n\nOn Archlinux, this should fix the problem:\n"
" pacman -S libxkbcommon-x11")
faulthandler.disable()
assert _args is not None
if _args.debug:
stack: Optional[str] = ''.join(traceback.format_stack())
else:
stack = None
record = qt.makeRecord(name=name, level=level, fn=context.file, lno=lineno,
msg=msg, args=(), exc_info=None, func=func,
sinfo=stack)
qt.handle(record)
@contextlib.contextmanager
def hide_qt_warning(pattern: str, logger: str = 'qt') -> Iterator[None]:
"""Hide Qt warnings matching the given regex."""
log_filter = QtWarningFilter(pattern)
logger_obj = logging.getLogger(logger)
logger_obj.addFilter(log_filter)
try:
yield
finally:
logger_obj.removeFilter(log_filter)
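# Illustrative sketch, not part of the original file: suppressing a
# known-noisy Qt warning around one operation. The pattern is an example
# taken from the suppressed_msgs list above.
def _hide_qt_warning_example() -> None:
    with hide_qt_warning("QXcbClipboard:"):
        # This record starts with the pattern, so QtWarningFilter drops it.
        qt.warning("QXcbClipboard: SelectionRequest too old")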
def init_from_config(conf: 'configmodule.ConfigContainer') -> None:
"""Initialize logging settings from the config.
init_log is called before the config module is initialized, so config-based
initialization cannot be performed there.
Args:
conf: The global ConfigContainer.
This is passed rather than accessed via the module to avoid a
cyclic import.
"""
assert _args is not None
if _args.debug:
init.debug("--debug flag overrides log configs")
return
if ram_handler:
ramlevel = conf.logging.level.ram
init.debug("Configuring RAM loglevel to %s", ramlevel)
ram_handler.setLevel(LOG_LEVELS[ramlevel.upper()])
if console_handler:
consolelevel = conf.logging.level.console
if _args.loglevel:
init.debug("--loglevel flag overrides logging.level.console")
else:
init.debug("Configuring console loglevel to %s", consolelevel)
level = LOG_LEVELS[consolelevel.upper()]
console_handler.setLevel(level)
change_console_formatter(level)
class QtWarningFilter(logging.Filter):
"""Filter to filter Qt warnings.
Attributes:
_pattern: The start of the message.
"""
def __init__(self, pattern: str) -> None:
super().__init__()
self._pattern = pattern
def filter(self, record: logging.LogRecord) -> bool:
"""Determine if the specified record is to be logged."""
do_log = not record.msg.strip().startswith(self._pattern)
return do_log
class InvalidLogFilterError(Exception):
"""Raised when an invalid filter string is passed to LogFilter.parse()."""
def __init__(self, names: Set[str]):
invalid = names - set(LOGGER_NAMES)
super().__init__("Invalid log category {} - valid categories: {}"
.format(', '.join(sorted(invalid)),
', '.join(LOGGER_NAMES)))
class LogFilter(logging.Filter):
"""Filter to filter log records based on the commandline argument.
The default Filter only supports one name to show - we support a
comma-separated list instead.
Attributes:
names: A set of logging names to allow.
negated: Whether names is a set of names to log or to suppress.
only_debug: Only filter debug logs, always show anything more important
than debug.
"""
def __init__(self, names: Set[str], *, negated: bool = False,
only_debug: bool = True) -> None:
super().__init__()
self.names = names
self.negated = negated
self.only_debug = only_debug
@classmethod
def parse(cls, filter_str: Optional[str], *,
only_debug: bool = True) -> 'LogFilter':
"""Parse a log filter from a string."""
if filter_str is None or filter_str == 'none':
names = set()
negated = False
else:
filter_str = filter_str.lower()
if filter_str.startswith('!'):
negated = True
filter_str = filter_str[1:]
else:
negated = False
names = {e.strip() for e in filter_str.split(',')}
if not names.issubset(LOGGER_NAMES):
raise InvalidLogFilterError(names)
return cls(names=names, negated=negated, only_debug=only_debug)
def update_from(self, other: 'LogFilter') -> None:
"""Update this filter's properties from another filter."""
self.names = other.names
self.negated = other.negated
self.only_debug = other.only_debug
def filter(self, record: logging.LogRecord) -> bool:
"""Determine if the specified record is to be logged."""
if not self.names:
# No filter
return True
elif record.levelno > logging.DEBUG and self.only_debug:
# More important than DEBUG, so we won't filter at all
return True
elif record.name.split('.')[0] in self.names:
return not self.negated
return self.negated
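# Illustrative sketch, not part of the original file: what LogFilter.parse()
# does with typical --logfilter values. The filter strings are examples.
def _logfilter_examples() -> None:
    allow = LogFilter.parse('init,commands')   # only these debug categories
    deny = LogFilter.parse('!js')              # everything except 'js'
    record = misc.makeRecord('misc', logging.DEBUG, 'x.py', 1, 'hi', (), None)
    assert not allow.filter(record)  # 'misc' is not in the allowed set
    assert deny.filter(record)       # 'misc' is outside the negated set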
class RAMHandler(logging.Handler):
"""Logging handler which keeps the messages in a deque in RAM.
Loosely based on logging.BufferingHandler which is unsuitable because it
uses a simple list rather than a deque.
Attributes:
_data: A deque containing the logging records.
"""
def __init__(self, capacity: int) -> None:
super().__init__()
self.html_formatter: Optional[HTMLFormatter] = None
if capacity != -1:
self._data: MutableSequence[logging.LogRecord] = collections.deque(
maxlen=capacity
)
else:
self._data = collections.deque()
def emit(self, record: logging.LogRecord) -> None:
self._data.append(record)
def dump_log(self, html: bool = False, level: str = 'vdebug',
logfilter: LogFilter = None) -> str:
"""Dump the complete formatted log data as string.
FIXME: We should do all the HTML formatting via jinja2.
(probably obsolete when moving to a widget for logging,
        https://github.com/qutebrowser/qutebrowser/issues/34)
Args:
html: Produce HTML rather than plaintext output.
level: The minimal loglevel to show.
logfilter: A LogFilter instance used to filter log lines.
"""
minlevel = LOG_LEVELS.get(level.upper(), VDEBUG_LEVEL)
if logfilter is None:
logfilter = LogFilter(set())
if html:
assert self.html_formatter is not None
fmt = self.html_formatter.format
else:
fmt = self.format
self.acquire()
try:
lines = [fmt(record)
for record in self._data
if record.levelno >= minlevel and
logfilter.filter(record)]
finally:
self.release()
return '\n'.join(lines)
def change_log_capacity(self, capacity: int) -> None:
self._data = collections.deque(self._data, maxlen=capacity)
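# Illustrative sketch, not part of the original file: attaching a RAMHandler
# to a logger and dumping its buffer. Logger name and message are examples.
def _ram_handler_example() -> str:
    handler = RAMHandler(capacity=100)
    handler.setFormatter(logging.Formatter('{levelname} {name}: {message}',
                                           style='{'))
    demo = logging.getLogger('ram-demo')  # hypothetical logger name
    demo.addHandler(handler)
    demo.error("something went wrong")
    demo.removeHandler(handler)
    return handler.dump_log(level='debug')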
class ColoredFormatter(logging.Formatter):
"""Logging formatter to output colored logs.
Attributes:
use_colors: Whether to do colored logging or not.
"""
def __init__(self, fmt: str,
datefmt: str,
style: str, *,
use_colors: bool) -> None:
super().__init__(fmt, datefmt, style)
self.use_colors = use_colors
def format(self, record: logging.LogRecord) -> str:
if self.use_colors:
color_dict = dict(COLOR_ESCAPES)
color_dict['reset'] = RESET_ESCAPE
log_color = LOG_COLORS[record.levelname]
color_dict['log_color'] = COLOR_ESCAPES[log_color]
else:
color_dict = {color: '' for color in COLOR_ESCAPES}
color_dict['reset'] = ''
color_dict['log_color'] = ''
record.__dict__.update(color_dict)
return super().format(record)
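# Illustrative sketch, not part of the original file: rendering one record
# through the colored console format by hand. The record values are made up.
def _colored_formatter_example() -> str:
    formatter = ColoredFormatter(SIMPLE_FMT, DATEFMT, '{', use_colors=True)
    record = logging.LogRecord('demo', logging.WARNING, __file__, 1,
                               "disk is %d%% full", (93,), None)
    # WARNING maps to yellow via LOG_COLORS, so the message gets the yellow
    # escape and a trailing reset.
    return formatter.format(record)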
class HTMLFormatter(logging.Formatter):
"""Formatter for HTML-colored log messages.
Attributes:
_log_colors: The colors to use for logging levels.
_colordict: The colordict passed to the logger.
"""
def __init__(self, fmt: str, datefmt: str, log_colors: Mapping[str, str]) -> None:
"""Constructor.
Args:
fmt: The format string to use.
datefmt: The date format to use.
log_colors: The colors to use for logging levels.
"""
super().__init__(fmt, datefmt)
self._log_colors: Mapping[str, str] = log_colors
self._colordict: Mapping[str, str] = {}
# We could solve this nicer by using CSS, but for this simple case this
# works.
for color in COLORS:
self._colordict[color] = '<font color="{}">'.format(color)
self._colordict['reset'] = '</font>'
def format(self, record: logging.LogRecord) -> str:
record_clone = copy.copy(record)
record_clone.__dict__.update(self._colordict)
if record_clone.levelname in self._log_colors:
color = self._log_colors[record_clone.levelname]
color_str = self._colordict[color]
record_clone.log_color = color_str # type: ignore[attr-defined]
else:
record_clone.log_color = '' # type: ignore[attr-defined]
for field in ['msg', 'filename', 'funcName', 'levelname', 'module',
'name', 'pathname', 'processName', 'threadName']:
data = str(getattr(record_clone, field))
setattr(record_clone, field, pyhtml.escape(data))
msg = super().format(record_clone)
if not msg.endswith(self._colordict['reset']):
msg += self._colordict['reset']
return msg
def formatTime(self, record: logging.LogRecord,
datefmt: str = None) -> str:
out = super().formatTime(record, datefmt)
return pyhtml.escape(out)
class JSONFormatter(logging.Formatter):
"""Formatter for JSON-encoded log messages."""
def format(self, record: logging.LogRecord) -> str:
obj = {}
for field in ['created', 'msecs', 'levelname', 'name', 'module',
'funcName', 'lineno', 'levelno']:
obj[field] = getattr(record, field)
obj['message'] = record.getMessage()
if record.exc_info is not None:
obj['traceback'] = super().formatException(record.exc_info)
return json.dumps(obj)
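# Illustrative sketch, not part of the original file: the JSON formatter
# emits one JSON object per record, which is convenient for log shipping.
# The record values below are made up.
def _json_formatter_example() -> str:
    formatter = JSONFormatter()
    record = logging.LogRecord('demo', logging.INFO, __file__, 1,
                               "started in %.2fs", (0.25,), None)
    return formatter.format(record)  # e.g. '{"created": ..., "message": "started in 0.25s", ...}'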
| gpl-3.0 | 8,036,734,590,630,784,000 | 34.252163 | 86 | 0.624566 | false |
webitup/puke | puke/Console.py | 2 | 2576 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import logging, sys, os, json
from colorama import *
import subprocess
SPEAK_ENABLED = False
SPEAK_MESSAGE_ON_FAIL = "Build failed!"
SPEAK_MESSAGE_ON_SUCCESS = "Build success!"
init(autoreset = True)
class console:
@staticmethod
def log(*messages):
color = Style.BRIGHT
if os.environ.get("NOCOLOR"):
color = ""
for m in messages:
msg = console.stringify(m)
logging.info(color + msg)
@staticmethod
def info(*messages):
for m in messages:
msg = console.stringify(m)
logging.info( msg)
@staticmethod
def say(*messages):
try:
if sys.platform.lower() == 'darwin':
for m in messages:
s = subprocess.Popen(['say', m])
s.communicate()
except:
pass
@staticmethod
def debug(*messages):
color = Back.BLUE
if os.environ.get("NOCOLOR"):
color = ""
for m in messages:
msg = console.stringify(m)
logging.debug( color + msg )
@staticmethod
def warn(*messages):
color = Fore.YELLOW + Style.BRIGHT
if os.environ.get("NOCOLOR"):
color = ""
for m in messages:
msg = console.stringify(m)
msg = console.pukefactory(msg)
logging.warning( color + msg)
@staticmethod
def error(*messages):
color = Back.RED + Style.BRIGHT
if os.environ.get("NOCOLOR"):
color = ""
for m in messages:
msg = console.stringify(m)
logging.error( color + msg)
@staticmethod
def confirm(*messages):
color = Fore.GREEN + Style.BRIGHT
if os.environ.get("NOCOLOR"):
color = ""
for m in messages:
msg = console.stringify(m)
logging.info(color + msg )
@staticmethod
def header(msg, level = 2):
msg = console.stringify(msg)
logging.info("")
if level == 1:
color = Fore.MAGENTA
else:
color = Fore.CYAN
if os.environ.get("NOCOLOR"):
logging.info(msg )
else:
logging.info(color + Style.BRIGHT + msg )
@staticmethod
def fail(msg):
if SPEAK_ENABLED and SPEAK_MESSAGE_ON_FAIL:
console.say(SPEAK_MESSAGE_ON_FAIL)
msg = console.stringify(msg)
msg = console.pukefactory(msg)
console.error(" /!\\ BUILD FAIL : " + msg)
sys.exit(1)
@staticmethod
def pukefactory(msg):
if ':puke:' in msg:
try:
f = open(os.path.join(os.path.dirname( __file__ ), 'datas','decoration', 'puke.txt'), 'r')
msg = msg.replace(':puke:', '\n' + f.read())
f.close()
return msg
except Exception:
pass
return msg
@staticmethod
def stringify(msg):
if isinstance(msg, str):
return msg
return json.JSONEncoder().encode(msg)
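# Illustrative sketch, not part of the original file: typical call sites for
# the console helpers above. The messages are examples.
def _console_usage_example():
	console.header("Build", level=1)
	console.log("compiling assets")
	console.warn("cache is stale, rebuilding")
	console.confirm("done")
	# console.fail("missing config") would log the error and sys.exit(1)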
| mit | 2,733,193,653,001,594,400 | 17.941176 | 94 | 0.64014 | false |
cea-sec/miasm | example/expression/solve_condition_stp.py | 2 | 6730 | from __future__ import print_function
import sys
import subprocess
from optparse import OptionParser
from pdb import pm
from future.utils import viewitems
from miasm.analysis.machine import Machine
from miasm.analysis.binary import Container
from miasm.expression.expression import ExprInt, ExprCond, ExprId, \
get_expr_ids, ExprAssign, ExprLoc
from miasm.core.bin_stream import bin_stream_str
from miasm.ir.symbexec import SymbolicExecutionEngine, get_block
from miasm.expression.simplifications import expr_simp
from miasm.core import parse_asm
from miasm.ir.translators.translator import Translator
from miasm.core.locationdb import LocationDB
machine = Machine("x86_32")
parser = OptionParser(usage="usage: %prog [options] file")
parser.add_option('-a', "--address", dest="address", metavar="ADDRESS",
help="address to disasemble", default="0")
(options, args) = parser.parse_args(sys.argv[1:])
if not args:
parser.print_help()
sys.exit(0)
def emul_symb(lifter, ircfg, mdis, states_todo, states_done):
while states_todo:
addr, symbols, conds = states_todo.pop()
print('*' * 40, "addr", addr, '*' * 40)
if (addr, symbols, conds) in states_done:
print('Known state, skipping', addr)
continue
states_done.add((addr, symbols, conds))
symbexec = SymbolicExecutionEngine(lifter)
symbexec.symbols = symbols.copy()
if lifter.pc in symbexec.symbols:
del symbexec.symbols[lifter.pc]
irblock = get_block(lifter, ircfg, mdis, addr)
print('Run block:')
print(irblock)
addr = symbexec.eval_updt_irblock(irblock)
print('Final state:')
symbexec.dump(mems=False)
assert addr is not None
if isinstance(addr, ExprCond):
# Create 2 states, each including complementary conditions
cond_group_a = {addr.cond: ExprInt(0, addr.cond.size)}
cond_group_b = {addr.cond: ExprInt(1, addr.cond.size)}
addr_a = expr_simp(symbexec.eval_expr(addr.replace_expr(cond_group_a), {}))
addr_b = expr_simp(symbexec.eval_expr(addr.replace_expr(cond_group_b), {}))
            if not ((addr_a.is_int() or addr_a.is_loc()) and
                    (addr_b.is_int() or addr_b.is_loc())):
print(str(addr_a), str(addr_b))
raise ValueError("Unsupported condition")
if isinstance(addr_a, ExprInt):
addr_a = int(addr_a.arg)
if isinstance(addr_b, ExprInt):
addr_b = int(addr_b.arg)
states_todo.add((addr_a, symbexec.symbols.copy(), tuple(list(conds) + list(viewitems(cond_group_a)))))
states_todo.add((addr_b, symbexec.symbols.copy(), tuple(list(conds) + list(viewitems(cond_group_b)))))
elif addr == ret_addr:
print('Return address reached')
continue
elif addr.is_int():
addr = int(addr.arg)
states_todo.add((addr, symbexec.symbols.copy(), tuple(conds)))
elif addr.is_loc():
states_todo.add((addr, symbexec.symbols.copy(), tuple(conds)))
else:
raise ValueError("Unsupported destination")
if __name__ == '__main__':
loc_db = LocationDB()
translator_smt2 = Translator.to_language("smt2")
addr = int(options.address, 16)
cont = Container.from_stream(open(args[0], 'rb'), loc_db)
mdis = machine.dis_engine(cont.bin_stream, loc_db=loc_db)
lifter = machine.lifter(mdis.loc_db)
ircfg = lifter.new_ircfg()
symbexec = SymbolicExecutionEngine(lifter)
asmcfg = parse_asm.parse_txt(
machine.mn, 32, '''
init:
PUSH argv
PUSH argc
PUSH ret_addr
''',
loc_db
)
argc_lbl = loc_db.get_name_location('argc')
argv_lbl = loc_db.get_name_location('argv')
ret_addr_lbl = loc_db.get_name_location('ret_addr')
init_lbl = loc_db.get_name_location('init')
argc_loc = ExprLoc(argc_lbl, 32)
argv_loc = ExprLoc(argv_lbl, 32)
ret_addr_loc = ExprLoc(ret_addr_lbl, 32)
ret_addr = ExprId("ret_addr", ret_addr_loc.size)
fix_args = {
argc_loc: ExprId("argc", argc_loc.size),
argv_loc: ExprId("argv", argv_loc.size),
ret_addr_loc: ret_addr,
}
block = asmcfg.loc_key_to_block(init_lbl)
for instr in block.lines:
for i, arg in enumerate(instr.args):
instr.args[i]= arg.replace_expr(fix_args)
print(block)
# add fake address and len to parsed instructions
lifter.add_asmblock_to_ircfg(block, ircfg)
irb = ircfg.blocks[init_lbl]
symbexec.eval_updt_irblock(irb)
symbexec.dump(ids=False)
# reset lifter blocks
lifter.blocks = {}
states_todo = set()
states_done = set()
states_todo.add((addr, symbexec.symbols, ()))
# emul blocks, propagate states
emul_symb(lifter, ircfg, mdis, states_todo, states_done)
all_info = []
print('*' * 40, 'conditions to match', '*' * 40)
for addr, symbols, conds in sorted(states_done, key=str):
print('*' * 40, addr, '*' * 40)
reqs = []
for k, v in conds:
print(k, v)
reqs.append((k, v))
all_info.append((addr, reqs))
all_cases = set()
symbexec = SymbolicExecutionEngine(lifter)
for addr, reqs_cond in all_info:
out = ['(set-logic QF_ABV)',
'(set-info :smt-lib-version 2.0)']
conditions = []
all_ids = set()
for expr, value in reqs_cond:
all_ids.update(get_expr_ids(expr))
expr_test = ExprCond(expr,
ExprInt(1, value.size),
ExprInt(0, value.size))
cond = translator_smt2.from_expr(ExprAssign(expr_test, value))
conditions.append(cond)
for name in all_ids:
out.append("(declare-fun %s () (_ BitVec %d))" % (name, name.size))
if not out:
continue
out += conditions
out.append('(check-sat)')
open('out.dot', 'w').write('\n'.join(out))
try:
cases = subprocess.check_output(["/home/serpilliere/tools/stp/stp",
"-p", '--SMTLIB2',
"out.dot"])
except OSError:
print("Cannot find stp binary!")
break
        for c in cases.decode(errors='replace').split('\n'):
if c.startswith('ASSERT'):
all_cases.add((addr, c))
print('*' * 40, 'ALL COND', '*' * 40)
all_cases = list(all_cases)
all_cases.sort(key=lambda x: (x[0], x[1]))
for addr, val in all_cases:
print('Address:', addr, 'is reachable using argc', val)
| gpl-2.0 | -8,537,619,613,936,739,000 | 32.989899 | 114 | 0.58529 | false |