Dataset columns (type and observed range): repo_name: string (5-100 chars) | path: string (4-299 chars) | copies: string (990 classes) | size: string (4-7 chars) | content: string (666-1.03M chars) | license: string (15 classes) | hash: int64 | line_mean: float64 (3.17-100) | line_max: int64 (7-1k) | alpha_frac: float64 (0.25-0.98) | autogenerated: bool (1 class)
repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
krishnazure/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 330 | 3364 | # urllib3/exceptions.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class ConnectionError(HTTPError):
"Raised when a normal connection fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
## Leaf Exceptions
class MaxRetryError(RequestError):
"Raised when the maximum number of retries is exceeded."
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s" % url
if reason:
message += " (Caused by %s: %s)" % (type(reason), reason)
else:
message += " (Caused by redirect)"
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationParseError(ValueError, HTTPError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
| apache-2.0 | -9,196,344,109,557,528,000 | 25.698413 | 79 | 0.674792 | false |
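The class hierarchy above is easiest to read from the calling side. A minimal, hedged usage sketch (assuming a standalone urllib3 installation rather than the pip-vendored copy shown here; the unreachable URL is purely illustrative):
import urllib3
from urllib3.exceptions import MaxRetryError, HTTPError

http = urllib3.PoolManager()
try:
    # MaxRetryError is a RequestError -> PoolError -> HTTPError, so the broad
    # except clause below covers every pool-related failure defined above.
    http.request("GET", "http://unreachable.invalid/", retries=2)
except MaxRetryError as exc:
    print("gave up on %s (reason: %s)" % (exc.url, exc.reason))
except HTTPError as exc:
    print("other urllib3 failure: %s" % exc)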
nodejs/node-gyp | gyp/pylib/gyp/flock_tool.py | 3 | 1859 | #!/usr/bin/env python3
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
executor = FlockTool()
executor.Dispatch(args)
class FlockTool:
"""This class emulates the 'flock' command."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace("-", "")
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY | os.O_NOCTTY | os.O_CREAT, 0o666)
if sys.platform.startswith("aix"):
# Python on AIX is compiled with LARGEFILE support, which changes the
# struct size.
op = struct.pack("hhIllqq", fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
op = struct.pack("hhllhhl", fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| mit | -1,530,245,504,890,301,700 | 32.8 | 81 | 0.628833 | false |
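For context, a hedged sketch of how the tool above is typically driven: gyp's Makefile generator runs it as "gyp-flock-tool flock <lockfile> <command> [args...]", which Dispatch() routes to ExecFlock(). An equivalent direct call, assuming the file is importable as flock_tool (hypothetical module name):
from flock_tool import FlockTool

# Holds /tmp/build.lock via fcntl F_SETLK, then runs the command under the lock.
FlockTool().Dispatch(["flock", "/tmp/build.lock", "echo", "locked section"])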
arrabito/DIRAC | Core/DISET/private/GatewayService.py | 4 | 20150 | """ The gateway service is used for forwarding service calls to the appropriate services.
For this to be used, the following CS option is required:
DIRAC
{
Gateways
{
my.site.org = dips://thisIsAn.url.org:9159/Framework/Gateway
}
}
At the same time, this same gateway service should be run with option /LocalInstallation/Site
which is different from "my.site.org" or whatever is set in the option above, to avoid initialization loops.
"""
__RCSID__ = "$id:"
import sys
import cStringIO
import DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.Core.DISET.private.FileHelper import FileHelper
from DIRAC.Core.DISET.private.MessageBroker import MessageBroker, getGlobalMessageBroker
from DIRAC.Core.DISET.MessageClient import MessageClient
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.DISET.private.Service import Service
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.DISET.TransferClient import TransferClient
from DIRAC.Core.DISET.private.BaseClient import BaseClient
class GatewayService( Service ):
""" Inherits from Service so it can (and should) be run as a DIRAC service,
but replaces several of the internal methods
"""
GATEWAY_NAME = "Framework/Gateway"
def __init__( self ):
""" Initialize like a real service
"""
super(GatewayService, self).__init__(
{'modName':GatewayService.GATEWAY_NAME,
'loadName':GatewayService.GATEWAY_NAME,
'standalone': True,
'moduleObj': sys.modules[DIRAC.Core.DISET.private.GatewayService.GatewayService.__module__],
'classObj': self.__class__} )
self.__delegatedCredentials = DictCache()
self.__transferBytesLimit = 1024 * 1024 * 100
# to be resolved
self._url = None
self._handler = None
self._threadPool = None
self._msgBroker = None
self._msgForwarder = None
def initialize( self ):
""" This replaces the standard initialize from Service
"""
#Build the URLs
self._url = self._cfg.getURL()
if not self._url:
return S_ERROR( "Could not build service URL for %s" % GatewayService.GATEWAY_NAME )
gLogger.verbose( "Service URL is %s" % self._url )
#Load handler
result = self._loadHandlerInit()
if not result[ 'OK' ]:
return result
self._handler = result[ 'Value' ]
#Discover Handler
self._threadPool = ThreadPool( 1,
max( 0, self._cfg.getMaxThreads() ),
self._cfg.getMaxWaitingPetitions() )
self._threadPool.daemonize()
self._msgBroker = MessageBroker( "%sMSB" % GatewayService.GATEWAY_NAME, threadPool = self._threadPool )
self._msgBroker.useMessageObjects( False )
getGlobalMessageBroker().useMessageObjects( False )
self._msgForwarder = MessageForwarder( self._msgBroker )
return S_OK()
def _processInThread( self, clientTransport ):
""" Threaded process function
"""
#Handshake
try:
clientTransport.handshake()
except:
return
#Add to the transport pool
trid = self._transportPool.add( clientTransport )
if not trid:
return
#Receive and check proposal
result = self._receiveAndCheckProposal( trid )
if not result[ 'OK' ]:
self._transportPool.sendAndClose( trid, result )
return
proposalTuple = result[ 'Value' ]
#Instantiate handler
result = self.__getClientInitArgs( trid, proposalTuple )
if not result[ 'OK' ]:
self._transportPool.sendAndClose( trid, result )
return
clientInitArgs = result[ 'Value' ]
#Execute the action
result = self._processProposal( trid, proposalTuple, clientInitArgs )
#Close the connection if required
if result[ 'closeTransport' ]:
self._transportPool.close( trid )
return result
def _receiveAndCheckProposal( self, trid ):
clientTransport = self._transportPool.get( trid )
#Get the peer credentials
credDict = clientTransport.getConnectingCredentials()
#Receive the action proposal
retVal = clientTransport.receiveData( 1024 )
if not retVal[ 'OK' ]:
gLogger.error( "Invalid action proposal", "%s %s" % ( self._createIdentityString( credDict,
clientTransport ),
retVal[ 'Message' ] ) )
return S_ERROR( "Invalid action proposal" )
proposalTuple = retVal[ 'Value' ]
gLogger.debug( "Received action from client", "/".join( list( proposalTuple[1] ) ) )
#Check if there are extra credentials
if proposalTuple[2]:
clientTransport.setExtraCredentials( proposalTuple[2] )
return S_OK( proposalTuple )
def __getClientInitArgs( self, trid, proposalTuple ):
clientTransport = self._transportPool.get( trid )
#Get the peer credentials
credDict = clientTransport.getConnectingCredentials()
if 'x509Chain' not in credDict:
return S_OK()
cKey = ( credDict[ 'DN' ],
credDict.get( 'group', False ),
credDict.get( 'extraCredentials', False ),
credDict[ 'isLimitedProxy' ] )
dP = self.__delegatedCredentials.get( cKey, 3600 )
idString = self._createIdentityString( credDict, clientTransport )
if dP:
gLogger.verbose( "Proxy for %s is cached" % idString )
return S_OK( dP )
result = self.__requestDelegation( clientTransport, credDict )
if not result[ 'OK' ]:
gLogger.warn( "Could not get proxy for %s: %s" % ( idString, result[ 'Message' ] ) )
return result
delChain = result[ 'Value' ]
delegatedChain = delChain.dumpAllToString()[ 'Value' ]
secsLeft = delChain.getRemainingSecs()[ 'Value' ] - 1
clientInitArgs = { BaseClient.KW_SETUP : proposalTuple[0][1],
BaseClient.KW_TIMEOUT : 600,
BaseClient.KW_IGNORE_GATEWAYS : True,
BaseClient.KW_USE_CERTIFICATES : False,
BaseClient.KW_PROXY_STRING : delegatedChain
}
if BaseClient.KW_EXTRA_CREDENTIALS in credDict:
clientInitArgs[ BaseClient.KW_EXTRA_CREDENTIALS ] = credDict[ BaseClient.KW_EXTRA_CREDENTIALS ]
gLogger.warn( "Got delegated proxy for %s: %s secs left" % ( idString, secsLeft ) )
self.__delegatedCredentials.add( cKey, secsLeft, clientInitArgs )
return S_OK( clientInitArgs )
def __requestDelegation( self, clientTransport, credDict ):
peerChain = credDict[ 'x509Chain' ]
retVal = peerChain.getCertInChain()[ 'Value' ].generateProxyRequest()
if not retVal[ 'OK' ]:
return retVal
delegationRequest = retVal[ 'Value' ]
retVal = delegationRequest.dumpRequest()
if not retVal[ 'OK' ]:
retVal = S_ERROR( "Server Error: Can't generate delegation request" )
clientTransport.sendData( retVal )
return retVal
gLogger.info( "Sending delegation request for %s" % delegationRequest.getSubjectDN()[ 'Value' ] )
clientTransport.sendData( S_OK( { 'delegate' : retVal[ 'Value' ] } ) )
delegatedCertChain = clientTransport.receiveData()
delegatedChain = X509Chain( keyObj = delegationRequest.getPKey() )
retVal = delegatedChain.loadChainFromString( delegatedCertChain )
if not retVal[ 'OK' ]:
retVal = S_ERROR( "Error in receiving delegated proxy: %s" % retVal[ 'Message' ] )
clientTransport.sendData( retVal )
return retVal
return S_OK( delegatedChain )
#Msg
def _mbConnect( self, trid, handlerObj = None ):
return S_OK()
def _mbReceivedMsg( self, cliTrid, msgObj ):
return self._msgForwarder.msgFromClient( cliTrid, msgObj )
def _mbDisconnect( self, cliTrid ):
self._msgForwarder.cliDisconnect( cliTrid )
#Execute action
def _executeAction( self, trid, proposalTuple, clientInitArgs ):
clientTransport = self._transportPool.get( trid )
credDict = clientTransport.getConnectingCredentials()
targetService = proposalTuple[0][0]
actionType = proposalTuple[1][0]
actionMethod = proposalTuple[1][1]
idString = self._createIdentityString( credDict, clientTransport )
    #Okay! Let's do the magic!
retVal = clientTransport.receiveData()
if not retVal[ 'OK' ]:
gLogger.error( "Error while receiving file description", retVal[ 'Message' ] )
clientTransport.sendData( S_ERROR( "Error while receiving file description: %s" % retVal[ 'Message' ] ) )
return
if actionType == "FileTransfer":
gLogger.warn( "Received a file transfer action from %s" % idString )
clientTransport.sendData( S_OK( "Accepted" ) )
retVal = self.__forwardFileTransferCall( targetService, clientInitArgs,
actionMethod, retVal[ 'Value' ], clientTransport )
elif actionType == "RPC":
gLogger.info( "Forwarding %s/%s action to %s for %s" % ( actionType, actionMethod, targetService, idString ) )
retVal = self.__forwardRPCCall( targetService, clientInitArgs, actionMethod, retVal[ 'Value' ] )
elif actionType == "Connection" and actionMethod == "new":
gLogger.info( "Initiating a messaging connection to %s for %s" % ( targetService, idString ) )
retVal = self._msgForwarder.addClient( trid, targetService, clientInitArgs, retVal[ 'Value' ] )
else:
gLogger.warn( "Received an invalid %s/%s action from %s" % ( actionType, actionMethod, idString ) )
retVal = S_ERROR( "Unknown type of action (%s)" % actionType )
#TODO: Send back the data?
if 'rpcStub' in retVal:
retVal.pop( 'rpcStub' )
clientTransport.sendData( retVal )
return retVal
def __forwardRPCCall( self, targetService, clientInitArgs, method, params ):
if targetService == "Configuration/Server":
if method == "getCompressedDataIfNewer":
#Relay CS data directly
serviceVersion = gConfigurationData.getVersion()
retDict = { 'newestVersion' : serviceVersion }
clientVersion = params[0]
if clientVersion < serviceVersion:
retDict[ 'data' ] = gConfigurationData.getCompressedData()
return S_OK( retDict )
#Default
rpcClient = RPCClient( targetService, **clientInitArgs )
methodObj = getattr( rpcClient, method )
return methodObj( *params )
def __forwardFileTransferCall( self, targetService, clientInitArgs, method,
params, clientTransport ):
transferRelay = TransferRelay( targetService, **clientInitArgs )
transferRelay.setTransferLimit( self.__transferBytesLimit )
cliFH = FileHelper( clientTransport )
#Check file size
if method.find( "ToClient" ) > -1:
cliFH.setDirection( "send" )
elif method.find( "FromClient" ) > -1:
cliFH.setDirection( "receive" )
if not self.__ftCheckMaxTransferSize( params[2] ):
cliFH.markAsTransferred()
return S_ERROR( "Transfer size is too big" )
#Forward queries
try:
relayMethodObject = getattr( transferRelay, 'forward%s' % method )
except:
return S_ERROR( "Cannot forward unknown method %s" % method )
result = relayMethodObject( cliFH, params )
return result
def __ftCheckMaxTransferSize( self, requestedTransferSize ):
if not self.__transferBytesLimit:
return True
if not requestedTransferSize:
return True
if requestedTransferSize <= self.__transferBytesLimit:
return True
return False
class TransferRelay( TransferClient ):
def setTransferLimit( self, trLimit ):
self.__transferBytesLimit = trLimit
self.__currentMethod = ""
def infoMsg( self, msg, dynMsg = "" ):
gLogger.info( "[%s] %s" % ( self.__currentMethod, msg ), dynMsg )
def errMsg( self, msg, dynMsg = "" ):
gLogger.error( "[%s] %s" % ( self.__currentMethod, msg ), dynMsg )
def getDataFromClient( self, clientFileHelper ):
sIO = cStringIO.StringIO()
self.infoMsg( "About to get data from client" )
result = clientFileHelper.networkToDataSink( sIO, self.__transferBytesLimit )
if not result[ 'OK' ]:
sIO.close()
self.errMsg( "Could not get data from client", result[ 'Message' ] )
return result
data = sIO.getvalue()
sIO.close()
self.infoMsg( "Got %s bytes from client" % len( data ) )
return S_OK( data )
def sendDataToClient( self, clientFileHelper, dataToSend ):
self.infoMsg( "About to get send data to client" )
result = clientFileHelper.BufferToNetwork( dataToSend )
if not result[ 'OK' ]:
self.errMsg( "Could not send data to client", result[ 'Message' ] )
return result
self.infoMsg( "Sent %s bytes from client" % len( dataToSend ) )
return S_OK()
def sendDataToService( self, srvMethod, params, data ):
self.infoMsg( "Sending header request to %s" % self.getDestinationService(), str( params ) )
result = self._sendTransferHeader( srvMethod, params )
if not result[ 'OK' ]:
self.errMsg( "Could not send header", result[ 'Message' ] )
return result
self.infoMsg( "Starting to send data to service" )
trid, srvTransport = result[ 'Value' ]
srvFileHelper = FileHelper( srvTransport )
srvFileHelper.setDirection( "send" )
result = srvFileHelper.BufferToNetwork( data )
if not result[ 'OK' ]:
self.errMsg( "Could send data to server", result[ 'Message' ] )
srvTransport.close()
return result
self.infoMsg( "Data sent to service (%s bytes)" % len( data ) )
retVal = srvTransport.receiveData()
srvTransport.close()
return retVal
def getDataFromService( self, srvMethod, params ):
self.infoMsg( "Sending header request to %s" % self.getDestinationService(), str( params ) )
result = self._sendTransferHeader( srvMethod, params )
if not result[ 'OK' ]:
self.errMsg( "Could not send header", result[ 'Message' ] )
return result
self.infoMsg( "Starting to receive data from service" )
trid, srvTransport = result[ 'Value' ]
srvFileHelper = FileHelper( srvTransport )
srvFileHelper.setDirection( "receive" )
sIO = cStringIO.StringIO()
result = srvFileHelper.networkToDataSink( sIO, self.__transferBytesLimit )
if not result[ 'OK' ]:
self.errMsg( "Could not receive data from server", result[ 'Message' ] )
srvTransport.close()
sIO.close()
return result
dataReceived = sIO.getvalue()
sIO.close()
self.infoMsg( "Received %s bytes from service" % len( dataReceived ) )
retVal = srvTransport.receiveData()
srvTransport.close()
if not retVal[ 'OK' ]:
return retVal
return S_OK( { 'data' : dataReceived, 'srvResponse' : retVal } )
def forwardFromClient( self, clientFileHelper, params ):
fileId, token = params[:2]
self.__currentMethod = "FromClient"
result = self.getDataFromClient( clientFileHelper )
if not result[ 'OK' ]:
return result
dataReceived = result[ 'Value' ]
receivedBytes = clientFileHelper.getTransferedBytes()
return self.sendDataToService( "FromClient", ( fileId, token, receivedBytes ), dataReceived )
def forwardBulkFromClient( self, clientFileHelper, params ):
fileId, token = params[:2]
self.__currentMethod = "BulkFromClient"
result = self.getDataFromClient( clientFileHelper )
if not result[ 'OK' ]:
return result
dataReceived = result[ 'Value' ]
receivedBytes = clientFileHelper.getTransferedBytes()
return self.sendDataToService( "BulkFromClient", ( fileId, token, receivedBytes ), dataReceived )
def forwardToClient( self, clientFileHelper, params ):
fileId, token = params[:2]
self.__currentMethod = "ToClient"
result = self.getDataFromService( "ToClient", ( fileId, token ) )
if not result[ 'OK' ]:
return result
dataReceived = result[ 'Value' ][ 'data' ]
srvResponse = result[ 'Value' ][ 'srvResponse' ]
result = self.sendDataToClient( clientFileHelper, dataReceived )
if not result[ 'OK' ]:
return result
return srvResponse
def forwardBulkToClient( self, clientFileHelper, params ):
fileId, token = params[:2]
self.__currentMethod = "BulkToClient"
result = self.getDataFromService( "BulkToClient", ( fileId, token ) )
if not result[ 'OK' ]:
return result
dataReceived = result[ 'Value' ][ 'data' ]
srvResponse = result[ 'Value' ][ 'srvResponse' ]
result = self.sendDataToClient( clientFileHelper, dataReceived )
if not result[ 'OK' ]:
return result
return srvResponse
def forwardListBulk( self, clientFileHelper, params ):
self.__currentMethod = "ListBulk"
self.infoMsg( "Sending header request to %s" % self.getDestinationService(), str( params ) )
result = self._sendTransferHeader( "ListBulk", params )
if not result[ 'OK' ]:
self.errMsg( "Could not send header", result[ 'Message' ] )
return result
trid, srvTransport = result[ 'Value' ]
response = srvTransport.receiveData( 1048576 )
srvTransport.close()
self.infoMsg( "Sending data back to client" )
return response
class MessageForwarder(object):
def __init__( self, msgBroker ):
self.__inOutLock = LockRing().getLock()
self.__msgBroker = msgBroker
self.__byClient = {}
self.__srvToCliTrid = {}
def addClient( self, cliTrid, destination, clientInitParams, connectParams ):
if cliTrid in self.__byClient:
gLogger.fatal( "Trid is duplicated!! this shouldn't happen" )
return
msgClient = MessageClient( destination, **clientInitParams )
msgClient.subscribeToDisconnect( self.__srvDisconnect )
msgClient.subscribeToAllMessages( self.msgFromSrv )
msgClient.setUniqueName( connectParams[0] )
result = msgClient.connect( **connectParams[1] )
if not result[ 'OK' ]:
return result
self.__inOutLock.acquire()
try:
self.__byClient[ cliTrid ] = { 'srvEnd' : msgClient,
'srvTrid' : msgClient.getTrid(),
'srvName' : destination }
self.__srvToCliTrid[ msgClient.getTrid() ] = cliTrid
finally:
self.__inOutLock.release()
return result
def __srvDisconnect( self, srvEndCli ):
try:
cliTrid = self.__srvToCliTrid[ srvEndCli.getTrid() ]
    except KeyError:
      gLogger.exception( "This shouldn't happen!" )
      return
gLogger.info( "Service %s disconnected messaging connection" % self.__byClient[ cliTrid ][ 'srvName' ] )
self.__msgBroker.removeTransport( cliTrid )
self.__removeClient( cliTrid )
def cliDisconnect( self, cliTrid ):
if cliTrid not in self.__byClient:
gLogger.fatal( "This shouldn't happen!" )
return
gLogger.info( "Client to %s disconnected messaging connection" % self.__byClient[ cliTrid ][ 'srvName' ] )
self.__byClient[ cliTrid ][ 'srvEnd' ].disconnect()
self.__removeClient( cliTrid )
def __removeClient( self, cliTrid ):
self.__inOutLock.acquire()
try:
try:
srvTrid = self.__byClient[ cliTrid ][ 'srvTrid' ]
self.__byClient.pop( cliTrid )
self.__srvToCliTrid.pop( srvTrid )
except Exception as e:
gLogger.exception( "This shouldn't happen!" )
finally:
self.__inOutLock.release()
def msgFromClient( self, cliTrid, msgObj ):
gLogger.info( "Message %s to %s service" % ( msgObj.getName(), self.__byClient[ cliTrid ][ 'srvName' ] ) )
result = self.__byClient[ cliTrid ][ 'srvEnd' ].sendMessage( msgObj )
return result
def msgFromSrv( self, srvEndCli, msgObj ):
try:
cliTrid = self.__srvToCliTrid[ srvEndCli.getTrid() ]
except:
gLogger.exception( "This shouldn't happen" )
return S_ERROR( "MsgFromSrv -> Mismatched srv2cli trid" )
gLogger.info( "Message %s from %s service" % ( msgObj.getName(), self.__byClient[ cliTrid ][ 'srvName' ] ) )
return self.__msgBroker.sendMessage( cliTrid, msgObj )
| gpl-3.0 | -8,134,120,418,209,853,000 | 39.872211 | 116 | 0.662283 | false |
sauloal/cufflinksviewer | venvwin/Lib/encodings/cp856.py | 93 | 12986 | """ Python Character Mapping Codec cp856 generated from 'MAPPINGS/VENDORS/MISC/CP856.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp856',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u05d0' # 0x80 -> HEBREW LETTER ALEF
u'\u05d1' # 0x81 -> HEBREW LETTER BET
u'\u05d2' # 0x82 -> HEBREW LETTER GIMEL
u'\u05d3' # 0x83 -> HEBREW LETTER DALET
u'\u05d4' # 0x84 -> HEBREW LETTER HE
u'\u05d5' # 0x85 -> HEBREW LETTER VAV
u'\u05d6' # 0x86 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0x87 -> HEBREW LETTER HET
u'\u05d8' # 0x88 -> HEBREW LETTER TET
u'\u05d9' # 0x89 -> HEBREW LETTER YOD
u'\u05da' # 0x8A -> HEBREW LETTER FINAL KAF
u'\u05db' # 0x8B -> HEBREW LETTER KAF
u'\u05dc' # 0x8C -> HEBREW LETTER LAMED
u'\u05dd' # 0x8D -> HEBREW LETTER FINAL MEM
u'\u05de' # 0x8E -> HEBREW LETTER MEM
u'\u05df' # 0x8F -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0x90 -> HEBREW LETTER NUN
u'\u05e1' # 0x91 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0x92 -> HEBREW LETTER AYIN
u'\u05e3' # 0x93 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0x94 -> HEBREW LETTER PE
u'\u05e5' # 0x95 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0x96 -> HEBREW LETTER TSADI
u'\u05e7' # 0x97 -> HEBREW LETTER QOF
u'\u05e8' # 0x98 -> HEBREW LETTER RESH
u'\u05e9' # 0x99 -> HEBREW LETTER SHIN
u'\u05ea' # 0x9A -> HEBREW LETTER TAV
u'\ufffe' # 0x9B -> UNDEFINED
u'\xa3' # 0x9C -> POUND SIGN
u'\ufffe' # 0x9D -> UNDEFINED
u'\xd7' # 0x9E -> MULTIPLICATION SIGN
u'\ufffe' # 0x9F -> UNDEFINED
u'\ufffe' # 0xA0 -> UNDEFINED
u'\ufffe' # 0xA1 -> UNDEFINED
u'\ufffe' # 0xA2 -> UNDEFINED
u'\ufffe' # 0xA3 -> UNDEFINED
u'\ufffe' # 0xA4 -> UNDEFINED
u'\ufffe' # 0xA5 -> UNDEFINED
u'\ufffe' # 0xA6 -> UNDEFINED
u'\ufffe' # 0xA7 -> UNDEFINED
u'\ufffe' # 0xA8 -> UNDEFINED
u'\xae' # 0xA9 -> REGISTERED SIGN
u'\xac' # 0xAA -> NOT SIGN
u'\xbd' # 0xAB -> VULGAR FRACTION ONE HALF
u'\xbc' # 0xAC -> VULGAR FRACTION ONE QUARTER
u'\ufffe' # 0xAD -> UNDEFINED
u'\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0xB0 -> LIGHT SHADE
u'\u2592' # 0xB1 -> MEDIUM SHADE
u'\u2593' # 0xB2 -> DARK SHADE
u'\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\ufffe' # 0xB5 -> UNDEFINED
u'\ufffe' # 0xB6 -> UNDEFINED
u'\ufffe' # 0xB7 -> UNDEFINED
u'\xa9' # 0xB8 -> COPYRIGHT SIGN
u'\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0xBD -> CENT SIGN
u'\xa5' # 0xBE -> YEN SIGN
u'\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\ufffe' # 0xC6 -> UNDEFINED
u'\ufffe' # 0xC7 -> UNDEFINED
u'\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0xCF -> CURRENCY SIGN
u'\ufffe' # 0xD0 -> UNDEFINED
u'\ufffe' # 0xD1 -> UNDEFINED
u'\ufffe' # 0xD2 -> UNDEFINED
    u'\ufffe' # 0xD3 -> UNDEFINED
u'\ufffe' # 0xD4 -> UNDEFINED
u'\ufffe' # 0xD5 -> UNDEFINED
    u'\ufffe' # 0xD6 -> UNDEFINED
u'\ufffe' # 0xD7 -> UNDEFINED
u'\ufffe' # 0xD8 -> UNDEFINED
u'\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0xDB -> FULL BLOCK
u'\u2584' # 0xDC -> LOWER HALF BLOCK
u'\xa6' # 0xDD -> BROKEN BAR
u'\ufffe' # 0xDE -> UNDEFINED
u'\u2580' # 0xDF -> UPPER HALF BLOCK
u'\ufffe' # 0xE0 -> UNDEFINED
u'\ufffe' # 0xE1 -> UNDEFINED
u'\ufffe' # 0xE2 -> UNDEFINED
u'\ufffe' # 0xE3 -> UNDEFINED
u'\ufffe' # 0xE4 -> UNDEFINED
u'\ufffe' # 0xE5 -> UNDEFINED
u'\xb5' # 0xE6 -> MICRO SIGN
u'\ufffe' # 0xE7 -> UNDEFINED
u'\ufffe' # 0xE8 -> UNDEFINED
u'\ufffe' # 0xE9 -> UNDEFINED
u'\ufffe' # 0xEA -> UNDEFINED
u'\ufffe' # 0xEB -> UNDEFINED
u'\ufffe' # 0xEC -> UNDEFINED
u'\ufffe' # 0xED -> UNDEFINED
u'\xaf' # 0xEE -> MACRON
u'\xb4' # 0xEF -> ACUTE ACCENT
u'\xad' # 0xF0 -> SOFT HYPHEN
u'\xb1' # 0xF1 -> PLUS-MINUS SIGN
u'\u2017' # 0xF2 -> DOUBLE LOW LINE
u'\xbe' # 0xF3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0xF4 -> PILCROW SIGN
u'\xa7' # 0xF5 -> SECTION SIGN
u'\xf7' # 0xF6 -> DIVISION SIGN
u'\xb8' # 0xF7 -> CEDILLA
u'\xb0' # 0xF8 -> DEGREE SIGN
u'\xa8' # 0xF9 -> DIAERESIS
u'\xb7' # 0xFA -> MIDDLE DOT
u'\xb9' # 0xFB -> SUPERSCRIPT ONE
u'\xb3' # 0xFC -> SUPERSCRIPT THREE
u'\xb2' # 0xFD -> SUPERSCRIPT TWO
u'\u25a0' # 0xFE -> BLACK SQUARE
u'\xa0' # 0xFF -> NO-BREAK SPACE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit | -9,167,299,227,858,570,000 | 40.299674 | 107 | 0.517172 | false |
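The module above only defines the codec tables and entry points; a hedged usage sketch (Python 2, matching the file) showing how it could be registered and exercised by hand, assuming the file is importable as cp856:
import codecs
import cp856

codecs.register(lambda name: cp856.getregentry() if name == "cp856" else None)
print u"\u05d0".encode("cp856")   # '\x80'  (HEBREW LETTER ALEF)
print "\x9c".decode("cp856")      # u'\xa3' (POUND SIGN)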
oopy/micropython | tests/float/math_fun.py | 24 | 2538 | # Tests the functions imported from math
try:
from math import *
except ImportError:
print("SKIP")
raise SystemExit
test_values = [-100., -1.23456, -1, -0.5, 0.0, 0.5, 1.23456, 100.]
test_values_small = [-10., -1.23456, -1, -0.5, 0.0, 0.5, 1.23456, 10.] # so we don't overflow 32-bit precision
unit_range_test_values = [-1., -0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.]
functions = [('sqrt', sqrt, test_values),
('exp', exp, test_values_small),
('log', log, test_values),
('cos', cos, test_values),
('sin', sin, test_values),
('tan', tan, test_values),
('acos', acos, unit_range_test_values),
('asin', asin, unit_range_test_values),
('atan', atan, test_values),
('ceil', ceil, test_values),
('fabs', fabs, test_values),
('floor', floor, test_values),
('trunc', trunc, test_values),
('radians', radians, test_values),
('degrees', degrees, test_values),
]
for function_name, function, test_vals in functions:
print(function_name)
for value in test_vals:
try:
print("{:.5g}".format(function(value)))
except ValueError as e:
print(str(e))
tuple_functions = [('frexp', frexp, test_values),
('modf', modf, test_values),
]
for function_name, function, test_vals in tuple_functions:
print(function_name)
for value in test_vals:
x, y = function(value)
print("{:.5g} {:.5g}".format(x, y))
binary_functions = [('copysign', copysign, [(23., 42.), (-23., 42.), (23., -42.),
(-23., -42.), (1., 0.0), (1., -0.0)]),
('pow', pow, ((1., 0.), (0., 1.), (2., 0.5), (-3., 5.), (-3., -4.),)),
('atan2', atan2, ((1., 0.), (0., 1.), (2., 0.5), (-3., 5.), (-3., -4.),)),
('fmod', fmod, ((1., 1.), (0., 1.), (2., 0.5), (-3., 5.), (-3., -4.),)),
('ldexp', ldexp, ((1., 0), (0., 1), (2., 2), (3., -2), (-3., -4),)),
('log', log, ((2., 2.), (3., 2.), (4., 5.), (0., 1.), (1., 0.), (-1., 1.), (1., -1.), (2., 1.))),
]
for function_name, function, test_vals in binary_functions:
print(function_name)
for value1, value2 in test_vals:
try:
print("{:.5g}".format(function(value1, value2)))
except (ValueError, ZeroDivisionError) as e:
print(type(e))
| mit | -1,109,621,542,418,095,900 | 39.285714 | 117 | 0.458629 | false |
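A short note on why the tests above switch to test_values_small for exp(): MicroPython floats may be single precision, and exp(100) is far beyond the float32 range, while exp(10) is safely representable:
import math
print(math.exp(10))    # ~22026.5, well inside the float32 range (max ~3.4e38)
# math.exp(100) would be ~2.69e43 and overflows single-precision builds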
zrax/moul-scripts | Python/clftYeeshaPage08.py | 6 | 9539 | # -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email [email protected]
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: clftYeeshaPage08
Age: Cleft
Date: May 2003
Author: Adam Van Ornum
Manages and records the finding of Yeesha Pages
!!! NOTE: This file only applies to the cleft but is only used in the global xYeeshaPagesGUI.max file. !!!
"""
from Plasma import *
from PlasmaTypes import *
from PlasmaKITypes import *
from PlasmaVaultConstants import *
from PlasmaNetConstants import *
# define the attributes that will be entered in max
actClickableBook = ptAttribNamedActivator(1,"Act: Clickable Yeesha Page")
GUIDialogObject = ptAttribSceneobject(2, "GUIDialog scene object")
RespOpen = ptAttribResponder(3, "Open Responder")
RespLoop = ptAttribResponder(4, "Loop Responder")
RespClose = ptAttribResponder(5, "Close Responder")
#Linking Books GUI tags
DialogName="YeeshaPageGUI"
kPageButton = 100
kYeeshaPage01 = 201
kYeeshaPage02 = 202
kYeeshaPage03 = 203
kYeeshaPage04 = 204
kYeeshaPage05 = 205
kYeeshaPage06 = 206
kYeeshaPage07 = 207
kYeeshaPage08 = 208
kYeeshaPage09 = 209
kYeeshaPage10 = 210
kYeeshaPage12 = 212
kYeeshaPage13 = 213
kYeeshaPage14 = 214
kYeeshaPage15 = 215
kYeeshaPage16 = 216
kYeeshaPage17 = 217
kYeeshaPage18 = 218
kYeeshaPage19 = 219
kYeeshaPage20 = 220
kYeeshaPage21 = 221
kYeeshaPage22 = 222
kYeeshaPage23 = 223
kYeeshaPage24 = 224
kYeeshaPage25 = 225
kYeeshaPageCancel = 299
isOpen = 0
class clftYeeshaPage08(ptModifier):
"The Yeesha Page 08 cleft imager python code"
def __init__(self):
ptModifier.__init__(self)
self.id = 5312
self.version = 1
print "__init__clftYeeshaPage08 v.", self.version
def OnFirstUpdate(self):
PtLoadDialog(DialogName, self.key)
pass
def __del__(self):
"destructor - get rid of any dialogs that we might have loaded"
#~ PtUnloadDialog(DialogName)
def OnNotify(self,state,id,events):
global LocalAvatar
global isOpen
if id == actClickableBook.id and state and PtWasLocallyNotified(self.key):
#if not PtIsDialogLoaded(DialogName):
# PtLoadDialog(DialogName,self.key)
self.SetStdGUIVisibility(0)
PtShowDialog(DialogName)
RespOpen.run(self.key)
isOpen = 1
elif id == actClickableBook.id and not state and PtWasLocallyNotified(self.key):
if not isOpen:
self.SetStdGUIVisibility(0)
PtShowDialog(DialogName)
RespOpen.run(self.key)
isOpen = 1
elif id == RespOpen.id:
RespLoop.run(self.key)
def OnGUINotify(self,id,control,event):
global isOpen
btnID = 0
if isinstance(control,ptGUIControlButton):
btnID = control.getTagID()
if event == kShowHide:
if control.isEnabled():
#control.show()
if self.GotPage():
mydialog = PtGetDialogFromString(DialogName)
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage08)).disable()
elif event == kAction and btnID == kYeeshaPage08:
PtDebugPrint("DEBUG: clftYeeshaPage08.OnGUINotify():\tPicked up page")
RespClose.run(self.key)
isOpen = 0
PtHideDialog(DialogName)
self.SetStdGUIVisibility(1)
if self.GotPage():
PtDebugPrint ("DEBUG: clftYeeshaPage08.py: You've already found Yeesha Page #8. Move along. Move along.")
return
else:
PtDebugPrint ("DEBUG: clftYeeshaPage08.py: Yeesha Page #8 is new to you.")
PtDebugPrint ("DEBUG: clftYeeshaPage08.py: Trying to update the value of the SDL variable %s to 1" % ("YeeshaPage8"))
vault = ptVault()
if type(vault) != type(None): #is the Vault online?
psnlSDL = vault.getPsnlAgeSDL()
if psnlSDL:
YeeshaPageVar = psnlSDL.findVar("YeeshaPage8")
YeeshaPageVar.setInt(1)
vault.updatePsnlAgeSDL (psnlSDL)
mydialog = PtGetDialogFromString(DialogName)
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage08)).disable()
PtSendKIMessageInt(kStartBookAlert,0)
elif event == kAction and btnID == kYeeshaPageCancel:
RespClose.run(self.key)
isOpen = 0
PtHideDialog(DialogName)
self.SetStdGUIVisibility(1)
def GotPage(self):
vault = ptVault()
if type(vault) != type(None): #is the Vault online?
psnlSDL = vault.getPsnlAgeSDL()
if psnlSDL:
YeeshaPageVar = psnlSDL.findVar("YeeshaPage8")
PtDebugPrint ("DEBUG: clftYeeshaPage08.py: The previous value of the SDL variable %s is %s" % ("YeeshaPage8", YeeshaPageVar.getInt()))
if YeeshaPageVar.getInt() != 0:
PtDebugPrint ("DEBUG: clftYeeshaPage08.py: You've already found Yeesha Page #8. Move along. Move along.")
return 1
else:
return 0
else:
PtDebugPrint("ERROR: clftYeeshaPage08: Error trying to access the Chronicle psnlSDL. psnlSDL = %s" % ( psnlSDL))
return 0
else:
PtDebugPrint("ERROR: clftYeeshaPage08: Error trying to access the Vault. Can't access YeeshaPageChanges chronicle." )
return 0
def SetStdGUIVisibility(self, visible):
global DialogName
if visible:
GUIDialogObject.value.draw.enable()
else:
mydialog = PtGetDialogFromString(DialogName)
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage01)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage02)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage03)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage04)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage05)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage06)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage07)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage09)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage10)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage12)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage13)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage14)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage15)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage16)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage17)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage18)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage19)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage20)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage21)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage22)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage23)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage24)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage25)).hide()
ptGUIControlButton(mydialog.getControlFromTag(kYeeshaPage08)).show()
GUIDialogObject.value.draw.disable() | gpl-3.0 | -3,568,804,313,210,101,000 | 38.421488 | 150 | 0.670406 | false |
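The SDL bookkeeping in GotPage() and OnGUINotify() follows one pattern; a hedged sketch of just that pattern, reusing only the Plasma calls that appear above:
vault = ptVault()
psnlSDL = vault.getPsnlAgeSDL()
if psnlSDL:
    pageVar = psnlSDL.findVar("YeeshaPage8")
    if pageVar.getInt() == 0:          # page not yet collected
        pageVar.setInt(1)
        vault.updatePsnlAgeSDL(psnlSDL)
        PtSendKIMessageInt(kStartBookAlert, 0)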
leon-github/cloudlight | mars/common/log.py | 1 | 17223 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 CloudLight, Inc
# All Rights Reserved.
"""Logging handler.
"""
import ConfigParser
import cStringIO
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
import json
from oslo.config import cfg
# This stripped copy uses _() in a few error messages but never defines it;
# a plain gettext alias keeps the module importable (assumption: no project
# specific translation helper is available here).
from gettext import gettext as _
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user)s %(tenant)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=WARN'
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
pass
# context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
else:
instance_uuid = kwargs.pop('instance_uuid', None)
if instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project})
extra.update({"version": self.version})
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return json.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config):
try:
logging.config.fileConfig(log_config)
except ConfigParser.Error as exc:
raise LogConfigError(log_config, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config:
_load_log_config(CONF.log_config)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not CONF.log_file:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
"""
if CONF.publish_errors:
handler = importutils.import_object(
"neutron.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
"""
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
handler.setFormatter(ContextFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(sdague): default the fancier formating params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formated copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = cStringIO.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
| apache-2.0 | 1,128,744,405,669,572,000 | 32.704501 | 78 | 0.582825 | false |
liorvh/golismero | tools/theHarvester/lib/graphs.py | 9 | 28558 | """
+-------------------------------------------------------------------+
| H T M L - G R A P H S (v4.8) |
| |
| Copyright Gerd Tentler www.gerd-tentler.de/tools |
| Created: Sep. 17, 2002 Last modified: Feb. 13, 2010 |
+-------------------------------------------------------------------+
| This program may be used and hosted free of charge by anyone for |
| personal purpose as long as this copyright notice remains intact. |
| |
| Obtain permission before selling the code for this program or |
| hosting this software on a commercial website or redistributing |
| this software over the Internet or in any other medium. In all |
| cases copyright must remain intact. |
+-------------------------------------------------------------------+
=====================================================================================================
Example:
import graphs
graph = graphs.BarGraph('hBar')
graph.values = [234, 125, 289, 147, 190]
print graph.create()
Returns HTML code
=====================================================================================================
"""
import re, math
class BarGraph:
"""creates horizontal and vertical bar graphs, progress bars and faders"""
def __init__(self, type = ''):
#----------------------------------------------------------------------------------------------------
# Configuration
#----------------------------------------------------------------------------------------------------
self.type = type and type or 'hBar' # graph type: "hBar", "vBar", "pBar", or "fader"
self.values = [] # graph data: list
self.graphBGColor = '' # graph background color: string
self.graphBorder = '' # graph border: string (CSS-spec: "size style color"; doesn't work with NN4)
self.graphPadding = 0 # graph padding: integer (pixels)
self.titles = [] # titles: array or string with comma-separated values
self.titleColor = 'black' # title font color: string
self.titleBGColor = '#C0E0FF' # title background color: string
self.titleBorder = '2px groove white' # title border: string (CSS specification)
self.titleFont = 'Arial, Helvetica' # title font family: string (CSS specification)
self.titleSize = 12 # title font size: integer (pixels)
self.titleAlign = 'center' # title text align: "left", "center", or "right"
self.titlePadding = 2 # title padding: integer (pixels)
self.labels = [] # label names: list or string with comma-separated values
self.labelColor = 'black' # label font color: string
self.labelBGColor = '#C0E0FF' # label background color: string
self.labelBorder = '2px groove white' # label border: string (CSS-spec: "size style color"; doesn't work with NN4)
self.labelFont = 'Arial, Helvetica' # label font family: string (CSS-spec)
self.labelSize = 12 # label font size: integer (pixels)
self.labelAlign = 'center' # label text align: "left", "center", or "right"
self.labelSpace = 0 # additional space between labels: integer (pixels)
self.barWidth = 20 # bar width: integer (pixels)
self.barLength = 1.0 # bar length ratio: float (from 0.1 to 2.9)
self.barColors = [] # bar colors OR bar images: list or string with comma-separated values
self.barBGColor = '' # bar background color: string
self.barBorder = '2px outset white' # bar border: string (CSS-spec: "size style color"; doesn't work with NN4)
self.barLevelColors = [] # bar level colors: ascending list (bLevel, bColor[,...]); draw bars >= bLevel with bColor
self.showValues = 0 # show values: 0 = % only, 1 = abs. and %, 2 = abs. only, 3 = none
        self.baseValue = 0                       # base value: integer or float (only hBar and vBar)
self.absValuesColor = 'black' # abs. values font color: string
self.absValuesBGColor = '#C0E0FF' # abs. values background color: string
self.absValuesBorder = '2px groove white' # abs. values border: string (CSS-spec: "size style color"; doesn't work with NN4)
self.absValuesFont = 'Arial, Helvetica' # abs. values font family: string (CSS-spec)
self.absValuesSize = 12 # abs. values font size: integer (pixels)
self.absValuesPrefix = '' # abs. values prefix: string (e.g. "$")
self.absValuesSuffix = '' # abs. values suffix: string (e.g. " kg")
self.percValuesColor = 'black' # perc. values font color: string
self.percValuesFont = 'Arial, Helvetica' # perc. values font family: string (CSS-spec)
self.percValuesSize = 12 # perc. values font size: integer (pixels)
self.percValuesDecimals = 0 # perc. values number of decimals: integer
self.charts = 1 # number of charts: integer
# hBar/vBar only:
self.legend = [] # legend items: list or string with comma-separated values
self.legendColor = 'black' # legend font color: string
self.legendBGColor = '#F0F0F0' # legend background color: string
self.legendBorder = '2px groove white' # legend border: string (CSS-spec: "size style color"; doesn't work with NN4)
self.legendFont = 'Arial, Helvetica' # legend font family: string (CSS-spec)
self.legendSize = 12 # legend font size: integer (pixels)
self.legendAlign = 'top' # legend vertical align: "top", "center", "bottom"
# debug mode: 0 = off, 1 = on; just views some extra information
self.debug = 0
#----------------------------------------------------------------------------------------------------
# default bar colors; only used if barColors isn't set
__colors = ('#0000FF', '#FF0000', '#00E000', '#A0A0FF', '#FFA0A0', '#00A000')
# error messages
__err_type = 'ERROR: Type must be "hBar", "vBar", "pBar", or "fader"'
# CSS names (don't change)
__cssGRAPH = ''
__cssBAR = ''
__cssBARBG = ''
__cssTITLE = ''
__cssLABEL = ''
__cssLABELBG = ''
__cssLEGEND = ''
__cssLEGENDBG = ''
__cssABSVALUES = ''
__cssPERCVALUES = ''
# search pattern for images
__img_pattern = re.compile(r'\.(jpg|jpeg|jpe|gif|png)')
def set_styles(self):
"""set graph styles"""
if self.graphBGColor: self.__cssGRAPH += 'background-color:' + self.graphBGColor + ';'
if self.graphBorder: self.__cssGRAPH += 'border:' + self.graphBorder + ';'
if self.barBorder: self.__cssBAR += 'border:' + self.barBorder + ';'
if self.barBGColor: self.__cssBARBG += 'background-color:' + self.barBGColor + ';'
if self.titleColor: self.__cssTITLE += 'color:' + self.titleColor + ';'
if self.titleBGColor: self.__cssTITLE += 'background-color:' + self.titleBGColor + ';'
if self.titleBorder: self.__cssTITLE += 'border:' + self.titleBorder + ';'
if self.titleFont: self.__cssTITLE += 'font-family:' + self.titleFont + ';'
if self.titleAlign: self.__cssTITLE += 'text-align:' + self.titleAlign + ';'
if self.titleSize: self.__cssTITLE += 'font-size:' + str(self.titleSize) + 'px;'
if self.titleBGColor: self.__cssTITLE += 'background-color:' + self.titleBGColor + ';'
if self.titlePadding: self.__cssTITLE += 'padding:' + str(self.titlePadding) + 'px;'
if self.labelColor: self.__cssLABEL += 'color:' + self.labelColor + ';'
if self.labelBGColor: self.__cssLABEL += 'background-color:' + self.labelBGColor + ';'
if self.labelBorder: self.__cssLABEL += 'border:' + self.labelBorder + ';'
if self.labelFont: self.__cssLABEL += 'font-family:' + self.labelFont + ';'
if self.labelSize: self.__cssLABEL += 'font-size:' + str(self.labelSize) + 'px;'
if self.labelAlign: self.__cssLABEL += 'text-align:' + self.labelAlign + ';'
if self.labelBGColor: self.__cssLABELBG += 'background-color:' + self.labelBGColor + ';'
if self.legendColor: self.__cssLEGEND += 'color:' + self.legendColor + ';'
if self.legendFont: self.__cssLEGEND += 'font-family:' + self.legendFont + ';'
if self.legendSize: self.__cssLEGEND += 'font-size:' + str(self.legendSize) + 'px;'
if self.legendBGColor: self.__cssLEGENDBG += 'background-color:' + self.legendBGColor + ';'
if self.legendBorder: self.__cssLEGENDBG += 'border:' + self.legendBorder + ';'
if self.absValuesColor: self.__cssABSVALUES += 'color:' + self.absValuesColor + ';'
if self.absValuesBGColor: self.__cssABSVALUES += 'background-color:' + self.absValuesBGColor + ';'
if self.absValuesBorder: self.__cssABSVALUES += 'border:' + self.absValuesBorder + ';'
if self.absValuesFont: self.__cssABSVALUES += 'font-family:' + self.absValuesFont + ';'
if self.absValuesSize: self.__cssABSVALUES += 'font-size:' + str(self.absValuesSize) + 'px;'
if self.percValuesColor: self.__cssPERCVALUES += 'color:' + self.percValuesColor + ';'
if self.percValuesFont: self.__cssPERCVALUES += 'font-family:' + self.percValuesFont + ';'
if self.percValuesSize: self.__cssPERCVALUES += 'font-size:' + str(self.percValuesSize) + 'px;'
def level_color(self, value, color):
"""return bar color for each level"""
if self.barLevelColors:
for i in range(0, len(self.barLevelColors), 2):
try:
if (self.barLevelColors[i] > 0 and value >= self.barLevelColors[i]) or \
(self.barLevelColors[i] < 0 and value <= self.barLevelColors[i]):
color = self.barLevelColors[i+1]
except IndexError: pass
return color
def build_bar(self, value, width, height, color):
"""return a single bar"""
title = self.absValuesPrefix + str(value) + self.absValuesSuffix
bg = self.__img_pattern.search(color) and 'background' or 'bgcolor'
bar = '<table border=0 cellspacing=0 cellpadding=0><tr>'
bar += '<td style="' + self.__cssBAR + '" ' + bg + '="' + color + '"'
bar += (value != '') and ' title="' + title + '">' or '>'
bar += '<div style="width:' + str(width) + 'px; height:' + str(height) + 'px;'
bar += ' line-height:1px; font-size:1px;"></div>'
bar += '</td></tr></table>'
return bar
def build_fader(self, value, width, height, x, color):
"""return a single fader"""
fader = '<table border=0 cellspacing=0 cellpadding=0><tr>'
x -= int(round(width / 2))
if x > 0: fader += '<td width=' + str(x) + '></td>'
fader += '<td>' + self.build_bar(value, width, height, color) + '</td>'
fader += '</tr></table>'
return fader
def build_value(self, val, max_dec, sum = 0, align = ''):
"""return a single bar/fader value"""
val = _number_format(val, max_dec)
if sum: sum = _number_format(sum, max_dec)
value = '<td style="' + self.__cssABSVALUES + '"'
if align: value += ' align=' + align
value += ' nowrap>'
value += ' ' + self.absValuesPrefix + str(val) + self.absValuesSuffix
if sum: value += ' / ' + self.absValuesPrefix + str(sum) + self.absValuesSuffix
value += ' </td>'
return value
def build_legend(self, barColors):
"""return the legend"""
if hasattr(self.legend, 'split'): self.legend = self.legend.split(',')
legend = '<table border=0 cellspacing=0 cellpadding=0><tr>'
legend += '<td style="' + self.__cssLEGENDBG + '">'
legend += '<table border=0 cellspacing=4 cellpadding=0>'
i = 0
for color in barColors:
if len(self.legend) >= i+1:
text = hasattr(self.legend[i], 'strip') and self.legend[i].strip() or str(self.legend[i])
else: text = ''
legend += '<tr>'
legend += '<td>' + self.build_bar('', self.barWidth, self.barWidth, color) + '</td>'
legend += '<td style="' + self.__cssLEGEND + '" nowrap>' + text + '</td>'
legend += '</tr>'
i += 1
legend += '</table></td></tr></table>'
return legend
def build_hTitle(self, titleLabel, titleValue, titleBar):
"""return horizontal titles"""
title = '<tr>'
title += '<td style="' + self.__cssTITLE + '">' + titleLabel + '</td>'
if titleValue != '': title += '<td style="' + self.__cssTITLE + '">' + titleValue + '</td>'
title += '<td style="' + self.__cssTITLE + '">' + titleBar + '</td>'
title += '</tr>'
return title
def create_hBar(self, value, percent, mPerc, mPerc_neg, max_neg, mul, valSpace, bColor, border, spacer, spacer_neg):
"""return a single horizontal bar with label and values (abs./perc.)"""
bar = '<table border=0 cellspacing=0 cellpadding=0 height=100%><tr>'
if percent < 0:
percent *= -1
bar += '<td style="' + self.__cssLABELBG + '" height=' + str(self.barWidth) + ' width=' + str(int(round((mPerc_neg - percent) * mul + valSpace))) + ' align=right nowrap>'
if self.showValues < 2: bar += '<span style="' + self.__cssPERCVALUES + '">' + str(_number_format(percent, self.percValuesDecimals)) + '%</span>'
bar += ' </td><td style="' + self.__cssLABELBG + '">'
bar += self.build_bar(value, int(round(percent * mul)), self.barWidth, bColor)
bar += '</td><td width=' + str(spacer) + '></td>'
else:
if max_neg:
bar += '<td style="' + self.__cssLABELBG + '" width=' + str(spacer_neg) + '>'
bar += '<table border=0 cellspacing=0 cellpadding=0><tr><td></td></tr></table></td>'
if percent:
bar += '<td>'
bar += self.build_bar(value, int(round(percent * mul)), self.barWidth, bColor)
bar += '</td>'
else: bar += '<td width=1 height=' + str(self.barWidth + (border * 2)) + '></td>'
bar += '<td style="' + self.__cssPERCVALUES + '" width=' + str(int(round((mPerc - percent) * mul + valSpace))) + ' align=left nowrap>'
if self.showValues < 2: bar += ' ' + str(_number_format(percent, self.percValuesDecimals)) + '%'
bar += ' </td>'
bar += '</tr></table>'
return bar
def create_vBar(self, value, percent, mPerc, mPerc_neg, max_neg, mul, valSpace, bColor, border, spacer, spacer_neg):
"""return a single vertical bar with label and values (abs./perc.)"""
bar = '<table border=0 cellspacing=0 cellpadding=0 width=100%><tr align=center>'
if percent < 0:
percent *= -1
bar += '<td height=' + str(spacer) + '></td></tr><tr align=center valign=top><td style="' + self.__cssLABELBG + '">'
bar += self.build_bar(value, self.barWidth, int(round(percent * mul)), bColor)
bar += '</td></tr><tr align=center valign=top>'
bar += '<td style="' + self.__cssLABELBG + '" height=' + str(int(round((mPerc_neg - percent) * mul + valSpace))) + ' nowrap>'
bar += (self.showValues < 2) and '<span style="' + self.__cssPERCVALUES + '">' + str(_number_format(percent, self.percValuesDecimals)) + '%</span>' or ' '
bar += '</td>'
else:
bar += '<td style="' + self.__cssPERCVALUES + '" valign=bottom height=' + str(int(round((mPerc - percent) * mul + valSpace))) + ' nowrap>'
if self.showValues < 2: bar += str(_number_format(percent, self.percValuesDecimals)) + '%'
bar += '</td>'
if percent:
bar += '</tr><tr align=center valign=bottom><td>'
bar += self.build_bar(value, self.barWidth, int(round(percent * mul)), bColor)
bar += '</td>'
else: bar += '</tr><tr><td width=' + str(self.barWidth + (border * 2)) + ' height=1></td>'
if max_neg:
bar += '</tr><tr><td style="' + self.__cssLABELBG + '" height=' + str(spacer_neg) + '>'
bar += '<table border=0 cellspacing=0 cellpadding=0><tr><td></td></tr></table></td>'
bar += '</tr></table>'
return bar
def create(self):
"""create a complete bar graph (horizontal, vertical, progress, or fader)"""
self.type = self.type.lower()
d = self.values
t = hasattr(self.titles, 'split') and self.titles.split(',') or self.titles
r = hasattr(self.labels, 'split') and self.labels.split(',') or self.labels
drc = hasattr(self.barColors, 'split') and self.barColors.split(',') or self.barColors
val = []
bc = []
if self.barLength < 0.1: self.barLength = 0.1
elif self.barLength > 2.9: self.barLength = 2.9
labels = (len(d) > len(r)) and len(d) or len(r)
if self.type == 'pbar' or self.type == 'fader':
if not self.barBGColor: self.barBGColor = self.labelBGColor
if self.labelBGColor == self.barBGColor and len(t) == 0:
self.labelBGColor = ''
self.labelBorder = ''
self.set_styles()
graph = '<table border=0 cellspacing=0 cellpadding=' + str(self.graphPadding) + '><tr>'
graph += '<td' + (self.__cssGRAPH and ' style="' + self.__cssGRAPH + '"' or '') + '>'
if self.legend and self.type != 'pbar' and self.type != 'fader':
graph += '<table border=0 cellspacing=0 cellpadding=0><tr><td>'
if self.charts > 1:
divide = math.ceil(labels / self.charts)
graph += '<table border=0 cellspacing=0 cellpadding=6><tr valign=top><td>'
else: divide = 0
sum = 0
max = 0
max_neg = 0
max_dec = 0
ccnt = 0
lcnt = 0
chart = 0
for i in range(labels):
if divide and i and not i % divide:
lcnt = 0
chart += 1
try: drv = len(d[i]) and [e for e in d[i]] or [d[i]]
except: drv = [d[i]]
j = 0
dec = 0
if len(val) <= chart: val.append([])
for v in drv:
s = str(v)
if s.find('.') != -1:
dec = len(s[s.find('.') + 1:])
if dec > max_dec: max_dec = dec
if len(val[chart]) <= lcnt: val[chart].append([])
val[chart][lcnt].append(v)
if v != 0: v -= self.baseValue
if v > max: max = v
elif v < max_neg: max_neg = v
if v < 0: v *= -1
sum += v
if len(bc) <= j:
if ccnt >= len(self.__colors): ccnt = 0
if len(drc) <= j or len(drc[j]) < 3:
bc.append(self.__colors[ccnt])
ccnt += 1
else: bc.append(drc[j].strip())
j += 1
lcnt += 1
border = int(self.barBorder[0])
mPerc = sum and int(round(max * 100.0 / sum)) or 0
if self.type == 'pbar' or self.type == 'fader': mul = 2
else: mul = mPerc and 100.0 / mPerc or 1
mul *= self.barLength
if self.showValues < 2:
if self.type == 'hbar':
valSpace = (self.percValuesDecimals * (self.percValuesSize / 1.6)) + (self.percValuesSize * 3.2)
else: valSpace = self.percValuesSize * 1.2
else: valSpace = self.percValuesSize
spacer = maxSize = int(round(mPerc * mul + valSpace + border * 2))
if max_neg:
mPerc_neg = sum and int(round(-max_neg * 100.0 / sum)) or 0
if mPerc_neg > mPerc and self.type != 'pbar' and self.type != 'fader':
mul = 100.0 / mPerc_neg * self.barLength
spacer_neg = int(round(mPerc_neg * mul + valSpace + border * 2))
maxSize += spacer_neg
else: mPerc_neg = spacer_neg = 0
titleLabel = ''
titleValue = ''
titleBar = ''
if len(t) > 0:
titleLabel = (t[0] == '') and ' ' or t[0]
if self.showValues == 1 or self.showValues == 2:
titleValue = (t[1] == '') and ' ' or t[1]
titleBar = (t[2] == '') and ' ' or t[2]
else: titleBar = (t[1] == '') and ' ' or t[1]
chart = 0
lcnt = 0
for v in val:
graph += '<table border=0 cellspacing=2 cellpadding=0>'
if self.type == 'hbar':
if len(t) > 0: graph += self.build_hTitle(titleLabel, titleValue, titleBar)
for i in range(len(v)):
label = (lcnt < len(r)) and r[lcnt].strip() or str(lcnt + 1)
rowspan = len(v[i])
graph += '<tr><td style="' + self.__cssLABEL + '"' + ((rowspan > 1) and ' rowspan=' + str(rowspan) or '') + '>'
graph += ' ' + label + ' </td>'
for j in range(len(v[i])):
value = v[i][j] and v[i][j] - self.baseValue or 0
percent = sum and value * 100.0 / sum or 0
value = _number_format(v[i][j], max_dec)
bColor = self.level_color(v[i][j], bc[j])
if self.showValues == 1 or self.showValues == 2:
graph += self.build_value(v[i][j], max_dec, 0, 'right')
graph += '<td' + (self.__cssBARBG and ' style="' + self.__cssBARBG + '"' or '') + ' height=100% width=' + str(maxSize) + '>'
graph += self.create_hBar(value, percent, mPerc, mPerc_neg, max_neg, mul, valSpace, bColor, border, spacer, spacer_neg)
graph += '</td></tr>'
if j < len(v[i]) - 1: graph += '<tr>'
if self.labelSpace and i < len(v)-1: graph += '<tr><td colspan=3 height=' + str(self.labelSpace) + '></td></tr>'
lcnt += 1
elif self.type == 'vbar':
graph += '<tr align=center valign=bottom>'
if titleBar != '':
titleBar = titleBar.replace('-', '-<br>')
graph += '<td style="' + self.__cssTITLE + '" valign=middle>' + titleBar + '</td>'
for i in range(len(v)):
for j in range(len(v[i])):
value = v[i][j] and v[i][j] - self.baseValue or 0
percent = sum and value * 100.0 / sum or 0
value = _number_format(v[i][j], max_dec)
bColor = self.level_color(v[i][j], bc[j])
graph += '<td' + (self.__cssBARBG and ' style="' + self.__cssBARBG + '"' or '') + '>'
graph += self.create_vBar(value, percent, mPerc, mPerc_neg, max_neg, mul, valSpace, bColor, border, spacer, spacer_neg)
graph += '</td>'
if self.labelSpace: graph += '<td width=' + str(self.labelSpace) + '></td>'
if self.showValues == 1 or self.showValues == 2:
graph += '</tr><tr align=center>'
if titleValue != '': graph += '<td style="' + self.__cssTITLE + '">' + titleValue + '</td>'
for i in range(len(v)):
for j in range(len(v[i])):
graph += self.build_value(v[i][j], max_dec)
if self.labelSpace: graph += '<td width=' + str(self.labelSpace) + '></td>'
graph += '</tr><tr>'
if titleLabel != '': graph += '<td style="' + self.__cssTITLE + '">' + titleLabel + '</td>'
for i in range(len(v)):
label = (lcnt < len(r)) and r[lcnt].strip() or str(lcnt + 1)
colspan = len(v[i])
graph += '<td style="' + self.__cssLABEL + '"' + ((colspan > 1) and ' colspan=' + str(colspan) or '') + '>'
graph += ' ' + label + ' </td>'
if self.labelSpace: graph += '<td width=' + str(self.labelSpace) + '></td>'
lcnt += 1
graph += '</tr>'
elif self.type == 'pbar' or self.type == 'fader':
if len(t) > 0: graph += self.build_hTitle(titleLabel, titleValue, titleBar)
for i in range(len(v)):
try: m = (len(v[i]) > 1) and True or False
except: m = False
if m or not i:
label = (lcnt < len(r)) and r[lcnt].strip() or str(i + 1)
graph += '<tr>'
if len(r):
graph += '<td style="' + self.__cssLABEL + '">'
graph += ' ' + label + ' </td>'
try: sum = v[i][1] and v[i][1] or v[-1][0]
except: sum = v[-1][0]
percent = sum and v[i][0] * 100.0 / sum or 0
value = _number_format(v[i][0], max_dec)
if self.showValues == 1 or self.showValues == 2:
graph += self.build_value(v[i][0], max_dec, sum, 'right')
graph += '<td' + (self.__cssBARBG and ' style="' + self.__cssBARBG + '"' or '') + '>'
self.barColors = (len(drc) >= i+1) and drc[i].strip() or self.__colors[0]
bColor = self.level_color(v[i][0], self.barColors)
graph += '<table border=0 cellspacing=0 cellpadding=0><tr><td>'
if self.type == 'fader':
graph += self.build_fader(value, int(round(self.barWidth / 2)), self.barWidth, int(round(percent * mul)), bColor)
else: graph += self.build_bar(value, int(round(percent * mul)), self.barWidth, bColor)
graph += '</td><td width=' + str(int(round((100 - percent) * mul))) + '></td>'
graph += '</tr></table></td>'
if self.showValues < 2: graph += '<td style="' + self.__cssPERCVALUES + '" nowrap> ' + str(_number_format(percent, self.percValuesDecimals)) + '%</td>'
graph += '</tr>'
if self.labelSpace and i < len(v)-1: graph += '<td colspan=3 height=' + str(self.labelSpace) + '></td>'
lcnt += 1
else: graph += '<tr><td>' + self.__err_type + '</td></tr>'
graph += '</table>'
if chart < self.charts - 1 and len(val[chart+1]):
graph += '</td>'
if self.type == 'vbar': graph += '</tr><tr valign=top>'
graph += '<td>'
chart += 1
if self.charts > 1: graph += '</td></tr></table>'
if self.legend and self.type != 'pbar' and self.type != 'fader':
graph += '</td><td width=10> </td><td' + (self.legendAlign and ' valign=' + self.legendAlign or '') + '>'
graph += self.build_legend(bc)
graph += '</td></tr></table>'
if self.debug:
graph += "<br>sum=%s max=%s max_neg=%s max_dec=%s " % (sum, max, max_neg, max_dec)
graph += "mPerc=%s mPerc_neg=%s mul=%s valSpace=%s" % (mPerc, mPerc_neg, mul, valSpace)
graph += '</td></tr></table>'
return graph
def _number_format(val, dec):
"""return float with dec decimals; if dec is 0, return integer"""
return dec and ('%.' + str(dec) + 'f') % val or int(round(val))
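# Illustrative usage sketch (an editorial addition, not part of the original
# module): besides the hBar shown in the module docstring, the other graph
# types are configured through the attributes documented in __init__;
# the sample values and labels below are arbitrary.
def _vbar_example():
    graph = BarGraph('vBar')
    graph.values = [80, 120, 60]
    graph.labels = 'Q1,Q2,Q3'
    return graph.create()   # returns the HTML markup for the chart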
if __name__ == '__main__':
print __doc__
| gpl-2.0 | -6,549,902,497,065,745,000 | 51.690037 | 182 | 0.493837 | false |
adamchainz/ansible | test/units/plugins/strategy/test_strategy_base.py | 69 | 21292 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.mock.loader import DictDataLoader
import uuid
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.module_utils.six.moves import queue as Queue
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.plugins.strategy import StrategyBase
class TestStrategyBase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_strategy_base_init(self):
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
def _queue_put(item, *args, **kwargs):
queue_items.append(item)
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_queue.put.side_effect = _queue_put
mock_tqm = MagicMock(TaskQueueManager)
mock_tqm._final_q = mock_queue
mock_tqm._options = MagicMock()
mock_tqm._notified_handlers = {}
mock_tqm._listening_handlers = {}
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base.cleanup()
def test_strategy_base_run(self):
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
def _queue_put(item, *args, **kwargs):
queue_items.append(item)
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_queue.put.side_effect = _queue_put
mock_tqm = MagicMock(TaskQueueManager)
mock_tqm._final_q = mock_queue
mock_tqm._stats = MagicMock()
mock_tqm._notified_handlers = {}
mock_tqm._listening_handlers = {}
mock_tqm.send_callback.return_value = None
for attr in ('RUN_OK', 'RUN_ERROR', 'RUN_FAILED_HOSTS', 'RUN_UNREACHABLE_HOSTS'):
setattr(mock_tqm, attr, getattr(TaskQueueManager, attr))
mock_iterator = MagicMock()
mock_iterator._play = MagicMock()
mock_iterator._play.handlers = []
mock_play_context = MagicMock()
mock_tqm._failed_hosts = dict()
mock_tqm._unreachable_hosts = dict()
mock_tqm._options = MagicMock()
mock_tqm._notified_handlers = {}
mock_tqm._listening_handlers = {}
strategy_base = StrategyBase(tqm=mock_tqm)
mock_host = MagicMock()
mock_host.name = 'host1'
self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context), mock_tqm.RUN_OK)
self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=TaskQueueManager.RUN_ERROR), mock_tqm.RUN_ERROR)
mock_tqm._failed_hosts = dict(host1=True)
mock_iterator.get_failed_hosts.return_value = [mock_host]
self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_FAILED_HOSTS)
mock_tqm._unreachable_hosts = dict(host1=True)
mock_iterator.get_failed_hosts.return_value = []
self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_UNREACHABLE_HOSTS)
strategy_base.cleanup()
def test_strategy_base_get_hosts(self):
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
def _queue_put(item, *args, **kwargs):
queue_items.append(item)
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_queue.put.side_effect = _queue_put
mock_hosts = []
for i in range(0, 5):
mock_host = MagicMock()
mock_host.name = "host%02d" % (i + 1)
mock_host.has_hostkey = True
mock_hosts.append(mock_host)
mock_inventory = MagicMock()
mock_inventory.get_hosts.return_value = mock_hosts
mock_tqm = MagicMock()
mock_tqm._final_q = mock_queue
mock_tqm._notified_handlers = {}
mock_tqm._listening_handlers = {}
mock_tqm.get_inventory.return_value = mock_inventory
mock_play = MagicMock()
mock_play.hosts = ["host%02d" % (i + 1) for i in range(0, 5)]
strategy_base = StrategyBase(tqm=mock_tqm)
mock_tqm._failed_hosts = []
mock_tqm._unreachable_hosts = []
self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts)
mock_tqm._failed_hosts = ["host01"]
self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[1:])
self.assertEqual(strategy_base.get_failed_hosts(play=mock_play), [mock_hosts[0]])
mock_tqm._unreachable_hosts = ["host02"]
self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:])
strategy_base.cleanup()
@patch.object(WorkerProcess, 'run')
def test_strategy_base_queue_task(self, mock_worker):
def fake_run(self):
return
mock_worker.run.side_effect = fake_run
fake_loader = DictDataLoader()
mock_var_manager = MagicMock()
mock_host = MagicMock()
mock_host.get_vars.return_value = dict()
mock_host.has_hostkey = True
mock_inventory = MagicMock()
mock_inventory.get.return_value = mock_host
mock_options = MagicMock()
mock_options.module_path = None
tqm = TaskQueueManager(
inventory=mock_inventory,
variable_manager=mock_var_manager,
loader=fake_loader,
options=mock_options,
passwords=None,
)
tqm._initialize_processes(3)
tqm.hostvars = dict()
try:
strategy_base = StrategyBase(tqm=tqm)
strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
self.assertEqual(strategy_base._cur_worker, 1)
self.assertEqual(strategy_base._pending_results, 1)
strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
self.assertEqual(strategy_base._cur_worker, 2)
self.assertEqual(strategy_base._pending_results, 2)
strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
self.assertEqual(strategy_base._cur_worker, 0)
self.assertEqual(strategy_base._pending_results, 3)
finally:
tqm.cleanup()
def test_strategy_base_process_pending_results(self):
mock_tqm = MagicMock()
mock_tqm._terminated = False
mock_tqm._failed_hosts = dict()
mock_tqm._unreachable_hosts = dict()
mock_tqm.send_callback.return_value = None
mock_tqm._notified_handlers = {}
mock_tqm._listening_handlers = {}
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
def _queue_put(item, *args, **kwargs):
queue_items.append(item)
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_queue.put.side_effect = _queue_put
mock_tqm._final_q = mock_queue
mock_tqm._stats = MagicMock()
mock_tqm._stats.increment.return_value = None
mock_play = MagicMock()
mock_host = MagicMock()
mock_host.name = 'test01'
mock_host.vars = dict()
mock_host.get_vars.return_value = dict()
mock_host.has_hostkey = True
mock_task = MagicMock()
mock_task._role = None
mock_task._parent = None
mock_task.ignore_errors = False
mock_task._uuid = uuid.uuid4()
mock_task.loop = None
mock_task.copy.return_value = mock_task
mock_handler_task = MagicMock(Handler)
mock_handler_task.name = 'test handler'
mock_handler_task.action = 'foo'
mock_handler_task._parent = None
mock_handler_task.get_name.return_value = "test handler"
mock_handler_task.has_triggered.return_value = False
mock_handler_task._uuid = 'xxxxxxxxxxxxx'
mock_handler_task.copy.return_value = mock_handler_task
mock_iterator = MagicMock()
mock_iterator._play = mock_play
mock_iterator.mark_host_failed.return_value = None
mock_iterator.get_next_task_for_host.return_value = (None, None)
mock_iterator.get_original_task.return_value = mock_task
mock_handler_block = MagicMock()
mock_handler_block.block = [mock_handler_task]
mock_handler_block.rescue = []
mock_handler_block.always = []
mock_play.handlers = [mock_handler_block]
mock_tqm._notified_handlers = {mock_handler_task._uuid: []}
mock_tqm._listening_handlers = {}
mock_group = MagicMock()
mock_group.add_host.return_value = None
def _get_host(host_name):
if host_name == 'test01':
return mock_host
return None
def _get_group(group_name):
if group_name in ('all', 'foo'):
return mock_group
return None
mock_inventory = MagicMock()
mock_inventory._hosts_cache = dict()
mock_inventory.hosts.return_value = mock_host
mock_inventory.get_host.side_effect = _get_host
mock_inventory.get_group.side_effect = _get_group
mock_inventory.clear_pattern_cache.return_value = None
mock_inventory.get_host_vars.return_value = {}
mock_inventory.hosts.get.return_value = mock_host
mock_var_mgr = MagicMock()
mock_var_mgr.set_host_variable.return_value = None
mock_var_mgr.set_host_facts.return_value = None
mock_var_mgr.get_vars.return_value = dict()
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base._inventory = mock_inventory
strategy_base._variable_manager = mock_var_mgr
strategy_base._blocked_hosts = dict()
def _has_dead_workers():
return False
strategy_base._tqm.has_dead_workers.side_effect = _has_dead_workers
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 0)
task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True))
queue_items.append(task_result)
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"failed":true}')
queue_items.append(task_result)
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
mock_iterator.is_failed.return_value = True
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
# self.assertIn('test01', mock_tqm._failed_hosts)
# del mock_tqm._failed_hosts['test01']
mock_iterator.is_failed.return_value = False
task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"unreachable": true}')
queue_items.append(task_result)
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
self.assertIn('test01', mock_tqm._unreachable_hosts)
del mock_tqm._unreachable_hosts['test01']
task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"skipped": true}')
queue_items.append(task_result)
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_host=dict(host_name='newhost01', new_groups=['foo']))))
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_group=dict(group_name='foo'))))
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True, _ansible_notify=['test handler'])))
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
self.assertIn(mock_handler_task._uuid, strategy_base._notified_handlers)
self.assertIn(mock_host, strategy_base._notified_handlers[mock_handler_task._uuid])
# queue_items.append(('set_host_var', mock_host, mock_task, None, 'foo', 'bar'))
# results = strategy_base._process_pending_results(iterator=mock_iterator)
# self.assertEqual(len(results), 0)
# self.assertEqual(strategy_base._pending_results, 1)
# queue_items.append(('set_host_facts', mock_host, mock_task, None, 'foo', dict()))
# results = strategy_base._process_pending_results(iterator=mock_iterator)
# self.assertEqual(len(results), 0)
# self.assertEqual(strategy_base._pending_results, 1)
# queue_items.append(('bad'))
# self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator)
strategy_base.cleanup()
def test_strategy_base_load_included_file(self):
fake_loader = DictDataLoader({
"test.yml": """
- debug: msg='foo'
""",
"bad.yml": """
""",
})
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
def _queue_put(item, *args, **kwargs):
queue_items.append(item)
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_queue.put.side_effect = _queue_put
mock_tqm = MagicMock()
mock_tqm._final_q = mock_queue
mock_tqm._notified_handlers = {}
mock_tqm._listening_handlers = {}
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base._loader = fake_loader
strategy_base.cleanup()
mock_play = MagicMock()
mock_block = MagicMock()
mock_block._play = mock_play
mock_block.vars = dict()
mock_task = MagicMock()
mock_task._block = mock_block
mock_task._role = None
mock_task._parent = None
mock_iterator = MagicMock()
mock_iterator.mark_host_failed.return_value = None
mock_inc_file = MagicMock()
mock_inc_file._task = mock_task
mock_inc_file._filename = "test.yml"
res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
mock_inc_file._filename = "bad.yml"
res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
self.assertEqual(res, [])
@patch.object(WorkerProcess, 'run')
def test_strategy_base_run_handlers(self, mock_worker):
def fake_run(*args):
return
mock_worker.side_effect = fake_run
mock_play_context = MagicMock()
mock_handler_task = MagicMock(Handler)
mock_handler_task.action = 'foo'
mock_handler_task.get_name.return_value = "test handler"
mock_handler_task.has_triggered.return_value = False
mock_handler_task.listen = None
mock_handler_task._role = None
mock_handler_task._parent = None
mock_handler_task._uuid = 'xxxxxxxxxxxxxxxx'
mock_handler = MagicMock()
mock_handler.block = [mock_handler_task]
mock_handler.flag_for_host.return_value = False
mock_play = MagicMock()
mock_play.handlers = [mock_handler]
mock_host = MagicMock(Host)
mock_host.name = "test01"
mock_host.has_hostkey = True
mock_inventory = MagicMock()
mock_inventory.get_hosts.return_value = [mock_host]
mock_inventory.get.return_value = mock_host
mock_var_mgr = MagicMock()
mock_var_mgr.get_vars.return_value = dict()
mock_iterator = MagicMock()
mock_iterator._play = mock_play
mock_iterator.get_original_task.return_value = mock_handler_task
fake_loader = DictDataLoader()
mock_options = MagicMock()
mock_options.module_path = None
tqm = TaskQueueManager(
inventory=mock_inventory,
variable_manager=mock_var_mgr,
loader=fake_loader,
options=mock_options,
passwords=None,
)
tqm._initialize_processes(3)
tqm._initialize_notified_handlers(mock_play)
tqm.hostvars = dict()
try:
strategy_base = StrategyBase(tqm=tqm)
strategy_base._inventory = mock_inventory
strategy_base._notified_handlers = {mock_handler_task._uuid: [mock_host]}
task_result = TaskResult(Host('host01'), Handler(), dict(changed=False))
tqm._final_q.put(task_result)
result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context)
finally:
strategy_base.cleanup()
tqm.cleanup()
| gpl-3.0 | 4,507,831,916,795,941,400 | 37.925046 | 157 | 0.626198 | false |
carpyncho/feets | doc/source/JSAnimation/examples.py | 4 | 3126 | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from JSAnimation import IPython_display
def basic_animation(frames=100, interval=30):
"""Plot a basic sine wave with oscillating amplitude"""
fig = plt.figure()
ax = plt.axes(xlim=(0, 10), ylim=(-2, 2))
line, = ax.plot([], [], lw=2)
x = np.linspace(0, 10, 1000)
def init():
line.set_data([], [])
return line,
def animate(i):
y = np.cos(i * 0.02 * np.pi) * np.sin(x - i * 0.02 * np.pi)
line.set_data(x, y)
return line,
return animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=interval)
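# Illustrative usage sketch (an editorial addition, not part of the original
# file): importing JSAnimation.IPython_display above registers an HTML
# representation for animations, so evaluating the returned object in an
# IPython notebook cell renders it inline; outside a notebook it can be saved
# with any available matplotlib writer (the filename and writer here are
# examples only).
def _basic_animation_example():
    anim = basic_animation(frames=50, interval=30)
    # anim  # in a notebook cell, this expression alone displays the animation
    anim.save('sine_wave.gif', writer='imagemagick')
    return anim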
def lorenz_animation(N_trajectories=20, rseed=1, frames=200, interval=30):
"""Plot a 3D visualization of the dynamics of the Lorenz system"""
from scipy import integrate
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
def lorentz_deriv(coords, t0, sigma=10., beta=8./3, rho=28.0):
"""Compute the time-derivative of a Lorentz system."""
x, y, z = coords
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
# Choose random starting points, uniformly distributed from -15 to 15
np.random.seed(rseed)
x0 = -15 + 30 * np.random.random((N_trajectories, 3))
# Solve for the trajectories
t = np.linspace(0, 2, 500)
x_t = np.asarray([integrate.odeint(lorentz_deriv, x0i, t)
for x0i in x0])
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
ax.axis('off')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c, ms=4)
for c in colors], [])
# prepare the axes limits
ax.set_xlim((-25, 25))
ax.set_ylim((-35, 35))
ax.set_zlim((5, 55))
# set point-of-view: specified by (altitude degrees, azimuth degrees)
ax.view_init(30, 0)
# initialization function: plot the background of each frame
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return lines + pts
# animation function: called sequentially
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
i = (2 * i) % x_t.shape[1]
for line, pt, xi in zip(lines, pts, x_t):
x, y, z = xi[:i + 1].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return lines + pts
return animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=interval)
| mit | -6,624,626,424,762,738,000 | 31.226804 | 75 | 0.564939 | false |
welliam/data-structures | src/test_traversal.py | 1 | 5030 | '''Test traversal of adjacency_list.py.'''
import pytest
from .graph import Graph
@pytest.fixture
def self_looped():
g = Graph()
g.add_edge('a', 'a', 0)
return g
# The following graph fixtures each have two associated variables,
# <fixture name>_DEPTH and <fixture name>_BREADTH: lists of two-value tuples
# whose second value must appear after the first when the traversal method
# matching the variable is run from the fixture's start node ('a', or '0-0'
# for the tree fixture).
def comesbefore(t, a, b):
"""Used in testing traversal methods.
Because which branch we traverse first is not guaranteed (or
relevant), we can't test simple equality on the output of
traversal methods-- this method is used instead."""
return b in t[t.index(a):]
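# Illustrative example (an editorial addition, not part of the original tests):
# comesbefore only checks relative order within the traversal output.
def test_comesbefore_helper_example():
    assert comesbefore(['a', 'b', 'c'], 'a', 'c')
    assert not comesbefore(['a', 'b', 'c'], 'c', 'a')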
@pytest.fixture
def simple():
"""A simple, non-looped graph."""
g = Graph()
g.add_edge('a', 'b', 0)
g.add_edge('b', 'c', 0)
g.add_edge('b', 'd', 0)
return g
SIMPLE_DEPTH = [('a', 'b'), ('b', 'c'), ('b', 'd')]
SIMPLE_BREADTH = SIMPLE_DEPTH # same in this case
@pytest.fixture
def complex():
"""A graph with a non-self referential loop."""
g = Graph()
g.add_edge('a', 'b', 0)
g.add_edge('b', 'c', 0)
g.add_edge('c', 'a', 0)
g.add_edge('a', 'dead end', 0)
return g
COMPLEX_DEPTH = [('a', 'b'), ('b', 'c'), ('a', 'dead end')]
COMPLEX_BREADTH = [('a', 'b'), ('b', 'c'), ('dead end', 'c')]
@pytest.fixture
def complex_2():
"""A complex graph with multiple loops."""
g = Graph()
g.add_edge('a', 'b', 0)
g.add_edge('b', 'c', 0)
g.add_edge('c', 'a', 0)
g.add_edge('c', 'b', 0)
g.add_edge('a', 'dead end', 0)
return g
# the same variables as for complex are relevant
@pytest.fixture
def tree():
"""A graph which resembles a binary tree."""
g = Graph()
g.add_edge('0-0', '1-0', 0)
g.add_edge('0-0', '1-1', 0)
g.add_edge('1-0', '2-0', 0)
g.add_edge('1-0', '2-1', 0)
g.add_edge('1-1', '2-2', 0)
g.add_edge('1-1', '2-3', 0)
return g
TREE_DEPTH = [
('0-0', '1-0'),
('1-0', '2-0'),
('1-0', '2-1'),
('0-0', '1-1'),
('1-1', '2-2'),
('1-1', '2-3')
]
TREE_BREADTH = [
('0-0', '1-0'),
('0-0', '1-1'),
('1-0', '2-0'),
('1-0', '2-1'),
('1-0', '2-2'),
('1-0', '2-3'),
('1-1', '2-0'),
('1-1', '2-1'),
('1-1', '2-2'),
('1-1', '2-3')
]
# depth first
def test_depth_traversal_empty(self_looped):
"""Test that depth first traversal throws error on an absent node."""
with pytest.raises(KeyError):
self_looped.depth_first_traversal('b')
def test_depth_traversal_self_looped(self_looped):
"""Test that depth first traversal is traversing at all."""
assert self_looped.depth_first_traversal('a') == ['a']
@pytest.mark.parametrize('a, b', SIMPLE_DEPTH)
def test_depth_traversal_simple(simple, a, b):
"""Test that depth first traverses a nonlooped graph."""
assert comesbefore(simple.depth_first_traversal('a'), a, b)
@pytest.mark.parametrize('a, b', COMPLEX_DEPTH)
def test_depth_traversal_complex(complex, a, b):
"""Test that depth first traverses a more complex looped graph."""
assert comesbefore(complex.depth_first_traversal('a'), a, b)
@pytest.mark.parametrize('a, b', COMPLEX_DEPTH)
def test_depth_traversal_complex_2(complex_2, a, b):
"""Test that depth first traverses an even more complex graph."""
assert comesbefore(complex_2.depth_first_traversal('a'), a, b)
@pytest.mark.parametrize('a, b', TREE_DEPTH)
def test_depth_traversal_tree(tree, a, b):
"""Test that depth first traverses an even more complex graph."""
assert comesbefore(tree.depth_first_traversal('0-0'), a, b)
# breadth first
def test_breadth_traversal_empty(self_looped):
"""Test that breadth first traversal throws error on an absent node."""
with pytest.raises(KeyError):
self_looped.breadth_first_traversal('b')
def test_breadth_traversal_self_looped(self_looped):
"""Test that breadth first traversal is traversing at all."""
assert self_looped.breadth_first_traversal('a') == ['a']
@pytest.mark.parametrize('a, b', SIMPLE_BREADTH)
def test_breadth_traversal_simple(simple, a, b):
"""Test that breadth first traverses a nonlooped graph."""
assert comesbefore(simple.breadth_first_traversal('a'), a, b)
@pytest.mark.parametrize('a, b', COMPLEX_BREADTH)
def test_breadth_traversal_complex(complex, a, b):
"""Test that breadth first traverses a more complex looped graph."""
assert comesbefore(complex.breadth_first_traversal('a'), a, b)
@pytest.mark.parametrize('a, b', COMPLEX_BREADTH)
def test_breadth_traversal_complex_2(complex_2, a, b):
"""Test that breadth first traverses an even more complex graph."""
assert comesbefore(complex_2.breadth_first_traversal('a'), a, b)
@pytest.mark.parametrize('a, b', TREE_BREADTH)
def test_breadth_traversal_tree(tree, a, b):
"""Test that breadth first traverses an even more complex graph."""
assert comesbefore(tree.breadth_first_traversal('0-0'), a, b)
| mit | 2,107,962,297,825,124,900 | 27.258427 | 75 | 0.622068 | false |
seanaedmiston/gensim | gensim/corpora/lowcorpus.py | 73 | 7185 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Corpus in GibbsLda++ format of List-Of-Words.
"""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
from six import iteritems, iterkeys
from six.moves import xrange, zip as izip
logger = logging.getLogger('gensim.corpora.lowcorpus')
def split_on_space(s):
return [word for word in utils.to_unicode(s).strip().split(' ') if word]
class LowCorpus(IndexedCorpus):
"""
List_Of_Words corpus handles input in GibbsLda++ format.
Quoting http://gibbslda.sourceforge.net/#3.2_Input_Data_Format::
Both data for training/estimating the model and new data (i.e., previously
unseen data) have the same format as follows:
[M]
[document1]
[document2]
...
[documentM]
in which the first line is the total number for documents [M]. Each line
after that is one document. [documenti] is the ith document of the dataset
that consists of a list of Ni words/terms.
[documenti] = [wordi1] [wordi2] ... [wordiNi]
in which all [wordij] (i=1..M, j=1..Ni) are text strings and they are separated
by the blank character.
"""
def __init__(self, fname, id2word=None, line2words=split_on_space):
"""
Initialize the corpus from a file.
`id2word` and `line2words` are optional parameters.
If provided, `id2word` is a dictionary mapping between word_ids (integers)
and words (strings). If not provided, the mapping is constructed from
the documents.
`line2words` is a function which converts lines into tokens. Defaults to
simple splitting on spaces.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s" % fname)
self.fname = fname # input file, see class doc for format
self.line2words = line2words # how to translate lines into words (simply split on space by default)
self.num_docs = self._calculate_num_docs()
if not id2word:
# build a list of all word types in the corpus (distinct words)
logger.info("extracting vocabulary from the corpus")
all_terms = set()
self.use_wordids = False # return documents as (word, wordCount) 2-tuples
for doc in self:
all_terms.update(word for word, wordCnt in doc)
all_terms = sorted(all_terms) # sort the list of all words; rank in that list = word's integer id
self.id2word = dict(izip(xrange(len(all_terms)), all_terms)) # build a mapping of word id(int) -> word (string)
else:
logger.info("using provided word mapping (%i ids)" % len(id2word))
self.id2word = id2word
self.num_terms = len(self.word2id)
self.use_wordids = True # return documents as (wordIndex, wordCount) 2-tuples
logger.info("loaded corpus with %i documents and %i terms from %s" %
(self.num_docs, self.num_terms, fname))
def _calculate_num_docs(self):
# the first line in input data is the number of documents (integer). throws exception on bad input.
with utils.smart_open(self.fname) as fin:
try:
result = int(next(fin))
except StopIteration:
result = 0
return result
def __len__(self):
return self.num_docs
def line2doc(self, line):
words = self.line2words(line)
if self.use_wordids:
# get all distinct terms in this document, ignore unknown words
uniq_words = set(words).intersection(iterkeys(self.word2id))
# the following creates a unique list of words *in the same order*
# as they were in the input. when iterating over the documents,
# the (word, count) pairs will appear in the same order as they
# were in the input (bar duplicates), which looks better.
# if this was not needed, we might as well have used useWords = set(words)
use_words, marker = [], set()
for word in words:
if (word in uniq_words) and (word not in marker):
use_words.append(word)
marker.add(word)
# construct a list of (wordIndex, wordFrequency) 2-tuples
doc = list(zip(map(self.word2id.get, use_words),
map(words.count, use_words)))
else:
uniq_words = set(words)
# construct a list of (word, wordFrequency) 2-tuples
doc = list(zip(uniq_words, map(words.count, uniq_words)))
# return the document, then forget it and move on to the next one
# note that this way, only one doc is stored in memory at a time, not the whole corpus
return doc
def __iter__(self):
"""
Iterate over the corpus, returning one bag-of-words vector at a time.
"""
with utils.smart_open(self.fname) as fin:
for lineno, line in enumerate(fin):
if lineno > 0: # ignore the first line = number of documents
yield self.line2doc(line)
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
"""
Save a corpus in the List-of-words format.
This function is automatically called by `LowCorpus.serialize`; don't
call it directly, call `serialize` instead.
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
logger.info("storing corpus in List-Of-Words format into %s" % fname)
truncated = 0
offsets = []
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8('%i\n' % len(corpus)))
for doc in corpus:
words = []
for wordid, value in doc:
if abs(int(value) - value) > 1e-6:
truncated += 1
words.extend([utils.to_unicode(id2word[wordid])] * int(value))
offsets.append(fout.tell())
fout.write(utils.to_utf8('%s\n' % ' '.join(words)))
if truncated:
logger.warning("List-of-words format can only save vectors with "
"integer elements; %i float entries were truncated to integer value" %
truncated)
return offsets
def docbyoffset(self, offset):
"""
Return the document stored at file position `offset`.
"""
with utils.smart_open(self.fname) as f:
f.seek(offset)
return self.line2doc(f.readline())
@property
def id2word(self):
return self._id2word
@id2word.setter
def id2word(self, val):
self._id2word = val
self.word2id = dict((v, k) for k, v in iteritems(val))
# endclass LowCorpus
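# Illustrative usage sketch (an editorial addition, not part of the original
# module): serialize() is inherited from IndexedCorpus and calls save_corpus()
# above; the path and the two tiny bag-of-words documents are examples only.
def _lowcorpus_example():
    bow_corpus = [[(0, 2), (1, 1)], [(1, 1)]]
    id2word = {0: 'human', 1: 'computer'}
    LowCorpus.serialize('/tmp/example_corpus.low', bow_corpus, id2word=id2word)
    return [doc for doc in LowCorpus('/tmp/example_corpus.low', id2word=id2word)]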
| gpl-3.0 | 1,105,859,564,387,707,900 | 36.815789 | 123 | 0.597912 | false |
BeATz-UnKNoWN/python-for-android | python3-alpha/python3-src/Lib/abc.py | 52 | 7650 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractclassmethod(classmethod):
"""A decorator indicating abstract classmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractclassmethod
def my_abstract_classmethod(cls, ...):
...
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractstaticmethod(staticmethod):
"""A decorator indicating abstract staticmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractstaticmethod
def my_abstract_staticmethod(...):
...
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractproperty(property):
"""A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C(metaclass=ABCMeta):
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = {name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False)}
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC."""
if not isinstance(subclass, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print("%s: %r" % (name, value), file=file)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking
subclass = instance.__class__
if subclass in cls._abc_cache:
return True
subtype = type(instance)
if subtype is subclass:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subclass in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subclass)
return any(cls.__subclasscheck__(c) for c in {subclass, subtype})
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
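# Illustrative sketch (not part of the stdlib module): a minimal demonstration
# of virtual subclassing with the ABCMeta defined above. The class names here
# are made up for the example.
if __name__ == "__main__":
    class Sized(metaclass=ABCMeta):
        @abstractmethod
        def __len__(self):
            ...

    class FakeContainer:
        """Unrelated concrete class; never inherits from Sized."""
        def __len__(self):
            return 0

    Sized.register(FakeContainer)              # register as a virtual subclass
    assert issubclass(FakeContainer, Sized)    # resolved via __subclasscheck__
    assert isinstance(FakeContainer(), Sized)  # resolved via __instancecheck__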
| apache-2.0 | 1,353,834,054,705,804,000 | 35.255924 | 79 | 0.612026 | false |
phillxnet/rockstor-core | src/rockstor/storageadmin/models/user.py | 2 | 3142 | """
Copyright (c) 2012-2021 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import grp
import chardet
from django.conf import settings
from django.contrib.auth.models import User as DjangoUser
from django.core.validators import validate_email
from django.db import models
from storageadmin.models import Group
from system.users import ifp_get_groupname
class User(models.Model):
user = models.OneToOneField(DjangoUser, null=True, blank=True, related_name="suser")
username = models.CharField(max_length=4096, unique=True, default="")
uid = models.IntegerField(default=settings.START_UID)
gid = models.IntegerField(default=settings.START_UID)
public_key = models.CharField(max_length=4096, null=True, blank=True)
smb_shares = models.ManyToManyField("SambaShare", related_name="admin_users")
shell = models.CharField(max_length=1024, null=True)
homedir = models.CharField(max_length=1024, null=True)
email = models.CharField(
max_length=1024, null=True, blank=True, validators=[validate_email]
)
# 'admin' field represents indicator of Rockstor web admin capability.
admin = models.BooleanField(default=True)
group = models.ForeignKey(Group, null=True, blank=True)
@property
def groupname(self, *args, **kwargs):
if self.group is not None:
return self.group.groupname
if self.gid is not None:
try:
groupname = grp.getgrgid(self.gid).gr_name
charset = chardet.detect(groupname)
groupname = groupname.decode(charset["encoding"])
return groupname
except Exception:
                # Failed to fetch the group via grp, so let's try with infopipe
return ifp_get_groupname(self.gid)
return None
@property
def managed_user(self, *args, **kwargs):
return getattr(self, "editable", True)
@managed_user.setter
def managed_user(self, val, *args, **kwargs):
self.editable = val
@property
def has_pincard(self, *args, **kwargs):
return getattr(self, "pincard_exist", False)
@has_pincard.setter
def has_pincard(self, val, *args, **kwargs):
self.pincard_exist = val
@property
def pincard_allowed(self, *args, **kwargs):
return getattr(self, "pincard_enabled", "no")
@pincard_allowed.setter
def pincard_allowed(self, val, *args, **kwargs):
self.pincard_enabled = val
class Meta:
app_label = "storageadmin"
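# Illustrative sketch (requires a configured Rockstor/Django environment): the
# transient flags above (managed_user, has_pincard, pincard_allowed) are plain
# getattr-backed attributes, so they can be toggled on an instance without a
# database write. The function below only illustrates that behaviour.
def _example_transient_flags(user):
    """Show how managed_user falls back to True until explicitly set."""
    before = user.managed_user        # True while 'editable' was never assigned
    user.managed_user = False         # sets the backing 'editable' attribute
    return before, user.managed_user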
| gpl-3.0 | 7,771,979,358,606,026,000 | 35.114943 | 88 | 0.690006 | false |
juniorh/dummyDbGen | postgresql/genDocDb.postgres.testData.py | 1 | 3203 | #!/usr/bin/env python
# How to use
# >python genDocDb.postgres.py -h localhost -P 5432 -u username -p password -d database -t table -n 1000 -r 1
# library:
# pip install psycopg2
import argparse
import psycopg2
import random
import math
import time
import sys
def get_args_parser():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"-h", "--host",
default="localhost",
nargs='?',
type=str,
help="Connect to host.")
parser.add_argument(
"-P", "--port",
default=5432,
nargs='?',
type=int,
help="Port number to use for connection.")
parser.add_argument(
"-u", "--username",
default=None,
nargs='?',
type=str,
help="Username for login to server.")
parser.add_argument(
"-p", "--password",
default=None,
nargs='?',
type=str,
help="Password for login to server.")
parser.add_argument(
"-d", "--database",
default=None,
nargs='?',
type=str,
help="Select database.")
parser.add_argument(
"-t", "--table",
default=None,
nargs='?',
type=str,
help="Select table")
parser.add_argument(
"-r", "--report",
default=0,
nargs='?',
type=int,
help="Print report every r second")
parser.add_argument(
"-i", "--input",
default=None,
nargs='?',
type=str,
help="Store key to file")
parser.add_argument(
"-v", "--verbose",
default=False,
action='store_true',
help="Verbose query")
parser.add_argument(
"--help",
default=False,
action='store_true',
help="Show this help"
)
return parser
scheme = "public"
defaultdb = "postgres"
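# Illustrative sketch (not part of the original script): the file passed with
# -i/--input is expected to hold one record per line, ';'-separated, with the
# lookup name first. The helper below shows the SELECT built for such a line;
# the table name is made up.
def _example_query(line="alice;42;extra\n", table="testdata"):
    """Return the SELECT statement generated for one input line."""
    keys = line.split(';')
    return "select * from " + table + " where name = '" + keys[0] + "';"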
if __name__ == '__main__':
parser = get_args_parser()
args = parser.parse_args()
conn = None
db = None
t_start = None
r_ok = 0
r_fail = 0
r_multi = 0
f = None
if args.help or not args.database or not args.table or not args.username or not args.password :
parser.print_help()
parser.exit()
sys.exit()
try:
conn = psycopg2.connect(host=args.host,port=int(args.port),user=args.username,password=args.password,database=args.database)
db = conn.cursor()
#print "Connection: "+str(conn.status)
except Exception, err:
print err
sys.exit()
  if args.input:
    try:
      f = open(args.input,"r")
    except Exception, err:
      print err
      sys.exit()
  else:
    # the input key file is required; fail fast instead of crashing on f.readline()
    print "Error: input key file (-i/--input) is required"
    parser.print_help()
    sys.exit()
# Generate dummy data
t = time.time()
t_start = t
while True:
line = f.readline()
if not line:
break
keys = line.split(';')
query = "select * from "+args.table+" where name = '"+keys[0]+"';"
# print query
db.execute(query)
res = db.fetchall()
if args.verbose:
print query
print res
if len(res):
r_ok = r_ok+1
if len(res) > 1:
r_multi = r_multi+1
else:
r_fail = r_fail+1
if args.report:
if time.time() - t > args.report:
t = time.time()
print "r_ok:"+str(r_ok)+" r_fail:"+str(r_fail)+" r_multi:"+str(r_multi)+" current_value:"+str(res)
conn.close()
print "Last_value:"+str(res)+"\n"
print "Finish test read from postgres : "+"r_ok:"+str(r_ok)+" r_fail:"+str(r_fail)+" r_multi:"+str(r_multi)+" time:"+str(time.time()-t_start)
| gpl-2.0 | 4,912,814,830,365,071,000 | 22.725926 | 143 | 0.589447 | false |
asimshankar/tensorflow | tensorflow/contrib/graph_editor/tests/edit_test.py | 132 | 3104 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import graph_editor as ge
from tensorflow.contrib.graph_editor.tests import match
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class EditTest(test.TestCase):
"""edit module test.
Generally the tests are in two steps:
- modify an existing graph.
- then make sure it has the expected topology using the graph matcher.
"""
def setUp(self):
self.graph = ops.Graph()
with self.graph.as_default():
self.a = constant_op.constant([1., 1.], shape=[2], name="a")
with ops.name_scope("foo"):
self.b = constant_op.constant([2., 2.], shape=[2], name="b")
self.c = math_ops.add(self.a, self.b, name="c")
self.d = constant_op.constant([3., 3.], shape=[2], name="d")
with ops.name_scope("bar"):
self.e = math_ops.add(self.c, self.d, name="e")
self.f = math_ops.add(self.c, self.d, name="f")
self.g = math_ops.add(self.c, self.a, name="g")
with ops.control_dependencies([self.c.op]):
self.h = math_ops.add(self.f, self.g, name="h")
def test_detach(self):
"""Test for ge.detach."""
sgv = ge.sgv(self.c.op, self.a.op)
control_outputs = ge.ControlOutputs(self.graph)
ge.detach(sgv, control_ios=control_outputs)
# make sure the detached graph is as expected.
self.assertTrue(
match.OpMatcher("^foo/c$").input_ops("a", "geph__b_0")(self.c.op))
def test_connect(self):
"""Test for ge.connect."""
with self.graph.as_default():
x = constant_op.constant([1., 1.], shape=[2], name="x")
y = constant_op.constant([2., 2.], shape=[2], name="y")
z = math_ops.add(x, y, name="z")
sgv = ge.sgv(x.op, y.op, z.op)
ge.connect(sgv, ge.sgv(self.e.op).remap_inputs([0]))
self.assertTrue(
match.OpMatcher("^foo/bar/e$").input_ops("^z$", "foo/d$")(self.e.op))
def test_bypass(self):
"""Test for ge.bypass."""
ge.bypass(ge.sgv(self.f.op).remap_inputs([0]))
self.assertTrue(
match.OpMatcher("^foo/bar/h$").input_ops("^foo/c$", "foo/bar/g$")(
self.h.op))
if __name__ == "__main__":
test.main()
| apache-2.0 | -2,853,992,519,027,261,400 | 37.320988 | 80 | 0.633054 | false |
yatish27/mase | src/stockflow.py | 9 | 2560 | from __future__ import print_function, division
import sys
sys.dont_write_bytecode = True
from ok import *
import random
r = random.random
isa = isinstance
"""
# Compartmental Modeling
## Diapers
q +-----+ r +-----+
---->| C |---->| D |--> s
^ +-----+ +-+---+
| |
+-----------------+
C = stock of clean diapers
D = stock of dirty diapers
q = inflow of clean diapers
r = flow of clean diapers to dirty diapers
s = out-flow of dirty diapers
"""
class o:
"""Emulate Javascript's uber simple objects.
Note my convention: I use "`i`" not "`this`."""
def has(i) : return i.__dict__
def __init__(i,**d) : i.has().update(d)
def __setitem__(i,k,v) : i.has()[k] = v
def __getitem__(i,k) : return i.has()[k]
def __repr__(i) : return 'o'+str(i.has())
def copy(i):
j = o()
for k in i.has(): j[k] = i[k]
return j
  def asList(i,keys=None):
    keys = keys or list(i.has().keys())
    return [i[k] for k in keys]
class Has:
def __init__(i,init,lo=0,hi=100):
i.init,i.lo,i.hi = init,lo,hi
def restrain(i,x):
return max(i.lo,
min(i.hi, x))
def rank(i):
if isa(i,Flow) : return 3
if isa(i,Stock): return 1
if isa(i,Aux) : return 2
def __repr__(i):
return str(dict(what=i.__class__.__name__,
name= i.name,init= i.init,
lo = i.lo, hi = i.hi))
class Flow(Has) : pass
class Stock(Has): pass
class Aux(Has) : pass
F,S,A=Flow,Stock,Aux
class Model:
def about(i):
tmp=i.have()
for k,v in tmp.has().items():
v.name = k
return tmp
  def run(i,dt=1,tmax=100):
    print(r())
    t,u, keep = 0, o(), []
    about = i.about()
    keys  = sorted(about.has().keys(),
                   key=lambda z: about[z].rank())
    print(keys)
    for k,a in about.has().items():
      u[k] = a.init
    keep = [["t"] + keys,
            [0] + u.asList(keys)]
    while t < tmax:
      v = u.copy()
      i.step(dt,t,u,v)
      for k in about.has():
        v[k] = about[k].restrain(v[k])
      t += dt
      keep += [[t] + v.asList(keys)]
      u = v
    return keep
class Diapers(Model):
def have(i):
return o(C = S(20), D = S(0),
q = F(0), r = F(8), s = F(0))
def step(i,dt,t,u,v):
def saturday(x): return int(x) % 7 == 6
v.C += dt*(u.q - u.r)
v.D += dt*(u.r - u.s)
v.q = 70 if saturday(t) else 0
v.s = u.D if saturday(t) else 0
if t == 27: # special case (the day i forget)
v.s = 0
@ok
def _diapers1():
  print(Diapers().about())
| unlicense | -4,875,680,603,400,262,000 | 22.934579 | 50 | 0.496094 | false |
tonybaloney/st2 | contrib/linux/sensors/file_watch_sensor.py | 1 | 1965 | import os
from logshipper.tail import Tail
from st2reactor.sensor.base import Sensor
class FileWatchSensor(Sensor):
def __init__(self, sensor_service, config=None):
super(FileWatchSensor, self).__init__(sensor_service=sensor_service,
config=config)
self._trigger_ref = 'linux.file_watch.line'
self._logger = self._sensor_service.get_logger(__name__)
self._file_paths = [] # stores a list of file paths we are monitoring
self._tail = None
def setup(self):
self._tail = Tail(filenames=[])
self._tail.handler = self._handle_line
self._tail.should_run = True
def run(self):
self._tail.run()
def cleanup(self):
if self._tail:
self._tail.should_run = False
try:
self._tail.notifier.stop()
except Exception:
pass
def add_trigger(self, trigger):
file_path = trigger['parameters'].get('file_path', None)
if not file_path:
self._logger.error('Received trigger type without "file_path" field.')
return
self._tail.add_file(filename=file_path)
self._logger.info('Added file "%s"' % (file_path))
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
file_path = trigger['parameters'].get('file_path', None)
if not file_path:
self._logger.error('Received trigger type without "file_path" field.')
return
self._tail.remove_file(filename=file_path)
self._logger.info('Removed file "%s"' % (file_path))
def _handle_line(self, file_path, line):
trigger = self._trigger_ref
payload = {
'file_path': file_path,
'file_name': os.path.basename(file_path),
'line': line
}
self.sensor_service.dispatch(trigger=trigger, payload=payload)
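# Illustrative sketch (not part of the original sensor): every new line in a
# watched file is dispatched on the 'linux.file_watch.line' trigger with a
# payload shaped like the one below; the path and line are made up.
def _example_payload(file_path='/var/log/syslog', line='disk full'):
    """Build the payload dict this sensor dispatches for one new line."""
    return {
        'file_path': file_path,
        'file_name': os.path.basename(file_path),
        'line': line
    }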
| apache-2.0 | -604,379,355,046,718,600 | 28.328358 | 82 | 0.573537 | false |
skumar07/Air-Share-Real | boilerplate/external/babel/messages/tests/data/setup.py | 19 | 1044 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8
# =============================================================================
# $Id: setup.py 114 2007-06-14 21:17:14Z palgarvio $
# =============================================================================
# $URL: http://svn.edgewall.org/repos/babel/tags/0.9.6/babel/messages/test/data/setup.py $
# $LastChangedDate: 2007-06-14 23:17:14 +0200 (do, 14 jun 2007) $
# $Rev: 114 $
# $LastChangedBy: palgarvio $
# =============================================================================
# Copyright (C) 2006 Ufsoft.org - Pedro Algarvio <[email protected]>
#
# Please view LICENSE for additional licensing information.
# =============================================================================
# THIS IS A BOGUS PROJECT
from setuptools import setup, find_packages
setup(
name = 'TestProject',
version = '0.1',
license = 'BSD',
author = 'Foo Bar',
author_email = '[email protected]',
packages = find_packages(),
)
)
| lgpl-3.0 | -5,982,756,631,565,937,000 | 35.285714 | 90 | 0.441571 | false |
mlc0202/ssdb | deps/cpy/antlr3/tokens.py | 99 | 12016 | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import EOF, DEFAULT_CHANNEL, INVALID_TOKEN_TYPE
############################################################################
#
# basic token interface
#
############################################################################
class Token(object):
"""@brief Abstract token baseclass."""
def getText(self):
"""@brief Get the text of the token.
Using setter/getter methods is deprecated. Use o.text instead.
"""
raise NotImplementedError
def setText(self, text):
"""@brief Set the text of the token.
Using setter/getter methods is deprecated. Use o.text instead.
"""
raise NotImplementedError
def getType(self):
"""@brief Get the type of the token.
Using setter/getter methods is deprecated. Use o.type instead."""
raise NotImplementedError
def setType(self, ttype):
"""@brief Get the type of the token.
Using setter/getter methods is deprecated. Use o.type instead."""
raise NotImplementedError
def getLine(self):
"""@brief Get the line number on which this token was matched
Lines are numbered 1..n
Using setter/getter methods is deprecated. Use o.line instead."""
raise NotImplementedError
def setLine(self, line):
"""@brief Set the line number on which this token was matched
Using setter/getter methods is deprecated. Use o.line instead."""
raise NotImplementedError
def getCharPositionInLine(self):
"""@brief Get the column of the tokens first character,
Columns are numbered 0..n-1
Using setter/getter methods is deprecated. Use o.charPositionInLine instead."""
raise NotImplementedError
def setCharPositionInLine(self, pos):
"""@brief Set the column of the tokens first character,
Using setter/getter methods is deprecated. Use o.charPositionInLine instead."""
raise NotImplementedError
def getChannel(self):
"""@brief Get the channel of the token
Using setter/getter methods is deprecated. Use o.channel instead."""
raise NotImplementedError
def setChannel(self, channel):
"""@brief Set the channel of the token
Using setter/getter methods is deprecated. Use o.channel instead."""
raise NotImplementedError
def getTokenIndex(self):
"""@brief Get the index in the input stream.
An index from 0..n-1 of the token object in the input stream.
This must be valid in order to use the ANTLRWorks debugger.
Using setter/getter methods is deprecated. Use o.index instead."""
raise NotImplementedError
def setTokenIndex(self, index):
"""@brief Set the index in the input stream.
Using setter/getter methods is deprecated. Use o.index instead."""
raise NotImplementedError
def getInputStream(self):
"""@brief From what character stream was this token created.
You don't have to implement but it's nice to know where a Token
comes from if you have include files etc... on the input."""
raise NotImplementedError
def setInputStream(self, input):
"""@brief From what character stream was this token created.
You don't have to implement but it's nice to know where a Token
comes from if you have include files etc... on the input."""
raise NotImplementedError
############################################################################
#
# token implementations
#
# Token
# +- CommonToken
# \- ClassicToken
#
############################################################################
class CommonToken(Token):
"""@brief Basic token implementation.
This implementation does not copy the text from the input stream upon
creation, but keeps start/stop pointers into the stream to avoid
unnecessary copy operations.
"""
def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
input=None, start=None, stop=None, oldToken=None):
Token.__init__(self)
if oldToken is not None:
self.type = oldToken.type
self.line = oldToken.line
self.charPositionInLine = oldToken.charPositionInLine
self.channel = oldToken.channel
self.index = oldToken.index
self._text = oldToken._text
if isinstance(oldToken, CommonToken):
self.input = oldToken.input
self.start = oldToken.start
self.stop = oldToken.stop
else:
self.type = type
self.input = input
self.charPositionInLine = -1 # set to invalid position
self.line = 0
self.channel = channel
#What token number is this from 0..n-1 tokens; < 0 implies invalid index
self.index = -1
# We need to be able to change the text once in a while. If
# this is non-null, then getText should return this. Note that
# start/stop are not affected by changing this.
self._text = text
# The char position into the input buffer where this token starts
self.start = start
# The char position into the input buffer where this token stops
# This is the index of the last char, *not* the index after it!
self.stop = stop
def getText(self):
if self._text is not None:
return self._text
if self.input is None:
return None
return self.input.substring(self.start, self.stop)
def setText(self, text):
"""
Override the text for this token. getText() will return this text
rather than pulling from the buffer. Note that this does not mean
that start/stop indexes are not valid. It means that that input
was converted to a new string in the token object.
"""
self._text = text
text = property(getText, setText)
def getType(self):
return self.type
def setType(self, ttype):
self.type = ttype
def getLine(self):
return self.line
def setLine(self, line):
self.line = line
def getCharPositionInLine(self):
return self.charPositionInLine
def setCharPositionInLine(self, pos):
self.charPositionInLine = pos
def getChannel(self):
return self.channel
def setChannel(self, channel):
self.channel = channel
def getTokenIndex(self):
return self.index
def setTokenIndex(self, index):
self.index = index
def getInputStream(self):
return self.input
def setInputStream(self, input):
self.input = input
def __str__(self):
if self.type == EOF:
return "<EOF>"
channelStr = ""
if self.channel > 0:
channelStr = ",channel=" + str(self.channel)
txt = self.text
if txt is not None:
txt = txt.replace("\n","\\\\n")
txt = txt.replace("\r","\\\\r")
txt = txt.replace("\t","\\\\t")
else:
txt = "<no text>"
return "[@%d,%d:%d=%r,<%d>%s,%d:%d]" % (
self.index,
self.start, self.stop,
txt,
self.type, channelStr,
self.line, self.charPositionInLine
)
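# Illustrative sketch (not part of the original runtime): a CommonToken built
# without `text` lazily slices its input stream via start/stop, while passing
# `text` (or assigning `.text`) overrides that lookup. The helper below is a
# hypothetical convenience for debugging token streams.
def _describe_token(tok):
    """Return a short human-readable summary of a Token (illustration only)."""
    return "type=%s text=%r line=%s" % (tok.getType(), tok.getText(), tok.getLine())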
class ClassicToken(Token):
"""@brief Alternative token implementation.
A Token object like we'd use in ANTLR 2.x; has an actual string created
and associated with this object. These objects are needed for imaginary
tree nodes that have payload objects. We need to create a Token object
that has a string; the tree node will point at this token. CommonToken
has indexes into a char stream and hence cannot be used to introduce
new strings.
"""
def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL,
oldToken=None
):
Token.__init__(self)
if oldToken is not None:
self.text = oldToken.text
self.type = oldToken.type
self.line = oldToken.line
self.charPositionInLine = oldToken.charPositionInLine
self.channel = oldToken.channel
self.text = text
self.type = type
self.line = None
self.charPositionInLine = None
self.channel = channel
self.index = None
def getText(self):
return self.text
def setText(self, text):
self.text = text
def getType(self):
return self.type
def setType(self, ttype):
self.type = ttype
def getLine(self):
return self.line
def setLine(self, line):
self.line = line
def getCharPositionInLine(self):
return self.charPositionInLine
def setCharPositionInLine(self, pos):
self.charPositionInLine = pos
def getChannel(self):
return self.channel
def setChannel(self, channel):
self.channel = channel
def getTokenIndex(self):
return self.index
def setTokenIndex(self, index):
self.index = index
def getInputStream(self):
return None
def setInputStream(self, input):
pass
def toString(self):
channelStr = ""
if self.channel > 0:
channelStr = ",channel=" + str(self.channel)
txt = self.text
if txt is None:
txt = "<no text>"
return "[@%r,%r,<%r>%s,%r:%r]" % (self.index,
txt,
self.type,
channelStr,
self.line,
self.charPositionInLine
)
__str__ = toString
__repr__ = toString
EOF_TOKEN = CommonToken(type=EOF)
INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
# will avoid creating a token for this symbol and try to fetch another.
SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
| bsd-3-clause | 8,470,351,317,011,310,000 | 27.884615 | 87 | 0.593792 | false |
paradoxxxzero/pyjade | pyjade/parser.py | 1 | 10534 | from __future__ import absolute_import
from .lexer import Lexer
from . import nodes
import six
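# Illustrative usage sketch (not part of the original module): parsing a small
# template into a node tree. The template source below is made up; rendering
# to HTML additionally needs one of the pyjade compilers.
def _example_parse(source='div#main\n  p Hello'):
    """Parse a Jade snippet and return the root Block node."""
    return Parser(source).parse()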
class Parser(object):
def __init__(self,str,filename=None,**options):
self.input = str
self.lexer = Lexer(str,**options)
self.filename = filename
self.bloks = {}
self.options = options
self.contexts = [self]
self.extending = False
self._spaces = None
    def context(self,parser=None):
        if parser: self.contexts.append(parser)
        else: self.contexts.pop()
def advance(self):
return self.lexer.advance()
def skip(self,n):
while n>1: # > 0?
self.advance()
n -= 1
def peek(self):
p = self.lookahead(1)
return p
def line(self):
return self.lexer.lineno
def lookahead(self,n):
return self.lexer.lookahead(n)
def parse(self):
block = nodes.Block()
parser = None
block.line = self.line()
while 'eos' != self.peek().type:
if 'newline' == self.peek().type: self.advance()
else: block.append(self.parseExpr())
parser = self.extending
if parser:
self.context(parser)
ast = parser.parse()
self.context()
return ast
return block
def expect(self,type):
t = self.peek().type
if t == type: return self.advance()
else:
raise Exception('expected "%s" but got "%s" in file %s on line %d' %
(type, t, self.filename, self.line()))
def accept(self,type):
if self.peek().type == type: return self.advance()
def parseExpr(self):
t = self.peek().type
if 'yield' == t:
self.advance()
block = nodes.Block()
block._yield = True
return block
elif t in ('id','class'):
tok = self.advance()
new_div = self.lexer.tok('tag','div')
new_div.inline_level = tok.inline_level
self.lexer.stash.append(new_div)
self.lexer.stash.append(tok)
return self.parseExpr()
funcName = 'parse%s'%t.capitalize()
if hasattr(self,funcName):
return getattr(self,funcName)()
else:
raise Exception('unexpected token "%s" in file %s on line %d' %
(t, self.filename, self.line()))
def parseString(self):
tok = self.expect('string')
node = nodes.String(tok.val, inline=tok.inline_level > 0)
node.line = self.line()
return node
def parseText(self):
tok = self.expect('text')
node = nodes.Text(tok.val)
node.line = self.line()
return node
def parseBlockExpansion(self):
if ':'== self.peek().type:
self.advance()
return nodes.Block(self.parseExpr())
else:
return self.block()
def parseAssignment(self):
tok = self.expect('assignment')
return nodes.Assignment(tok.name,tok.val)
def parseCode(self):
tok = self.expect('code')
if not tok.val and 'indent'==self.peek().type:
node = nodes.BlockCode(
tok.val, self.parseTextBlock(), tok.buffer, tok.escape)
node.line = self.line()
else:
node = nodes.Code(tok.val,tok.buffer,tok.escape) #tok.escape
block,i = None,1
node.line = self.line()
while self.lookahead(i) and 'newline'==self.lookahead(i).type:
i+= 1
block = 'indent' == self.lookahead(i).type
if block:
self.skip(i-1)
node.block = self.block()
return node
def parseComment(self):
tok = self.expect('comment')
if 'indent'==self.peek().type:
node = nodes.BlockComment(tok.val, self.block(), tok.buffer)
else:
node = nodes.Comment(tok.val,tok.buffer)
node.line = self.line()
return node
def parseDoctype(self):
tok = self.expect('doctype')
node = nodes.Doctype(tok.val)
node.line = self.line()
return node
def parseFilter(self):
tok = self.expect('filter')
attrs = self.accept('attrs')
self.lexer.pipeless = True
block = self.parseTextBlock()
self.lexer.pipeless = False
node = nodes.Filter(tok.val, block, attrs and attrs.attrs)
node.line = self.line()
return node
def parseASTFilter(self):
tok = self.expect('tag')
attrs = self.accept('attrs')
self.expect(':')
block = self.block()
node = nodes.Filter(tok.val, block, attrs and attrs.attrs)
node.line = self.line()
return node
def parseEach(self):
tok = self.expect('each')
node = nodes.Each(tok.code, tok.keys)
node.line = self.line()
node.block = self.block()
return node
def parseConditional(self):
tok = self.expect('conditional')
node = nodes.Conditional(tok.val, tok.sentence)
node.line = self.line()
node.block = self.block()
while True:
t = self.peek()
if 'conditional' == t.type and node.can_append(t.val):
node.append(self.parseConditional())
else:
break
return node
def parseExtends(self):
path = self.expect('extends').val.strip('"\'')
return nodes.Extends(path)
def parseCall(self):
tok = self.expect('call')
name = tok.val
args = tok.args
if args is None:
args = ""
block = self.block() if 'indent' == self.peek().type else None
return nodes.Mixin(name,args,block,True)
def parseMixin(self):
tok = self.expect('mixin')
name = tok.val
args = tok.args
if args is None:
args = ""
block = self.block() if 'indent' == self.peek().type else None
return nodes.Mixin(name,args,block,block is None)
def parseBlock(self):
block = self.expect('block')
mode = block.mode
name = block.val.strip()
block = self.block(cls=nodes.CodeBlock) if 'indent'==self.peek().type else nodes.CodeBlock(nodes.Literal(''))
block.mode = mode
block.name = name
return block
def parseInclude(self):
path = self.expect('include').val.strip()
return nodes.Include(path)
def parseTextBlock(self, tag=None):
text = nodes.Text()
text.line = self.line()
if (tag):
            text.parent = tag
spaces = self.expect('indent').val
if not self._spaces: self._spaces = spaces
indent = ' '*(spaces-self._spaces)
while 'outdent' != self.peek().type:
t = self.peek().type
if 'newline'==t:
text.append('\n')
self.advance()
elif 'indent'==t:
text.append('\n')
for node in self.parseTextBlock().nodes: text.append(node)
text.append('\n')
else:
text.append(indent+self.advance().val)
if spaces == self._spaces: self._spaces = None
self.expect('outdent')
return text
def block(self,cls=nodes.Block):
block = cls()
block.line = self.line()
self.expect('indent')
while 'outdent' != self.peek().type:
if 'newline'== self.peek().type:
self.advance()
else:
block.append(self.parseExpr())
self.expect('outdent')
return block
def processInline(self, current_tag, current_level):
next_level = current_level + 1
while self.peek().inline_level == next_level:
current_tag.block.append(self.parseExpr())
if self.peek().inline_level > next_level:
self.processInline(current_tag, next_level)
def processTagText(self, tag):
if self.peek().inline_level < tag.inline_level:
return
if not self.lookahead(2).inline_level > tag.inline_level:
tag.text = self.parseText()
return
while self.peek().inline_level == tag.inline_level and self.peek().type == 'string':
tag.block.append(self.parseExpr())
if self.peek().inline_level > tag.inline_level:
self.processInline(tag, tag.inline_level)
def parseTag(self):
i = 2
if 'attrs'==self.lookahead(i).type: i += 1
if ':'==self.lookahead(i).type:
if 'indent' == self.lookahead(i+1).type:
raise Exception('unexpected token "indent" in file %s on line %d' %
(self.filename, self.line()))
tok = self.advance()
tag = nodes.Tag(tok.val)
tag.inline_level = tok.inline_level
dot = None
tag.line = self.line()
while True:
t = self.peek().type
if t in ('id','class'):
tok = self.advance()
tag.setAttribute(tok.type,'"%s"'%tok.val,True)
continue
elif 'attrs'==t:
tok = self.advance()
for n,v in six.iteritems(tok.attrs):
tag.setAttribute(n,v,n in tok.static_attrs)
continue
else:
break
v = self.peek().val
if '.'== v:
dot = tag.textOnly = True
self.advance()
elif '<'== v: # For inline elements
tag.inline = True
self.advance()
t = self.peek().type
if 'code'==t: tag.code = self.parseCode()
elif ':'==t:
self.advance()
tag.block = nodes.Block()
tag.block.append(self.parseExpr())
elif 'string'==t: self.processTagText(tag)
elif 'text'==t: tag.text = self.parseText()
while 'newline' == self.peek().type: self.advance()
if 'indent' == self.peek().type:
if tag.textOnly:
self.lexer.pipeless = True
tag.block = self.parseTextBlock(tag)
self.lexer.pipeless = False
else:
block = self.block()
if tag.block:
for node in block.nodes:
tag.block.append(node)
else:
tag.block = block
return tag
| mit | 3,046,096,047,009,413,600 | 29.622093 | 117 | 0.522594 | false |
highlando/krypy | krypy/recycling/linsys.py | 1 | 5723 | # -*- coding: utf8 -*-
import numpy
from .. import utils, deflation, linsys
from . import factories, evaluators
class _RecyclingSolver(object):
'''Base class for recycling solvers.'''
def __init__(self, DeflatedSolver,
vector_factory=None
):
'''Initialize recycling solver base.
:param DeflatedSolver: a deflated solver from
:py:mod:`~krypy.deflation`.
:param vector_factory: (optional) An instance of a subclass of
:py:class:`krypy.recycling.factories._DeflationVectorFactory`
that constructs deflation vectors for recycling. Defaults to `None`
which means that no recycling is used.
Also the following strings are allowed as shortcuts:
* ``'RitzApproxKrylov'``: uses the approximate Krylov subspace bound
evaluator :py:class:`krypy.recycling.evaluators.RitzApproxKrylov`.
* ``'RitzAprioriCg'``: uses the CG :math:`\kappa`-bound
(:py:class:`krypy.utils.BoundCG`) as an a priori bound with
:py:class:`krypy.recycling.evaluators.RitzApriori`.
* ``'RitzAprioriMinres'``: uses the MINRES bound
(:py:class:`krypy.utils.BoundMinres`) as an a priori bound with
:py:class:`krypy.recycling.evaluators.RitzApriori`.
After a run of the provided ``DeflatedSolver`` via :py:meth:`solve`,
the resulting instance of the ``DeflatedSolver`` is available in the
attribute ``last_solver``.
'''
self._DeflatedSolver = DeflatedSolver
self._vector_factory = vector_factory
self.timings = utils.Timings()
'''Timings from last run of :py:meth:`solve`.
Timings of the vector factory runs and the actual solution processes.
'''
self.last_solver = None
'''``DeflatedSolver`` instance from last run of :py:meth:`solve`.
Instance of ``DeflatedSolver`` that resulted from the last call to
:py:meth:`solve`. Initialized with ``None`` before the first run.'''
def solve(self, linear_system,
vector_factory=None,
*args, **kwargs):
'''Solve the given linear system with recycling.
The provided `vector_factory` determines which vectors are used for
deflation.
:param linear_system: the :py:class:`~krypy.linsys.LinearSystem` that
is about to be solved.
:param vector_factory: (optional) see description in constructor.
All remaining arguments are passed to the ``DeflatedSolver``.
:returns: instance of ``DeflatedSolver`` which was used to obtain the
approximate solution. The approximate solution is available under the
attribute ``xk``.
'''
# replace linear_system with equivalent TimedLinearSystem on demand
if not isinstance(linear_system, linsys.TimedLinearSystem):
linear_system = linsys.ConvertedTimedLinearSystem(linear_system)
with self.timings['vector_factory']:
if vector_factory is None:
vector_factory = self._vector_factory
# construct vector_factory if strings are provided
if vector_factory == 'RitzApproxKrylov':
vector_factory = factories.RitzFactory(
subset_evaluator=evaluators.RitzApproxKrylov()
)
elif vector_factory == 'RitzAprioriCg':
vector_factory = factories.RitzFactory(
subset_evaluator=evaluators.RitzApriori(
Bound=utils.BoundCG
)
)
elif vector_factory == 'RitzAprioriMinres':
vector_factory = factories.RitzFactory(
subset_evaluator=evaluators.RitzApriori(
Bound=utils.BoundMinres
)
)
# get deflation vectors
if self.last_solver is None or vector_factory is None:
U = numpy.zeros((linear_system.N, 0))
else:
U = vector_factory.get(self.last_solver)
with self.timings['solve']:
# solve deflated linear system
self.last_solver = self._DeflatedSolver(linear_system,
U=U,
store_arnoldi=True,
*args, **kwargs)
# return solver instance
return self.last_solver
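# Illustrative sketch (illustration only, names follow the docstrings above):
# driving a recycling solver over a sequence of slowly changing self-adjoint
# systems. The matrices and right hand side are assumed to be provided.
def _example_recycling_run(matrices, b):
    """Solve A x = b for each A in `matrices`, recycling Ritz vectors."""
    rminres = RecyclingMinres(vector_factory='RitzApproxKrylov')
    xs = []
    for A in matrices:
        ls = linsys.LinearSystem(A, b, self_adjoint=True)
        xs.append(rminres.solve(ls).xk)
    return xs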
class RecyclingCg(_RecyclingSolver):
'''Recycling preconditioned CG method.
See :py:class:`~krypy.recycling.linsys._RecyclingSolver` for the
documentation of the available parameters.
'''
def __init__(self, *args, **kwargs):
super(RecyclingCg, self).__init__(deflation.DeflatedCg,
*args, **kwargs)
class RecyclingMinres(_RecyclingSolver):
'''Recycling preconditioned MINRES method.
See :py:class:`~krypy.recycling.linsys._RecyclingSolver` for the
documentation of the available parameters.
'''
def __init__(self, *args, **kwargs):
super(RecyclingMinres, self).__init__(deflation.DeflatedMinres,
*args, **kwargs)
class RecyclingGmres(_RecyclingSolver):
'''Recycling preconditioned GMRES method.
See :py:class:`~krypy.recycling.linsys._RecyclingSolver` for the
documentation of the available parameters.
'''
def __init__(self, *args, **kwargs):
super(RecyclingGmres, self).__init__(deflation.DeflatedGmres,
*args, **kwargs)
| mit | 4,568,900,102,347,378,000 | 38.743056 | 79 | 0.589376 | false |
apagac/cfme_tests | cfme/common/vm.py | 1 | 41764 | # -*- coding: utf-8 -*-
"""Module containing classes with common behaviour for both VMs and Instances of all types."""
import json
from datetime import date
from datetime import datetime
from datetime import timedelta
import attr
from cached_property import cached_property
from riggerlib import recursive_update
from cfme.base.login import BaseLoggedInPage
from cfme.common import CustomButtonEventsMixin
from cfme.common import PolicyProfileAssignable
from cfme.common import Taggable
from cfme.common.vm_console import ConsoleMixin
from cfme.common.vm_views import DriftAnalysis
from cfme.common.vm_views import DriftHistory
from cfme.common.vm_views import VMPropertyDetailView
from cfme.exceptions import CFMEException
from cfme.exceptions import ItemNotFound
from cfme.exceptions import OptionNotAvailable
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.services.requests import RequestsView
from cfme.utils import ParamClassName
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.net import find_pingable
from cfme.utils.pretty import Pretty
from cfme.utils.rest import assert_response
from cfme.utils.timeutil import parsetime
from cfme.utils.update import Updateable
from cfme.utils.version import LOWEST
from cfme.utils.version import VersionPicker
from cfme.utils.virtual_machines import deploy_template
from cfme.utils.wait import wait_for
def base_types(template=False):
from pkg_resources import iter_entry_points
search = "template" if template else "vm"
return {
ep.name: ep.resolve() for ep in iter_entry_points('manageiq.{}_categories'.format(search))
}
def instance_types(category, template=False):
from pkg_resources import iter_entry_points
search = "template" if template else "vm"
return {
ep.name: ep.resolve() for ep in iter_entry_points(
'manageiq.{}_types.{}'.format(search, category))
}
def all_types(template=False):
all_types = base_types(template)
    for category in list(all_types):  # snapshot keys; the dict grows inside the loop
all_types.update(instance_types(category, template))
return all_types
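# Illustrative sketch (assumes a cfme environment with providers installed):
# the helpers above resolve VM/template classes from setuptools entry points;
# a quick way to inspect what is registered.
def _list_registered_vm_types(template=False):
    """Return {name: class name} for every registered VM (or template) type."""
    return {name: klass.__name__ for name, klass in all_types(template).items()}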
class _TemplateMixin(object):
pass
@attr.s
class BaseVM(
BaseEntity,
Pretty,
Updateable,
PolicyProfileAssignable,
Taggable,
ConsoleMixin,
CustomButtonEventsMixin,
):
"""Base VM and Template class that holds the largest common functionality between VMs,
instances, templates and images.
In order to inherit these, you have to implement the ``on_details`` method.
"""
pretty_attrs = ['name', 'provider', 'template_name']
###
# To be set or implemented
#
ALL_LIST_LOCATION = None
TO_OPEN_EDIT = None # Name of the item in Configuration that puts you in the form
QUADICON_TYPE = "vm"
# Titles of the delete buttons in configuration
REMOVE_SELECTED = 'Remove selected items from Inventory'
REMOVE_SINGLE = 'Remove Virtual Machine from Inventory'
RETIRE_DATE_FMT = parsetime.saved_report_title_format
_param_name = ParamClassName('name')
DETAILS_VIEW_CLASS = None
###
# Shared behaviour
#
PROVISION_CANCEL = 'Add of new VM Provision Request was cancelled by the user'
PROVISION_START = ('VM Provision Request was Submitted, you will be notified when your VMs '
'are ready')
name = attr.ib()
provider = attr.ib()
def __new__(cls, *args, **kwargs):
if cls in [BaseVM, VM, Template]:
raise NotImplementedError('This class cannot be instantiated.')
else:
# magic {waves hands}
return object.__new__(cls)
###
# Properties
#
@property
def is_vm(self):
return not isinstance(self, _TemplateMixin)
@property
def quadicon_type(self):
return self.QUADICON_TYPE
###
# Methods
#
def check_compliance(self, timeout=240):
"""Initiates compliance check and waits for it to finish."""
view = navigate_to(self, "Details")
original_state = self.compliance_status
view.toolbar.policy.item_select("Check Compliance of Last Known Configuration",
handle_alert=True)
view.flash.assert_no_error()
wait_for(
lambda: self.compliance_status != original_state,
num_sec=timeout, delay=5, message="compliance of {} checked".format(self.name)
)
@property
def compliance_status(self):
"""Returns the title of the compliance SummaryTable. The title contains datetime so it can
be compared.
Returns:
:py:class:`NoneType` if no title is present (no compliance checks before), otherwise str
"""
view = navigate_to(self, "Details")
view.toolbar.reload.click()
return view.entities.summary("Compliance").get_text_of("Status")
@property
def compliant(self):
"""Check if the VM is compliant.
Returns:
:py:class:`bool`
"""
text = self.compliance_status.strip().lower()
if text.startswith("non-compliant"):
return False
elif text.startswith("compliant"):
return True
else:
raise ValueError("{} is not a known state for compliance".format(text))
def delete(self, cancel=False, from_details=False):
"""Deletes the VM/Instance from the VMDB.
Args:
cancel: Whether to cancel the action in the alert.
from_details: Whether to use the details view or list view.
"""
if from_details:
view = navigate_to(self, 'Details')
view.toolbar.configuration.item_select(self.REMOVE_SINGLE,
handle_alert=not cancel)
else:
view = navigate_to(self.parent, 'All')
self.find_quadicon().check()
view.toolbar.configuration.item_select(self.REMOVE_SELECTED, handle_alert=not cancel)
@property
def ip_address(self):
"""Fetches IP Address of VM
First looks to see if any of the mgmt ips returned by 'all_ips' are pingable
Then defaults to whatever mgmt.ip returns
"""
return find_pingable(self.mgmt)
@property
def all_ip_addresses(self):
"""Fetches all IP Addresses of a VM, pingable or otherwise."""
# TODO: Implement sentaku for this property with ViaMGMT impl
view = navigate_to(self, "Details", use_resetter=False)
try:
return view.entities.summary('Properties').get_text_of("IP Address")
except NameError:
# since some providers have plural 'Addresses'.
return view.entities.summary('Properties').get_text_of("IP Addresses").split(", ")
@property
def mac_address(self):
"""Fetches MAC Address of VM"""
# TODO: We should update this with wrapanapi method when it becomes available.
view = navigate_to(self, "Details", use_resetter=False)
try:
return view.entities.summary('Properties').get_text_of("MAC Address")
except NameError:
# since some providers have plural 'Addresses'.
return view.entities.summary('Properties').get_text_of("MAC Addresses")
@property
def is_retired(self):
"""Check retirement status of vm"""
view = navigate_to(self, "Details", use_resetter=False)
if view.entities.summary('Lifecycle').get_text_of('Retirement Date').lower() != 'never':
try:
retirement_state = VersionPicker({
LOWEST: 'Retirement state',
'5.10': 'Retirement State'
})
status = view.entities.summary('Lifecycle').get_text_of(retirement_state).lower()
return status == 'retired'
except NameError:
return False
else:
return False
def find_quadicon(self, from_any_provider=False, from_archived_all=False,
from_orphaned_all=False, use_search=True):
"""Find and return a quadicon belonging to a specific vm
Args:
from_any_provider: Whether to look for it anywhere (root of the tree). Useful when
looking up archived or orphaned VMs
Returns: entity of appropriate type
Raises: ItemNotFound
"""
# TODO(all): Refactor this method replace it with vm methods like get_state
if from_any_provider:
view = navigate_to(self.parent, 'All')
elif from_archived_all:
view = navigate_to(self.appliance.provider_based_collection(self.provider),
'ArchivedAll')
elif from_orphaned_all:
view = navigate_to(self.appliance.provider_based_collection(self.provider),
'OrphanedAll')
else:
view = navigate_to(self, 'AllForProvider', use_resetter=False)
view.toolbar.view_selector.select('Grid View')
try:
return view.entities.get_entity(name=self.name, surf_pages=True, use_search=use_search)
except ItemNotFound:
raise ItemNotFound("VM '{}' not found in UI!".format(self.name))
def open_console(self, console='VM Console', invokes_alert=None):
"""
Initiates the opening of one of the console types supported by the Access
button. Presently we only support VM Console, which is the HTML5 Console.
In case of VMware provider it could be VMRC, VNC/HTML5, WebMKS, but we only
support VNC/HTML5.
Possible values for 'console' could be 'VM Console' and 'Web Console', but Web
Console is not supported as well.
Args:
console: one of the supported console types given by the Access button.
invokes_alert: If the particular console will invoke a CFME popup/alert
setting this to true will handle this.
"""
# TODO: implement vmrc vm console
if console not in ['VM Console']:
raise NotImplementedError('Not supported console type: {}'.format(console))
view = navigate_to(self, 'Details')
# Click console button given by type
view.toolbar.access.item_select(console, handle_alert=invokes_alert)
self.vm_console
def open_details(self, properties=None):
"""Clicks on details infoblock"""
view = navigate_to(self, 'Details')
view.entities.summary(properties[0]).click_at(properties[1])
return self.create_view(VMPropertyDetailView)
@property
def last_analysed(self):
"""Returns the contents of the ``Last Analysed`` field in summary"""
view = navigate_to(self, "Details")
view.toolbar.reload.click()
return view.entities.summary("Lifecycle").get_text_of("Last Analyzed").strip()
def load_details(self, refresh=False, from_any_provider=False):
"""Navigates to an VM's details page.
Args:
refresh: Refreshes the VM page if already there
from_any_provider: Archived/Orphaned VMs need this
"""
if from_any_provider:
view = navigate_to(self, 'AnyProviderDetails', use_resetter=False)
else:
view = navigate_to(self, 'Details', use_resetter=False)
if refresh:
view.toolbar.reload.click()
view.wait_displayed()
return view
def open_edit(self):
"""Loads up the edit page of the object."""
return navigate_to(self, 'Edit')
def open_timelines(self):
"""Navigates to an VM's timeline page.
Returns:
:py:class:`TimelinesView` object
"""
return navigate_to(self, 'Timelines')
def rediscover(self):
"""Deletes the VM from the provider and lets it discover again"""
self.delete(from_details=True)
self.wait_for_delete()
self.provider.refresh_provider_relationships()
self.wait_to_appear()
def rediscover_if_analysis_data_present(self):
"""Rediscovers the object if it has some analysis data present.
Returns:
Boolean if the rediscovery happened.
"""
if self.last_analysed.lower() != 'never':
self.rediscover()
return True
return False
def refresh_relationships(self, from_details=False, cancel=False, from_any_provider=False):
"""Executes a refresh of relationships.
Args:
from_details: Whether or not to perform action from instance details page
cancel: Whether or not to cancel the refresh relationships action
"""
if from_details:
view = navigate_to(self, 'Details', use_resetter=False)
else:
view = navigate_to(self.parent, 'All')
self.find_quadicon(from_any_provider=from_any_provider).check()
view.toolbar.configuration.item_select("Refresh Relationships and Power States",
handle_alert=not cancel)
@property
def retirement_date(self):
"""Returns the retirement date of the selected machine, or 'Never'
Returns:
:py:class:`str` object
"""
view = navigate_to(self, "Details")
return view.entities.summary("Lifecycle").get_text_of("Retirement Date").strip()
def smartstate_scan(self, cancel=False, from_details=False, wait_for_task_result=False):
"""Initiates fleecing from the UI.
Args:
cancel: Whether or not to cancel the refresh relationships action
from_details: Whether or not to perform action from instance details page
"""
if from_details:
view = navigate_to(self, 'Details', use_resetter=False)
else:
view = navigate_to(self.parent, 'All')
self.find_quadicon().check()
view.toolbar.configuration.item_select('Perform SmartState Analysis',
handle_alert=not cancel)
if wait_for_task_result:
task = self.appliance.collections.tasks.instantiate(
name='Scan from Vm {}'.format(self.name), tab='AllTasks')
task.wait_for_finished()
return task
def wait_to_disappear(self, timeout=600):
"""Wait for a VM to disappear within CFME
Args:
timeout: time (in seconds) to wait for it to appear
"""
wait_for(
lambda: self.exists,
num_sec=timeout, delay=5, fail_func=self.browser.refresh, fail_condition=True,
message="wait for vm to not exist")
wait_for_delete = wait_to_disappear # An alias for more fitting verbosity
def wait_to_appear(self, timeout=600, load_details=True):
"""Wait for a VM to appear within CFME
Args:
timeout: time (in seconds) to wait for it to appear
load_details: when found, should it load the vm details
"""
def _refresh():
self.provider.refresh_provider_relationships()
self.appliance.browser.widgetastic.browser.refresh() # strange because ViaUI
wait_for(
lambda: self.exists,
num_sec=timeout, delay=5, fail_func=_refresh,
message="wait for vm to appear")
if load_details:
navigate_to(self, "Details", use_resetter=False)
def set_ownership(self, user=None, group=None, click_cancel=False, click_reset=False):
"""Set instance ownership
Args:
user (User): user object for ownership
group (Group): group object for ownership
click_cancel (bool): Whether to cancel form submission
click_reset (bool): Whether to reset form after filling
"""
view = navigate_to(self, 'SetOwnership', wait_for_view=0)
fill_result = view.form.fill({
'user_name': user.name if user else None,
'group_name': group.description if group else group})
if not fill_result:
view.form.cancel_button.click()
view = self.create_view(navigator.get_class(self, 'Details').VIEW)
view.flash.assert_success_message('Set Ownership was cancelled by the user')
return
# Only if the form changed
if click_reset:
view.form.reset_button.click()
view.flash.assert_message('All changes have been reset', 'warning')
# Cancel after reset
assert view.form.is_displayed
view.form.cancel_button.click()
elif click_cancel:
view.form.cancel_button.click()
view.flash.assert_success_message('Set Ownership was cancelled by the user')
else:
# save the form
view.form.save_button.click()
view = self.create_view(navigator.get_class(self, 'Details').VIEW)
view.flash.assert_success_message('Ownership saved for selected {}'
.format(self.VM_TYPE))
def unset_ownership(self):
"""Remove user ownership and return group to EvmGroup-Administrator"""
view = navigate_to(self, 'SetOwnership', wait_for_view=0)
fill_result = view.form.fill({
'user_name': '<No Owner>', 'group_name': 'EvmGroup-administrator'
})
if fill_result:
view.form.save_button.click()
msg = 'Ownership saved for selected {}'.format(self.VM_TYPE)
else:
view.form.cancel_button.click()
logger.warning('No change during unset_ownership')
msg = 'Set Ownership was cancelled by the user'
view = self.create_view(navigator.get_class(self, 'Details').VIEW)
view.flash.assert_success_message(msg)
def rename(self, new_vm_name, cancel=False, reset=False):
"""Rename the VM
Args:
new_vm_name: object for renaming vm
cancel (bool): Whether to cancel form submission
reset (bool): Whether to reset form after filling
"""
view = navigate_to(self, 'Rename')
changed = view.vm_name.fill(new_vm_name)
if changed:
if reset:
view.reset_button.click()
view.flash.assert_no_error()
view.cancel_button.click()
else:
# save the form
view.save_button.click()
view.flash.assert_no_error()
self.name = new_vm_name
return self
if cancel:
view.cancel_button.click()
view.flash.assert_no_error()
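# Illustrative sketch (not runnable outside a cfme test session): a typical
# flow with the classes in this module, assuming an `appliance` object and a
# provider have already been set up by the test framework.
def _example_provision_and_cleanup(appliance, provider):
    """Provision a VM through its collection, then remove it from inventory."""
    collection = appliance.provider_based_collection(provider)
    vm = collection.create('test-vm-0001', provider, find_in_cfme=True)
    vm.refresh_relationships(from_details=True)
    vm.delete(from_details=True)
    vm.wait_to_disappear()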
@attr.s
class BaseVMCollection(BaseCollection):
ENTITY = BaseVM
def instantiate(self, name, provider, template_name=None):
"""Factory class method that determines the correct subclass for given provider.
For reference how does that work, refer to the entrypoints in the setup.py
Args:
name: Name of the VM/Instance as it appears in the UI
provider: The provider object (not the string!)
template_name: Source template name. Useful when the VM/Instance does not exist and you
want to create it.
"""
# When this collection is filtered and used for instantiation, the ENTITY attribute
# points to BaseVM instead of a specific VM type ENTITY class
# For this reason we don't use self.ENTITY, but instead lookup the entity class
# through the provider's attributes
if isinstance(self, TemplateCollection):
# This is a Template derived class, not a VM
return provider.template_class.from_collection(self, name, provider)
else:
return provider.vm_class.from_collection(self, name, provider, template_name)
def create(self, vm_name, provider, form_values=None, cancel=False, check_existing=False,
find_in_cfme=False, wait=True, request_description=None, auto_approve=False,
override=False):
"""Provisions an vm/instance with the given properties through CFME
Args:
vm_name: the vm/instance's name
provider: provider object
form_values: dictionary of form values for provisioning, structured into tabs
cancel: boolean, whether or not to cancel form filling
check_existing: verify if such vm_name exists
find_in_cfme: verify that vm was created and appeared in CFME
wait: wait for vm provision request end
request_description: request description that test needs to search in request table.
auto_approve: if true the request is approved before waiting for completion.
override: To override any failure related exception
Note:
Calling create on a sub-class of instance will generate the properly formatted
dictionary when the correct fields are supplied.
"""
vm = self.instantiate(vm_name, provider)
if check_existing and vm.exists:
return vm
if not provider.is_refreshed():
provider.refresh_provider_relationships()
wait_for(provider.is_refreshed, func_kwargs={'refresh_delta': 10}, timeout=600)
if not form_values:
form_values = vm.vm_default_args
else:
inst_args = vm.vm_default_args
form_values = recursive_update(inst_args, form_values)
env = form_values.get('environment') or {}
if env.get('automatic_placement'):
form_values['environment'] = {'automatic_placement': True}
form_values.update({'provider_name': provider.name})
if not form_values.get('template_name'):
template_name = (provider.data.get('provisioning').get('image', {}).get('name') or
provider.data.get('provisioning').get('template'))
vm.template_name = template_name
form_values.update({'template_name': template_name})
view = navigate_to(self, 'Provision')
view.form.fill(form_values)
if cancel:
view.form.cancel_button.click()
view = self.browser.create_view(BaseLoggedInPage)
view.flash.assert_success_message(self.ENTITY.PROVISION_CANCEL)
view.flash.assert_no_error()
else:
view.form.submit_button.click()
view = vm.appliance.browser.create_view(RequestsView)
if not BZ(1608967, forced_streams=['5.10']).blocks:
wait_for(lambda: view.flash.messages, fail_condition=[], timeout=10, delay=2,
message='wait for Flash Success')
# This flash message is not flashed in 5.10.
if self.appliance.version < 5.10:
wait_for(lambda: view.flash.messages, fail_condition=[], timeout=10, delay=2,
message='wait for Flash Success')
view.flash.assert_no_error()
if wait:
if request_description is None:
request_description = 'Provision from [{}] to [{}]'.format(
form_values.get('template_name'), vm.name)
provision_request = vm.appliance.collections.requests.instantiate(
request_description)
logger.info('Waiting for cfme provision request for vm %s', vm.name)
if auto_approve:
provision_request.approve_request(method='ui', reason="Approved")
provision_request.wait_for_request(method='ui', num_sec=1200)
if provision_request.is_succeeded(method='ui'):
logger.info('Waiting for vm %s to appear on provider %s', vm.name,
provider.key)
wait_for(provider.mgmt.does_vm_exist, [vm.name],
handle_exception=True, num_sec=600)
elif override:
logger.info('Overriding exception to check failure condition.')
else:
raise Exception(
"Provisioning vm {} failed with: {}"
.format(vm.name, provision_request.row.last_message.text)
)
if find_in_cfme:
vm.wait_to_appear(timeout=800)
return vm
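# Illustrative provisioning call (a sketch, not a definitive API; the
# collection variable and form value keys are assumptions based on the
# defaults handled above):
#   vm = vm_collection.create(
#       'test-vm-01', provider,
#       form_values={'environment': {'automatic_placement': True}},
#       check_existing=True, wait=True)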
def create_rest(self, vm_name, provider, form_values=None, check_existing=False):
"""Provisions a VM/Instance with the default self.vm_default_args_rest.
self.vm_default_args_rest may be overridden by form_values.
For more details about rest attributes please check:
https://access.redhat.com/documentation/en-us/red_hat_cloudforms/4.6/html-single/
red_hat_cloudforms_rest_api/index#provision-request-supported-attributes or
http://manageiq.org/docs/reference/fine/api/appendices/provision_attributes
NOTE: placement_auto defaults to True for requests made from the API or CloudForms Automate.
Args:
vm_name: vm name
provider: provider object
form_values: overrides or extends the default provision arguments.
check_existing: cancel creation if VM exists
Return: Instance object
"""
vm = self.instantiate(vm_name, provider)
if check_existing and vm.exists:
return vm
else:
if not provider.is_refreshed():
provider.refresh_provider_relationships()
wait_for(provider.is_refreshed, func_kwargs={'refresh_delta': 10}, timeout=600)
if not form_values:
form_values = vm.vm_default_args_rest
else:
inst_args = vm.vm_default_args_rest
form_values = recursive_update(inst_args, form_values)
response = self.appliance.rest_api.collections.provision_requests.action.create(
**form_values)[0]
assert_response(self.appliance)
provision_request = vm.appliance.collections.requests.instantiate(
description=response.description)
provision_request.wait_for_request(num_sec=900)
if provision_request.is_succeeded():
wait_for(lambda: provider.mgmt.does_vm_exist(vm.name), num_sec=1000, delay=5,
message="VM {} becomes visible".format(vm.name))
else:
logger.error("Provisioning failed with the message {}".
format(provision_request.rest.message))
raise CFMEException(provision_request.rest.message)
return vm
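# Illustrative REST provisioning call (a sketch; the extra form value key is a
# hypothetical override merged into vm_default_args_rest):
#   vm = vm_collection.create_rest('test-vm-02', provider,
#                                  form_values={'vm_fields': {'vm_memory': '2048'}},
#                                  check_existing=True)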
@attr.s
class VM(BaseVM):
template_name = attr.ib(default=None)
TO_RETIRE = None
# May be overridden by implementors of BaseVM
STATE_ON = "on"
STATE_OFF = "off"
STATE_PAUSED = "paused"
STATE_SUSPENDED = "suspended"
@cached_property
def mgmt(self):
"""
Returns the wrapanapi VM entity object to manipulate this VM directly via the provider API
"""
return self.provider.mgmt.get_vm(self.name)
@property
def exists_on_provider(self):
return self.provider.mgmt.does_vm_exist(self.name)
def retire(self):
view = navigate_to(self, 'Details', use_resetter=False)
view.toolbar.reload.click()
view.toolbar.lifecycle.item_select(self.TO_RETIRE, handle_alert=True)
view.flash.assert_no_error()
def power_control_from_cfme(self, option, cancel=True, from_details=False):
"""Power controls a VM from within CFME
Args:
option: corresponds to option values under the power button
cancel: Whether or not to cancel the power operation on confirmation
from_details: Whether or not to perform action from instance details page
Raises:
OptionNotAvailable: option param is not visible or enabled
"""
if from_details:
view = navigate_to(self, 'Details', use_resetter=False)
else:
view = navigate_to(self.parent, 'All')
if self.is_pwr_option_available_in_cfme(option=option, from_details=from_details):
view.toolbar.power.item_select(option, handle_alert=not cancel)
logger.info(
"Power control action of VM/instance %s, option %s, cancel %s executed",
self.name, option, str(cancel))
else:
raise OptionNotAvailable(option + " is not visible or enabled")
def wait_candu_data_available(self, timeout=600):
"""Waits until C&U data are available for this VM/Instance
Args:
timeout: Timeout passed to :py:func:`utils.wait.wait_for`
"""
view = navigate_to(self, 'Details', use_resetter=False)
view.toolbar.reload.click()
wait_for(
lambda: view.toolbar.monitoring.item_enabled("Utilization"),
delay=10, handle_exception=True, num_sec=timeout,
fail_func=view.toolbar.reload.click)
def capture_historical_data(self, interval="hourly", back="6.days"):
"""Capture historical utilization data for this VM/Instance
Args:
interval: Data interval (hourly/ daily)
back: back time interval from which you want data
"""
ret = self.appliance.ssh_client.run_rails_command(
"'vm = Vm.where(:ems_id => {prov_id}).where(:name => {vm_name})[0];\
vm.perf_capture({interval}, {back}.ago.utc, Time.now.utc)'".format(
prov_id=self.provider.id,
vm_name=json.dumps(self.name),
interval=json.dumps(interval),
back=back,
)
)
return ret.success
def wait_for_vm_state_change(self, desired_state=None, timeout=300, from_details=False,
with_relationship_refresh=True, from_any_provider=False):
"""Wait for VM to come to desired state in the UI.
This function waits just the needed amount of time thanks to wait_for.
Args:
desired_state: on, off, suspended... for available states, see
:py:class:`EC2Instance` and :py:class:`OpenStackInstance`
timeout: Specify amount of time (in seconds) to wait
from_any_provider: Archived/Orphaned vms need this
Raises:
TimedOutError:
When instance does not come up to desired state in specified period of time.
ItemNotFound:
When unable to find the instance passed
"""
def _looking_for_state_change():
if from_details:
view = navigate_to(self, "Details", use_resetter=False)
view.toolbar.reload.click()
current_state = view.entities.summary("Power Management").get_text_of("Power State")
return current_state == desired_state
else:
return self.find_quadicon(
from_any_provider=from_any_provider).data['state'] == desired_state
return wait_for(
_looking_for_state_change,
num_sec=timeout,
delay=30,
fail_func=lambda: self.refresh_relationships(from_details=from_details,
from_any_provider=from_any_provider) if
with_relationship_refresh else None)
def is_pwr_option_available_in_cfme(self, option, from_details=False):
"""Checks to see if a power option is available on the VM
Args:
option: corresponds to option values under the power button,
see :py:class:`EC2Instance` and :py:class:`OpenStackInstance`
from_details: Whether or not to perform action from instance details page
"""
if from_details:
view = navigate_to(self, 'Details', use_resetter=False)
view.toolbar.reload.click()
else:
view = navigate_to(self.parent, "All")
entity = self.find_quadicon()
entity.check()
if view.toolbar.power.has_item(option):
return view.toolbar.power.item_enabled(option)
else:
return False
def create_on_provider(self, timeout=900, find_in_cfme=False, delete_on_failure=True, **kwargs):
"""Create the VM on the provider via MgmtSystem. `deploy_template` handles errors during
VM provision on the MgmtSystem side and deletes the VM if it was provisioned incorrectly
Args:
timeout: Number of seconds to wait for the VM to appear in CFME
Will not wait at all, if set to 0 (Defaults to ``900``)
find_in_cfme: Verifies that VM exists in CFME UI
delete_on_failure: Attempts to remove VM on UI navigation failure
"""
vm = deploy_template(self.provider.key, self.name, self.template_name, **kwargs)
try:
if find_in_cfme:
self.wait_to_appear(timeout=timeout, load_details=False)
except Exception:
logger.warning("Couldn't find VM or Instance '%s' in CFME", self.name)
if delete_on_failure:
logger.info("Removing VM or Instance from mgmt system")
self.cleanup_on_provider()
raise
return vm
def cleanup_on_provider(self):
"""Clean up entity on the provider if it has been created on the provider
Helper method to avoid NotFoundError's during test case tear down.
"""
if self.exists_on_provider:
self.mgmt.cleanup()
else:
logger.debug('cleanup_on_provider: entity "%s" does not exist', self.name)
def set_retirement_date(self, when=None, offset=None, warn=None):
"""Overriding common method to use widgetastic views/widgets properly
Args:
when: :py:class:`datetime.datetime` object, when to retire (date in future)
offset: :py:class:`dict` with months, weeks, days, hours keys. other keys ignored
warn: When to warn, fills the select in the form in case the ``when`` is specified.
Note: this should be moved up to the common VM class when infra+cloud+common are all WT
If when and offset are both None, this removes retirement date
Examples:
# To set a specific retirement date 2 days from today
two_days_later = datetime.date.today() + datetime.timedelta(days=2)
vm.set_retirement_date(when=two_days_later)
# To set a retirement offset 2 weeks from now
vm.set_retirement_date(offset={'weeks': 2})
Offset is dict to remove ambiguity between timedelta/datetime and months/weeks/days/hours
timedelta supports creation with weeks, but not months
timedelta supports days attr, but not weeks or months
timedelta days attr will report a total summary, not the component that was passed to it
For these reasons timedelta isn't appropriate for offset
An enhancement to cfme.utils.timeutil extending timedelta would be great for making this a
bit cleaner
"""
view = navigate_to(self, 'SetRetirement')
fill_date = None
fill_offset = None
# explicit is/not None use here because of empty strings and dicts
if when is not None and offset is not None:
raise ValueError('set_retirement_date takes when or offset, but not both')
if when is not None and not isinstance(when, (datetime, date)):
raise ValueError('when argument must be a datetime object')
# due to major differences between the forms and their interaction, I'm splitting this
# method into two major blocks, one for each version. As a result some patterns will be
# repeated in both blocks
# This will allow for making changes to one version or the other without strange
# interaction in the logic
# format the date
# needs 4 digit year for fill
# displayed 2 digit year for flash message
# 59z/G-release retirement
changed = False # just in case it isn't set in logic
if when is not None and offset is None:
# Specific datetime retire, H+M are 00:00 by default if just date passed
fill_date = when.strftime('%m/%d/%Y %H:%M') # 4 digit year
msg_date = when.strftime('%m/%d/%y %H:%M UTC') # two digit year and timestamp
msg = 'Retirement date set to {}'.format(msg_date)
elif when is None and offset is None:
# clearing retirement date with space in textinput,
# using space here as with empty string calendar input is not cleared correctly
fill_date = ' '
msg = 'Retirement date removed'
elif offset is not None:
# retirement by offset
fill_date = None
fill_offset = {k: v
for k, v in offset.items()
if k in ['months', 'weeks', 'days', 'hours']}
# hack together an offset
# timedelta can take weeks, but not months
# copy and pop, only used to generate message, not used for form fill
offset_copy = fill_offset.copy()
if 'months' in offset_copy:
new_weeks = offset_copy.get('weeks', 0) + int(offset_copy.pop('months', 0)) * 4
offset_copy.update({'weeks': new_weeks})
msg_date = datetime.utcnow() + timedelta(**offset_copy)
msg = 'Retirement date set to {}'.format(msg_date.strftime('%m/%d/%y %H:%M UTC'))
# TODO move into before_fill when no need to click away from datetime picker
view.form.fill({
'retirement_mode':
'Time Delay from Now' if fill_offset else 'Specific Date and Time'})
view.flush_widget_cache() # since retirement_date is conditional widget
if fill_date is not None: # specific check because of empty string
# two part fill, widget seems to block warn selection when open
changed_date = view.form.fill({
'retirement_date': {'datetime_select': fill_date}})
view.title.click() # close datetime widget
changed_warn = view.form.fill({'retirement_warning': warn})
changed = changed_date or changed_warn
elif fill_offset:
changed = view.form.fill({
'retirement_date': fill_offset, 'retirement_warning': warn})
# Form save and flash messages are the same between versions
if changed:
view.form.save.click()
else:
logger.info('No form changes for setting retirement, clicking cancel')
view.form.cancel.click()
msg = 'Set/remove retirement date was cancelled by the user'
if self.DETAILS_VIEW_CLASS is not None:
view = self.create_view(self.DETAILS_VIEW_CLASS, wait='5s')
view.flash.assert_success_message(msg)
def equal_drift_results(self, drift_section, section, *indexes):
"""Compares drift analysis results of a row specified by it's title text.
Args:
drift_section (str): Title text of the row to compare
section (str): Accordion section where the change happened
indexes: Indexes of results to compare starting with 1 for first row (latest result).
Compares all available drifts, if left empty (default)
Note:
There have to be at least 2 drift results available for this to work.
Returns:
:py:class:`bool`
"""
def _select_rows(indexes):
for i in indexes:
drift_history_view.history_table[i][0].click()
# mark by indexes or mark all
details_view = navigate_to(self, "Details")
details_view.entities.summary("Relationships").click_at("Drift History")
drift_history_view = self.create_view(DriftHistory, wait='10s')
if indexes:
_select_rows(indexes)
else:
# We can't compare more than 10 drift results at once
# so when selecting all, we have to limit it to the latest 10
rows_number = len(list(drift_history_view.history_table.rows()))
if rows_number > 10:
_select_rows(list(range(10)))
else:
_select_rows(list(range(rows_number)))
drift_history_view.analyze_button.click()
drift_analysis_view = self.create_view(DriftAnalysis, wait='10s')
drift_analysis_view.drift_sections.check_node(section)
drift_analysis_view.apply_button.click()
if not drift_analysis_view.toolbar.all_attributes.active:
drift_analysis_view.toolbar.all_attributes.click()
return drift_analysis_view.drift_analysis.is_changed(drift_section)
@attr.s
class VMCollection(BaseVMCollection):
ENTITY = VM
@attr.s
class Template(BaseVM, _TemplateMixin):
"""A base class for all templates.
"""
@cached_property
def mgmt(self):
"""Holds wrapanapi template entity object for this template."""
return self.provider.mgmt.get_template(self.name)
@property
def exists_on_provider(self):
return self.provider.mgmt.does_template_exist(self.name)
@attr.s
class TemplateCollection(BaseVMCollection):
ENTITY = Template
| gpl-2.0 | 622,023,943,164,201,900 | 40.680639 | 100 | 0.614524 | false |
petewarden/tensorflow | tensorflow/python/autograph/pyct/ast_util_test.py | 8 | 8400 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ast_util module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import textwrap
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import loader
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.platform import test
class AstUtilTest(test.TestCase):
def setUp(self):
super(AstUtilTest, self).setUp()
self._invocation_counts = collections.defaultdict(lambda: 0)
def test_rename_symbols_basic(self):
node = parser.parse('a + b')
node = qual_names.resolve(node)
node = ast_util.rename_symbols(
node, {qual_names.QN('a'): qual_names.QN('renamed_a')})
self.assertIsInstance(node.value.left.id, str)
source = parser.unparse(node, include_encoding_marker=False)
self.assertEqual(source.strip(), '(renamed_a + b)')
def test_rename_symbols_attributes(self):
node = parser.parse('b.c = b.c.d')
node = qual_names.resolve(node)
node = ast_util.rename_symbols(
node, {qual_names.from_str('b.c'): qual_names.QN('renamed_b_c')})
source = parser.unparse(node, include_encoding_marker=False)
self.assertEqual(source.strip(), 'renamed_b_c = renamed_b_c.d')
def test_rename_symbols_nonlocal(self):
node = parser.parse('nonlocal a, b, c')
node = qual_names.resolve(node)
node = ast_util.rename_symbols(
node, {qual_names.from_str('b'): qual_names.QN('renamed_b')})
source = parser.unparse(node, include_encoding_marker=False)
self.assertEqual(source.strip(), 'nonlocal a, renamed_b, c')
def test_rename_symbols_global(self):
node = parser.parse('global a, b, c')
node = qual_names.resolve(node)
node = ast_util.rename_symbols(
node, {qual_names.from_str('b'): qual_names.QN('renamed_b')})
source = parser.unparse(node, include_encoding_marker=False)
self.assertEqual(source.strip(), 'global a, renamed_b, c')
def test_rename_symbols_annotations(self):
node = parser.parse('a[i]')
node = qual_names.resolve(node)
anno.setanno(node, 'foo', 'bar')
orig_anno = anno.getanno(node, 'foo')
node = ast_util.rename_symbols(node,
{qual_names.QN('a'): qual_names.QN('b')})
self.assertIs(anno.getanno(node, 'foo'), orig_anno)
def test_rename_symbols_function(self):
node = parser.parse('def f():\n pass')
node = ast_util.rename_symbols(node,
{qual_names.QN('f'): qual_names.QN('f1')})
source = parser.unparse(node, include_encoding_marker=False)
self.assertEqual(source.strip(), 'def f1():\n pass')
def test_copy_clean(self):
node = parser.parse(
textwrap.dedent("""
def f(a):
return a + 1
"""))
setattr(node, '__foo', 'bar')
new_node = ast_util.copy_clean(node)
self.assertIsNot(new_node, node)
self.assertFalse(hasattr(new_node, '__foo'))
def test_copy_clean_preserves_annotations(self):
node = parser.parse(
textwrap.dedent("""
def f(a):
return a + 1
"""))
anno.setanno(node, 'foo', 'bar')
anno.setanno(node, 'baz', 1)
new_node = ast_util.copy_clean(node, preserve_annos={'foo'})
self.assertEqual(anno.getanno(new_node, 'foo'), 'bar')
self.assertFalse(anno.hasanno(new_node, 'baz'))
def test_keywords_to_dict(self):
keywords = parser.parse_expression('f(a=b, c=1, d=\'e\')').keywords
d = ast_util.keywords_to_dict(keywords)
# Make sure we generate a usable dict node by attaching it to a variable and
# compiling everything.
node = parser.parse('def f(b): pass')
node.body.append(ast.Return(d))
result, _, _ = loader.load_ast(node)
self.assertDictEqual(result.f(3), {'a': 3, 'c': 1, 'd': 'e'})
def assertMatch(self, target_str, pattern_str):
node = parser.parse_expression(target_str)
pattern = parser.parse_expression(pattern_str)
self.assertTrue(ast_util.matches(node, pattern))
def assertNoMatch(self, target_str, pattern_str):
node = parser.parse_expression(target_str)
pattern = parser.parse_expression(pattern_str)
self.assertFalse(ast_util.matches(node, pattern))
def test_matches_symbols(self):
self.assertMatch('foo', '_')
self.assertNoMatch('foo()', '_')
self.assertMatch('foo + bar', 'foo + _')
self.assertNoMatch('bar + bar', 'foo + _')
self.assertNoMatch('foo - bar', 'foo + _')
def test_matches_function_args(self):
self.assertMatch('super(Foo, self).__init__(arg1, arg2)',
'super(_).__init__(_)')
self.assertMatch('super().__init__()', 'super(_).__init__(_)')
self.assertNoMatch('super(Foo, self).bar(arg1, arg2)',
'super(_).__init__(_)')
self.assertMatch('super(Foo, self).__init__()', 'super(Foo, _).__init__(_)')
self.assertNoMatch('super(Foo, self).__init__()',
'super(Bar, _).__init__(_)')
def _mock_apply_fn(self, target, source):
target = parser.unparse(target, include_encoding_marker=False)
source = parser.unparse(source, include_encoding_marker=False)
self._invocation_counts[(target.strip(), source.strip())] += 1
def test_apply_to_single_assignments_dynamic_unpack(self):
node = parser.parse('a, b, c = d')
ast_util.apply_to_single_assignments(node.targets, node.value,
self._mock_apply_fn)
self.assertDictEqual(self._invocation_counts, {
('a', 'd[0]'): 1,
('b', 'd[1]'): 1,
('c', 'd[2]'): 1,
})
def test_apply_to_single_assignments_static_unpack(self):
node = parser.parse('a, b, c = d, e, f')
ast_util.apply_to_single_assignments(node.targets, node.value,
self._mock_apply_fn)
self.assertDictEqual(self._invocation_counts, {
('a', 'd'): 1,
('b', 'e'): 1,
('c', 'f'): 1,
})
def test_parallel_walk(self):
src = """
def f(a):
return a + 1
"""
node = parser.parse(textwrap.dedent(src))
for child_a, child_b in ast_util.parallel_walk(node, node):
self.assertEqual(child_a, child_b)
def test_parallel_walk_string_leaves(self):
src = """
def f(a):
global g
"""
node = parser.parse(textwrap.dedent(src))
for child_a, child_b in ast_util.parallel_walk(node, node):
self.assertEqual(child_a, child_b)
def test_parallel_walk_inconsistent_trees(self):
node_1 = parser.parse(
textwrap.dedent("""
def f(a):
return a + 1
"""))
node_2 = parser.parse(
textwrap.dedent("""
def f(a):
return a + (a * 2)
"""))
node_3 = parser.parse(
textwrap.dedent("""
def f(a):
return a + 2
"""))
with self.assertRaises(ValueError):
for _ in ast_util.parallel_walk(node_1, node_2):
pass
# There is no particular reason to reject trees that differ only in the
# value of a constant.
# TODO(mdan): This should probably be allowed.
with self.assertRaises(ValueError):
for _ in ast_util.parallel_walk(node_1, node_3):
pass
def assertLambdaNodes(self, matching_nodes, expected_bodies):
self.assertEqual(len(matching_nodes), len(expected_bodies))
for node in matching_nodes:
self.assertIsInstance(node, gast.Lambda)
self.assertIn(
parser.unparse(node.body, include_encoding_marker=False).strip(),
expected_bodies)
if __name__ == '__main__':
test.main()
| apache-2.0 | 7,127,344,858,476,025,000 | 34 | 80 | 0.623929 | false |
balazssimon/ml-playground | udemy/lazyprogrammer/reinforcement-learning-python/grid_world.py | 1 | 2827 | class Grid: # Environment
def __init__(self, width, height, start):
self.width = width
self.height = height
self.i = start[0]
self.j = start[1]
def set(self, rewards, actions):
# rewards should be a dict mapping (i, j) positions (row, col) to rewards
# actions should be a dict mapping (i, j) positions (row, col) to lists of possible actions
self.rewards = rewards
self.actions = actions
def set_state(self, s):
self.i = s[0]
self.j = s[1]
def current_state(self):
return (self.i, self.j)
def is_terminal(self, s):
return s not in self.actions
def move(self, action):
# check if legal move first
if action in self.actions[(self.i, self.j)]:
if action == 'U':
self.i -= 1
elif action == 'D':
self.i += 1
elif action == 'R':
self.j += 1
elif action == 'L':
self.j -= 1
# return a reward (if any)
return self.rewards.get((self.i, self.j), 0)
def undo_move(self, action):
# these are the opposite of what U/D/L/R should normally do
if action == 'U':
self.i += 1
elif action == 'D':
self.i -= 1
elif action == 'R':
self.j -= 1
elif action == 'L':
self.j += 1
# raise an exception if we arrive somewhere we shouldn't be
# should never happen
assert(self.current_state() in self.all_states())
def game_over(self):
# returns true if game is over, else false
# true if we are in a state where no actions are possible
return (self.i, self.j) not in self.actions
def all_states(self):
# possibly buggy but simple way to get all states
# either a position that has possible next actions
# or a position that yields a reward
return set(self.actions.keys()) | set(self.rewards.keys())
def standard_grid():
# define a grid that describes the reward for arriving at each state
# and possible actions at each state
# the grid looks like this
# x means you can't go there
# s means start position
# number means reward at that state
# . . . 1
# . x . -1
# s . . .
g = Grid(3, 4, (2, 0))
rewards = {(0, 3): 1, (1, 3): -1}
actions = {
(0, 0): ('D', 'R'),
(0, 1): ('L', 'R'),
(0, 2): ('L', 'D', 'R'),
(1, 0): ('U', 'D'),
(1, 2): ('U', 'D', 'R'),
(2, 0): ('U', 'R'),
(2, 1): ('L', 'R'),
(2, 2): ('L', 'R', 'U'),
(2, 3): ('L', 'U'),
}
g.set(rewards, actions)
return g
def negative_grid(step_cost=-0.1):
# in this game we want to try to minimize the number of moves
# so we will penalize every move
g = standard_grid()
g.rewards.update({
(0, 0): step_cost,
(0, 1): step_cost,
(0, 2): step_cost,
(1, 0): step_cost,
(1, 2): step_cost,
(2, 0): step_cost,
(2, 1): step_cost,
(2, 2): step_cost,
(2, 3): step_cost,
})
return g
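# Example usage (illustrative, not part of the original script):
#   g = standard_grid()
#   print(g.current_state())   # (2, 0), the start position
#   r = g.move('U')            # legal move; returns the reward (0 here)
#   g.undo_move('U')           # back at (2, 0)
#   print(g.game_over())       # False until a terminal state is reached
#   print(g.all_states())      # every position with actions or a reward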
| apache-2.0 | -6,391,394,378,777,689,000 | 25.420561 | 81 | 0.553237 | false |
nordri/check_domains | lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py | 31 | 7885 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
from django.conf import settings
from django.db.backends import (BaseDatabaseFeatures, BaseDatabaseWrapper,
BaseDatabaseValidation)
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
from django.db.backends.postgresql_psycopg2.client import DatabaseClient
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.db.backends.postgresql_psycopg2.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.utils import InterfaceError
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeText, SafeBytes
from django.utils.timezone import utc
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
def utc_tzinfo_factory(offset):
if offset != 0:
raise AssertionError("database connection isn't set to UTC")
return utc
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = True
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
has_select_for_update = True
has_select_for_update_nowait = True
has_bulk_insert = True
uses_savepoints = True
supports_tablespaces = True
supports_transactions = True
can_introspect_ip_address_field = True
can_introspect_small_integer_field = True
can_distinct_on_fields = True
can_rollback_ddl = True
supports_combined_alters = True
nulls_order_largest = True
closed_cursor_error_class = InterfaceError
has_case_insensitive_like = False
requires_sqlparse_for_splitting = False
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
pattern_ops = {
'startswith': "LIKE %s || '%%%%'",
'istartswith': "LIKE UPPER(%s) || '%%%%'",
}
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
opts = self.settings_dict["OPTIONS"]
RC = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
self.isolation_level = opts.get('isolation_level', RC)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if 'isolation_level' in conn_params:
del conn_params['isolation_level']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
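# For reference, a typical settings.DATABASES entry feeding this method looks
# like the sketch below (all values are placeholders):
#
#   DATABASES = {
#       'default': {
#           'ENGINE': 'django.db.backends.postgresql_psycopg2',
#           'NAME': 'mydb',
#           'USER': 'myuser',
#           'PASSWORD': 'secret',
#           'HOST': 'localhost',
#           'PORT': '5432',
#       }
#   }
#
# get_connection_params() turns that into
#   {'database': 'mydb', 'user': 'myuser', 'password': 'secret',
#    'host': 'localhost', 'port': '5432'}
# which is passed straight to psycopg2.connect() in get_new_connection().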
def get_new_connection(self, conn_params):
return Database.connect(**conn_params)
def init_connection_state(self):
settings_dict = self.settings_dict
self.connection.set_client_encoding('UTF8')
tz = 'UTC' if settings.USE_TZ else settings_dict.get('TIME_ZONE')
if tz:
try:
get_parameter_status = self.connection.get_parameter_status
except AttributeError:
# psycopg2 < 2.0.12 doesn't have get_parameter_status
conn_tz = None
else:
conn_tz = get_parameter_status('TimeZone')
if conn_tz != tz:
cursor = self.connection.cursor()
try:
cursor.execute(self.ops.set_time_zone_sql(), [tz])
finally:
cursor.close()
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
def create_cursor(self):
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def _set_isolation_level(self, isolation_level):
assert isolation_level in range(1, 5) # Use set_autocommit for level = 0
if self.psycopg2_version >= (2, 4, 2):
self.connection.set_session(isolation_level=isolation_level)
else:
self.connection.set_isolation_level(isolation_level)
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
if self.psycopg2_version >= (2, 4, 2):
self.connection.autocommit = autocommit
else:
if autocommit:
level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
else:
level = self.isolation_level
self.connection.set_isolation_level(level)
def check_constraints(self, table_names=None):
"""
To check constraints, we set constraints to immediate. Then, when we're done, we must ensure they
are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
def schema_editor(self, *args, **kwargs):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self, *args, **kwargs)
@cached_property
def psycopg2_version(self):
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.'))
@cached_property
def pg_version(self):
with self.temporary_connection():
return get_version(self.connection)
| gpl-3.0 | 5,726,886,290,288,302,000 | 36.369668 | 105 | 0.639442 | false |
knifenomad/django | tests/delete/tests.py | 222 | 18346 | from __future__ import unicode_literals
from math import ceil
from django.db import IntegrityError, connection, models
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils.six.moves import range
from .models import (
A, M, MR, R, S, T, Avatar, Base, Child, HiddenUser, HiddenUserProfile,
M2MFrom, M2MTo, MRNull, Parent, RChild, User, create_a, get_default_r,
)
class OnDeleteTests(TestCase):
def setUp(self):
self.DEFAULT = get_default_r()
def test_auto(self):
a = create_a('auto')
a.auto.delete()
self.assertFalse(A.objects.filter(name='auto').exists())
def test_auto_nullable(self):
a = create_a('auto_nullable')
a.auto_nullable.delete()
self.assertFalse(A.objects.filter(name='auto_nullable').exists())
def test_setvalue(self):
a = create_a('setvalue')
a.setvalue.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setvalue.pk)
def test_setnull(self):
a = create_a('setnull')
a.setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.setnull)
def test_setdefault(self):
a = create_a('setdefault')
a.setdefault.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setdefault.pk)
def test_setdefault_none(self):
a = create_a('setdefault_none')
a.setdefault_none.delete()
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.setdefault_none)
def test_cascade(self):
a = create_a('cascade')
a.cascade.delete()
self.assertFalse(A.objects.filter(name='cascade').exists())
def test_cascade_nullable(self):
a = create_a('cascade_nullable')
a.cascade_nullable.delete()
self.assertFalse(A.objects.filter(name='cascade_nullable').exists())
def test_protect(self):
a = create_a('protect')
self.assertRaises(IntegrityError, a.protect.delete)
def test_do_nothing(self):
# Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
# so we connect to pre_delete and set the fk to a known value.
replacement_r = R.objects.create()
def check_do_nothing(sender, **kwargs):
obj = kwargs['instance']
obj.donothing_set.update(donothing=replacement_r)
models.signals.pre_delete.connect(check_do_nothing)
a = create_a('do_nothing')
a.donothing.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(replacement_r, a.donothing)
models.signals.pre_delete.disconnect(check_do_nothing)
def test_do_nothing_qscount(self):
"""
Test that a models.DO_NOTHING relation doesn't trigger a query.
"""
b = Base.objects.create()
with self.assertNumQueries(1):
# RelToBase should not be queried.
b.delete()
self.assertEqual(Base.objects.count(), 0)
def test_inheritance_cascade_up(self):
child = RChild.objects.create()
child.delete()
self.assertFalse(R.objects.filter(pk=child.pk).exists())
def test_inheritance_cascade_down(self):
child = RChild.objects.create()
parent = child.r_ptr
parent.delete()
self.assertFalse(RChild.objects.filter(pk=child.pk).exists())
def test_cascade_from_child(self):
a = create_a('child')
a.child.delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(R.objects.filter(pk=a.child_id).exists())
def test_cascade_from_parent(self):
a = create_a('child')
R.objects.get(pk=a.child_id).delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())
def test_setnull_from_child(self):
a = create_a('child_setnull')
a.child_setnull.delete()
self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.child_setnull)
def test_setnull_from_parent(self):
a = create_a('child_setnull')
R.objects.get(pk=a.child_setnull_id).delete()
self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.child_setnull)
def test_o2o_setnull(self):
a = create_a('o2o_setnull')
a.o2o_setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.o2o_setnull)
class DeletionTests(TestCase):
def test_m2m(self):
m = M.objects.create()
r = R.objects.create()
MR.objects.create(m=m, r=r)
r.delete()
self.assertFalse(MR.objects.exists())
r = R.objects.create()
MR.objects.create(m=m, r=r)
m.delete()
self.assertFalse(MR.objects.exists())
m = M.objects.create()
r = R.objects.create()
m.m2m.add(r)
r.delete()
through = M._meta.get_field('m2m').remote_field.through
self.assertFalse(through.objects.exists())
r = R.objects.create()
m.m2m.add(r)
m.delete()
self.assertFalse(through.objects.exists())
m = M.objects.create()
r = R.objects.create()
MRNull.objects.create(m=m, r=r)
r.delete()
self.assertTrue(MRNull.objects.exists())
self.assertFalse(m.m2m_through_null.exists())
def test_bulk(self):
s = S.objects.create(r=R.objects.create())
for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
T.objects.create(s=s)
# 1 (select related `T` instances)
# + 1 (select related `U` instances)
# + 2 (delete `T` instances in batches)
# + 1 (delete `s`)
self.assertNumQueries(5, s.delete)
self.assertFalse(S.objects.exists())
def test_instance_update(self):
deleted = []
related_setnull_sets = []
def pre_delete(sender, **kwargs):
obj = kwargs['instance']
deleted.append(obj)
if isinstance(obj, R):
related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))
models.signals.pre_delete.connect(pre_delete)
a = create_a('update_setnull')
a.setnull.delete()
a = create_a('update_cascade')
a.cascade.delete()
for obj in deleted:
self.assertIsNone(obj.pk)
for pk_list in related_setnull_sets:
for a in A.objects.filter(id__in=pk_list):
self.assertIsNone(a.setnull)
models.signals.pre_delete.disconnect(pre_delete)
def test_deletion_order(self):
pre_delete_order = []
post_delete_order = []
def log_post_delete(sender, **kwargs):
pre_delete_order.append((sender, kwargs['instance'].pk))
def log_pre_delete(sender, **kwargs):
post_delete_order.append((sender, kwargs['instance'].pk))
models.signals.post_delete.connect(log_post_delete)
models.signals.pre_delete.connect(log_pre_delete)
r = R.objects.create(pk=1)
s1 = S.objects.create(pk=1, r=r)
s2 = S.objects.create(pk=2, r=r)
T.objects.create(pk=1, s=s1)
T.objects.create(pk=2, s=s2)
r.delete()
self.assertEqual(
pre_delete_order, [(T, 2), (T, 1), (S, 2), (S, 1), (R, 1)]
)
self.assertEqual(
post_delete_order, [(T, 1), (T, 2), (S, 1), (S, 2), (R, 1)]
)
models.signals.post_delete.disconnect(log_post_delete)
models.signals.pre_delete.disconnect(log_pre_delete)
def test_relational_post_delete_signals_happen_before_parent_object(self):
deletions = []
def log_post_delete(instance, **kwargs):
self.assertTrue(R.objects.filter(pk=instance.r_id))
self.assertIs(type(instance), S)
deletions.append(instance.id)
r = R.objects.create(pk=1)
S.objects.create(pk=1, r=r)
models.signals.post_delete.connect(log_post_delete, sender=S)
try:
r.delete()
finally:
models.signals.post_delete.disconnect(log_post_delete)
self.assertEqual(len(deletions), 1)
self.assertEqual(deletions[0], 1)
@skipUnlessDBFeature("can_defer_constraint_checks")
def test_can_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to delete the avatar
# The important thing is that when we can defer constraint checks there
# is no need to do an UPDATE on User.avatar to null it out.
# Attach a signal to make sure we will not do fast_deletes.
calls = []
def noop(*args, **kwargs):
calls.append('')
models.signals.post_delete.connect(noop, sender=User)
self.assertNumQueries(3, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
self.assertEqual(len(calls), 1)
models.signals.post_delete.disconnect(noop, sender=User)
@skipIfDBFeature("can_defer_constraint_checks")
def test_cannot_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
# Attach a signal to make sure we will not do fast_deletes.
calls = []
def noop(*args, **kwargs):
calls.append('')
models.signals.post_delete.connect(noop, sender=User)
a = Avatar.objects.get(pk=u.avatar_id)
# The below doesn't make sense... Why do we need to null out
# user.avatar if we are going to delete the user immediately after it,
# and there are no more cascades.
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to null out user.avatar, because we can't defer the constraint
# 1 query to delete the avatar
self.assertNumQueries(4, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
self.assertEqual(len(calls), 1)
models.signals.post_delete.disconnect(noop, sender=User)
def test_hidden_related(self):
r = R.objects.create()
h = HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h)
r.delete()
self.assertEqual(HiddenUserProfile.objects.count(), 0)
def test_large_delete(self):
TEST_SIZE = 2000
objs = [Avatar() for i in range(0, TEST_SIZE)]
Avatar.objects.bulk_create(objs)
# Calculate the number of queries needed.
batch_size = connection.ops.bulk_batch_size(['pk'], objs)
# The related fetches are done in batches.
batches = int(ceil(float(len(objs)) / batch_size))
# One query for Avatar.objects.all() and then one related fast delete for
# each batch.
fetches_to_mem = 1 + batches
# The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE
queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
self.assertNumQueries(queries, Avatar.objects.all().delete)
self.assertFalse(Avatar.objects.exists())
def test_large_delete_related(self):
TEST_SIZE = 2000
s = S.objects.create(r=R.objects.create())
for i in range(TEST_SIZE):
T.objects.create(s=s)
batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)
# TEST_SIZE // batch_size (select related `T` instances)
# + 1 (select related `U` instances)
# + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
# + 1 (delete `s`)
expected_num_queries = (ceil(TEST_SIZE // batch_size) +
ceil(TEST_SIZE // GET_ITERATOR_CHUNK_SIZE) + 2)
self.assertNumQueries(expected_num_queries, s.delete)
self.assertFalse(S.objects.exists())
self.assertFalse(T.objects.exists())
def test_delete_with_keeping_parents(self):
child = RChild.objects.create()
parent_id = child.r_ptr_id
child.delete(keep_parents=True)
self.assertFalse(RChild.objects.filter(id=child.id).exists())
self.assertTrue(R.objects.filter(id=parent_id).exists())
def test_queryset_delete_returns_num_rows(self):
"""
QuerySet.delete() should return the number of deleted rows and a
dictionary with the number of deletions for each object type.
"""
Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')])
avatars_count = Avatar.objects.count()
deleted, rows_count = Avatar.objects.all().delete()
self.assertEqual(deleted, avatars_count)
# more complex example with multiple object types
r = R.objects.create()
h1 = HiddenUser.objects.create(r=r)
HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h1)
existed_objs = {
R._meta.label: R.objects.count(),
HiddenUser._meta.label: HiddenUser.objects.count(),
A._meta.label: A.objects.count(),
MR._meta.label: MR.objects.count(),
HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
}
deleted, deleted_objs = R.objects.all().delete()
for k, v in existed_objs.items():
self.assertEqual(deleted_objs[k], v)
def test_model_delete_returns_num_rows(self):
"""
Model.delete() should return the number of deleted rows and a
dictionary with the number of deletions for each object type.
"""
r = R.objects.create()
h1 = HiddenUser.objects.create(r=r)
h2 = HiddenUser.objects.create(r=r)
HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h1)
HiddenUserProfile.objects.create(user=h2)
m1 = M.objects.create()
m2 = M.objects.create()
MR.objects.create(r=r, m=m1)
r.m_set.add(m1)
r.m_set.add(m2)
r.save()
existed_objs = {
R._meta.label: R.objects.count(),
HiddenUser._meta.label: HiddenUser.objects.count(),
A._meta.label: A.objects.count(),
MR._meta.label: MR.objects.count(),
HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
M.m2m.through._meta.label: M.m2m.through.objects.count(),
}
deleted, deleted_objs = r.delete()
self.assertEqual(deleted, sum(existed_objs.values()))
for k, v in existed_objs.items():
self.assertEqual(deleted_objs[k], v)
class FastDeleteTests(TestCase):
def test_fast_delete_fk(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to fast-delete the user
# 1 query to delete the avatar
self.assertNumQueries(2, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
def test_fast_delete_m2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete f, 1 to fast-delete m2m for f
self.assertNumQueries(2, f.delete)
def test_fast_delete_revm2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete t, 1 to fast-delete t's m_set
self.assertNumQueries(2, f.delete)
def test_fast_delete_qs(self):
u1 = User.objects.create()
u2 = User.objects.create()
self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_joined_qs(self):
a = Avatar.objects.create(desc='a')
User.objects.create(avatar=a)
u2 = User.objects.create()
expected_queries = 1 if connection.features.update_can_self_select else 2
self.assertNumQueries(expected_queries,
User.objects.filter(avatar__desc='a').delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_inheritance(self):
c = Child.objects.create()
p = Parent.objects.create()
# 1 for self, 1 for parent
# However, this doesn't work as child.parent access creates a query,
# and this means we will be generating extra queries (a lot for large
# querysets). This is not a fast-delete problem.
# self.assertNumQueries(2, c.delete)
c.delete()
self.assertFalse(Child.objects.exists())
self.assertEqual(Parent.objects.count(), 1)
self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
# 1 for self delete, 1 for fast delete of empty "child" qs.
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
# 1 for self delete, 1 for fast delete of empty "child" qs.
c = Child.objects.create()
p = c.parent_ptr
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
self.assertFalse(Child.objects.exists())
def test_fast_delete_large_batch(self):
User.objects.bulk_create(User() for i in range(0, 2000))
# No problems here - we aren't going to cascade, so we will fast
# delete the objects in a single query.
self.assertNumQueries(1, User.objects.all().delete)
a = Avatar.objects.create(desc='a')
User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
# We don't hit parameter amount limits for a, so just one query for
# that + fast delete of the related objs.
self.assertNumQueries(2, a.delete)
self.assertEqual(User.objects.count(), 0)
| bsd-3-clause | -8,799,619,714,270,338,000 | 36.212982 | 95 | 0.610651 | false |
joshmoore/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/webclient_gateway.py | 1 | 76419 | #!/usr/bin/env python
#
# webclient_gateway
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
# Carlos Neves <carlos(at)glencoesoftware(dot)com>, 2008
#
# Version: 1.0
#
import cStringIO
import traceback
import logging
logger = logging.getLogger('webclient_gateway')
try:
from PIL import Image, ImageDraw # see ticket:2597
except ImportError:
try:
import Image, ImageDraw # see ticket:2597
except:
logger.error("You need to install the Python Imaging Library. Get it at http://www.pythonware.com/products/pil/")
logger.error(traceback.format_exc())
from StringIO import StringIO
import time
from datetime import datetime
from types import IntType, ListType, TupleType, UnicodeType, StringType
import Ice
import Glacier2
import omero.gateway
import omero.scripts
from omero.rtypes import *
from omero.model import FileAnnotationI, TagAnnotationI, \
DatasetI, ProjectI, ImageI, ScreenI, PlateI, \
DetectorI, FilterI, ObjectiveI, InstrumentI, \
LaserI
from omero.gateway import TagAnnotationWrapper, ExperimenterWrapper, \
ExperimenterGroupWrapper, WellWrapper, AnnotationWrapper, \
OmeroGatewaySafeCallWrapper
from omero.sys import ParametersI
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
try:
PAGE = settings.PAGE
except:
PAGE = 200
class OmeroWebGateway (omero.gateway.BlitzGateway):
def __init__ (self, *args, **kwargs):
"""
Create the connection wrapper. Does not attempt to connect at this stage.
Initialises the omero.client.
@param username: User name. If not specified, use 'omero.gateway.anon_user'
@type username: String
@param passwd: Password.
@type passwd: String
@param client_obj: omero.client
@param group: name of group to try to connect to
@type group: String
@param clone: If True, overwrite anonymous with False
@type clone: Boolean
@param try_super: Try to log on as super user ('system' group)
@type try_super: Boolean
@param host: Omero server host.
@type host: String
@param port: Omero server port.
@type port: Integer
@param extra_config: Dictionary of extra configuration
@type extra_config: Dict
@param secure: Initial underlying omero.client connection type (True=SSL/False=insecure)
@type secure: Boolean
@param anonymous:
@type anonymous: Boolean
@param useragent: Log which python clients use this connection. E.g. 'OMERO.webadmin'
@type useragent: String
@param _shareId: Active share ID
@type _shareId: Long
"""
super(OmeroWebGateway, self).__init__(*args, **kwargs)
self._shareId = None
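# Typical construction (a sketch; host and credentials are placeholders, 4064
# is the default OMERO server port):
#   conn = OmeroWebGateway(username='user', passwd='secret',
#                          host='omero.example.org', port=4064,
#                          secure=True, useragent='OMERO.web')
#   connected = conn.connect()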
def connect (self, *args, **kwargs):
"""
Creates or retrieves connection for the given sessionUuid and
removes some groups from the event context
Returns True if connected.
@param sUuid: session uuid
@type sUuid: omero_model_SessionI
@return: Boolean
"""
rv = super(OmeroWebGateway, self).connect(*args,**kwargs)
if rv: # No _ctx available otherwise #3218
if self._ctx.userName!="guest":
self.removeGroupFromContext()
return rv
def attachToShare (self, share_id):
"""
Turns on the access control lists attached to the given share for the
current session. Warning: this will slow down the execution of the
current session for all database reads. Writing to the database will not
be allowed. If share does not exist or is not accessible (non-members) or
is disabled, then a ValidationException is thrown.
@param shareId: share id
@type shareId: Long
"""
sh = self._proxies['share'].getShare(long(share_id))
if self._shareId is None:
self._proxies['share'].activate(sh.id.val)
self._shareId = sh.id.val
def getShareId(self):
"""
Returns the active share ID.
@return: Share ID
@rtype: Long
"""
if self.getEventContext().shareId is not None:
if self.getEventContext().shareId != self._shareId and self._shareId > 0:
self._shareId = self.getEventContext().shareId
return self._shareId
def removeGroupFromContext (self):
"""
Removes group "User" from the current context.
"""
a = self.getAdminService()
gr_u = a.lookupGroup('user')
try:
self._ctx.memberOfGroups.remove(gr_u.id.val)
self._ctx.leaderOfGroups.remove(gr_u.id.val)
except:
pass
##############################################
# Session methods #
def changeActiveGroup(self, gid): # TODO: should be moved to ISession
"""
Every time a session is created the default group becomes the active group
and is loaded with the security for the current user and thread.
Public data has to be created in the context of the group that the user
who wants to view the data is a member of.
Public data is only visible to members of that group and to its owners.
@param gid: New active group ID
@type gid: Long
@return: Boolean
"""
try:
for k in self._proxies.keys():
self._proxies[k].close()
self.c.sf.setSecurityContext(omero.model.ExperimenterGroupI(gid, False))
self.getAdminService().setDefaultGroup(self.getUser()._obj, omero.model.ExperimenterGroupI(gid, False))
self._ctx = self.getAdminService().getEventContext()
return True
except omero.SecurityViolation:
logger.error(traceback.format_exc())
return False
except:
logger.error(traceback.format_exc())
return False
##############################################
## Forgotten password ##
def isForgottenPasswordSet(self):
"""
Retrieves a configuration value "omero.resetpassword.config" for
Forgotten password form from the backend store.
@return: Boolean
"""
conf = self.getConfigService()
try:
return bool(conf.getConfigValue("omero.resetpassword.config").title())
except:
logger.error(traceback.format_exc())
return False
def reportForgottenPassword(self, username, email):
"""
Allows the password to be reset (a temporary password is sent). The
given email must match the email of the user listed under the username
argument.
@param username: omename
@type username: String
@param email: email address
@type email: String
"""
admin_serv = self.getAdminService()
admin_serv.reportForgottenPassword(username, email)
##############################################
## IAdmin ##
def isAnythingCreated(self):
"""
Checks if anything (beyond the default groups) has been created yet; returns True if not.
@return: Boolean
"""
q = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["default_names"] = rlist([rstring("user"), rstring("system"), rstring("guest")])
f = omero.sys.Filter()
f.limit = rint(1)
p.theFilter = f
sql = "select g from ExperimenterGroup as g where g.name not in (:default_names)"
if len(q.findAllByQuery(sql, p)) > 0:
return False
return True
def listLdapAuthExperimenters(self):
"""
Lists all IDs of experimenters who are authenticated by LDAP
(have a dn set in the password table).
@return: List of experimenter IDs
@rtype: L{Dict of String: Long}
"""
admin_serv = self.getAdminService()
return admin_serv.lookupLdapAuthExperimenters()
def getLdapAuthExperimenter(self, eid):
"""
Return the DN of the specified experimenter if they use LDAP authentication
(have a dn set in the password table), or None.
@param eid: experimenter ID
@type eid: L{Long}
@return: Distinguished Name
@rtype: String
"""
admin_serv = self.getAdminService()
return admin_serv.lookupLdapAuthExperimenter(long(eid))
def getExperimenters(self):
"""
Return all experimenters apart from current user.
@return: Generator yielding experimenters
@rtype: L{ExperimenterWrapper} generator
"""
q = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["id"] = rlong(self.getEventContext().userId)
sql = "select e from Experimenter as e where e.id != :id "
for e in q.findAllByQuery(sql, p):
yield ExperimenterWrapper(self, e)
#def getCurrentSupervisor(self):
# """
# Gets the owner of a group for current user.
#
# @return: ExperimenterWrapper
# """
#
# p = omero.sys.ParametersI()
# p.map = {}
# p.map["id"] = rlong(self.getEventContext().groupId)
# # TODO: there can now be multiple supervisors
# p.page(0,1)
# supervisor = self.getQueryService().findByQuery(\
# """select e from ExperimenterGroup as g
# join g.groupExperimenterMap as m join m.child as e
# where m.owner = true and g.id = :id""", p)
# return ExperimenterWrapper(self, supervisor)
#def getScriptwithDetails(self, sid):
# script_serv = self.getScriptService()
# return script_serv.getScriptWithDetails(long(sid))
#def lookupScripts(self):
# script_serv = self.getScriptService()
# return script_serv.getScripts()
def getServerVersion(self):
"""
Retrieves a configuration value "omero.version" from the backend store.
@return: String
"""
conf = self.getConfigService()
return conf.getConfigValue("omero.version")
#########################################################
## From Bram b(dot)gerritsen(at)nki(dot)nl ##
def findWellInPlate (self, plate_name, row, column):
q = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map['pname'] = rstring(str(plate_name))
p.map['row'] = rint(int(row))
p.map['column'] = rint(int(column))
sql = """select well from Well as well
left outer join fetch well.plate as pt
left outer join fetch well.wellSamples as ws
inner join fetch ws.image as img
where well.plate.name = :pname and well.row = :row
and well.column = :column"""
well = q.findByQuery(sql, p)
if well is None:
return None
else:
return WellWrapper(self, well, None)
####################################################################################
## Container Queries ###
####################################################################################
def listTags(self, eid=None):
params = omero.sys.ParametersI()
params.orphan()
params.map = {}
params.map['ns'] = rstring(omero.constants.metadata.NSINSIGHTTAGSET)
sql = "select tg from TagAnnotation tg where ((ns=:ns) or (ns is null and not exists ( select aal from AnnotationAnnotationLink as aal where aal.child=tg.id))) "
if eid is not None:
params.map["eid"] = rlong(long(eid))
sql+=" and tg.details.owner.id = :eid"
q = self.getQueryService()
for ann in q.findAllByQuery(sql, params):
yield TagAnnotationWrapper(self, ann)
    def countOrphans (self, obj_type, eid=None):
        """
        Count orphaned Datasets, Images or Plates controlled by the security
        system, optionally filtered by experimenter 'eid'.
        @param obj_type: 'Dataset', 'Image' or 'Plate'
        @param eid: experimenter id
        @type eid: Long
        @return: Number of orphaned objects
        @rtype: Long
        """
links = {'Dataset':('ProjectDatasetLink', DatasetWrapper),
'Image':('DatasetImageLink', ImageWrapper),
'Plate':('ScreenPlateLink', PlateWrapper)}
if obj_type not in links:
raise TypeError("'%s' is not valid object type. Must use one of %s" % (obj_type, links.keys()) )
q = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
if eid is not None:
p.map["eid"] = rlong(long(eid))
eidFilter = "obj.details.owner.id=:eid and "
eidWsFilter = " and ws.details.owner.id=:eid"
else:
eidFilter = ""
eidWsFilter = ""
sql = "select count(obj.id) from %s as obj " \
"join obj.details.creationEvent "\
"join obj.details.owner join obj.details.group " \
"where %s" \
"not exists (select obl from %s as obl where " \
"obl.child=obj.id)" % (obj_type, eidFilter, links[obj_type][0])
if obj_type == 'Image':
sql += "and not exists ( "\
"select ws from WellSample as ws "\
"where ws.image=obj.id %s)" % eidWsFilter
rslt = q.projection(sql, p)
if len(rslt) > 0:
if len(rslt[0]) > 0:
return rslt[0][0].val
return 0
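    # Hedged example (assumed names, not original code): counting Datasets owned
    # by the current user that are not linked to any Project could look like
    #     n = conn.countOrphans('Dataset', eid=conn.getEventContext().userId)
    # where `conn` is a connected gateway instance.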
def listOrphans (self, obj_type, eid=None, page=None):
"""
        List orphaned Datasets, Images or Plates controlled by the security system,
        optionally filtered by experimenter 'eid'.
@param obj_type: 'Dataset', 'Image', 'Plate'
@param eid: experimenter id
@type eid: Long
@param page: page number
@type page: Long
@return: Generator yielding Datasets
@rtype: L{DatasetWrapper} generator
"""
links = {'Dataset':('ProjectDatasetLink', DatasetWrapper),
'Image':('DatasetImageLink', ImageWrapper),
'Plate':('ScreenPlateLink', PlateWrapper)}
if obj_type not in links:
raise TypeError("'%s' is not valid object type. Must use one of %s" % (obj_type, links.keys()) )
q = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
if eid is not None:
p.map["eid"] = rlong(long(eid))
eidFilter = "obj.details.owner.id=:eid and "
eidWsFilter = " and ws.details.owner.id=:eid"
else:
eidFilter = ""
eidWsFilter = ""
if page is not None:
f = omero.sys.Filter()
f.limit = rint(PAGE)
f.offset = rint((int(page)-1)*PAGE)
p.theFilter = f
sql = "select obj from %s as obj " \
"join fetch obj.details.creationEvent "\
"join fetch obj.details.owner join fetch obj.details.group " % (obj_type)
sql += "where %s" \
"not exists (select obl from %s as obl where " \
"obl.child=obj.id)" % (eidFilter, links[obj_type][0])
if obj_type == 'Image':
sql += "and not exists ( "\
"select ws from WellSample as ws "\
"where ws.image=obj.id %s)" % eidWsFilter
for e in q.findAllByQuery(sql, p):
yield links[obj_type][1](self, e)
def listImagesInDataset (self, oid, eid=None, page=None):
"""
List Images in the given Dataset.
        Optionally filter by experimenter 'eid'.
@param eid: experimenter id
@type eid: Long
@param page: page number
@type page: Long
@return: Generator yielding Images
@rtype: L{ImageWrapper} generator
"""
q = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["oid"] = rlong(long(oid))
if page is not None:
f = omero.sys.Filter()
f.limit = rint(PAGE)
f.offset = rint((int(page)-1)*PAGE)
p.theFilter = f
sql = "select im from Image im "\
"join fetch im.details.creationEvent "\
"join fetch im.details.owner join fetch im.details.group " \
"left outer join fetch im.datasetLinks dil "\
"left outer join fetch dil.parent d " \
"where d.id = :oid"
if eid is not None:
p.map["eid"] = rlong(long(eid))
sql += " and im.details.owner.id=:eid"
sql+=" order by im.name ASC"
for e in q.findAllByQuery(sql, p):
kwargs = {'link': omero.gateway.BlitzObjectWrapper(self, e.copyDatasetLinks()[0])}
yield ImageWrapper(self, e, None, **kwargs)
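    # Usage sketch (hypothetical): listing the first page of images in dataset
    # `did`, restricted to the current user,
    #     for img in conn.listImagesInDataset(did, eid=conn.getEventContext().userId, page=1):
    #         print img.getName()
    # `conn` and `did` are assumptions; PAGE (defined elsewhere in this module)
    # controls the page size.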
    # DATA RETRIEVAL BY TAGS
def findTag (self, name, desc=None):
"""
Retrieves Tag by given Name and description
@param name name of tag
@type name String
@param desc description of tag
@type desc String
@return: TagAnnotation
@rtype: AnnotationWrapper
"""
"""TODO: #1015
It does not support SPW"""
query_serv = self.getQueryService()
res = list()
p = omero.sys.Parameters()
p.map = {}
p.map["text"] = rstring(str(name))
if desc is not None:
p.map["desc"] = rstring(str(desc))
#p.map["eid"] = rlong(self.getEventContext().userId)
f = omero.sys.Filter()
f.limit = rint(1)
p.theFilter = f
sql = "select tg from TagAnnotation tg " \
"where tg.textValue=:text"
if desc is not None:
sql+= " and tg.description=:desc"
sql+=" and tg.ns is null order by tg.textValue"
res = query_serv.findAllByQuery(sql, p)
if len(res) > 0:
return TagAnnotationWrapper(self, res[0])
return None
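    # Hedged example (made-up values): looking a tag up by its text value,
    #     tag = conn.findTag("mitosis", desc="cells undergoing mitosis")
    #     if tag is not None:
    #         print tag.getId()
    # The tag text and description shown here are illustrative only.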
# AVATAR #
def uploadMyUserPhoto(self, filename, format, data):
"""
Uploads a photo for the user which will be displayed on his/her profile.
This photo will be saved as an OriginalFile object
with the given format, and attached to the user's Experimenter
        object via a File Annotation with
the namespace: "openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO).
If such an OriginalFile instance already exists,
it will be overwritten. If more than one photo is present, the oldest
version will be modified (i.e. the highest updateEvent id).
Note: as outlined in ticket:1794, this photo will be placed in the "user"
group and therefore will be visible to everyone on the system.
@param filename name which will be used.
@type filename String
@param format Format.value string. 'image/jpeg' and 'image/png' are common values.
@type format String
@param data Data from the image. This will be written to disk.
@type data String
@return ID of the overwritten or newly created user photo OriginalFile object.
@rtype Long
"""
admin_serv = self.getAdminService()
pid = admin_serv.uploadMyUserPhoto(filename, format, data)
if pid is not None:
return pid
def hasExperimenterPhoto(self, oid=None):
"""
Check if File annotation with the namespace:
"openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO) is linked
to the given user ID. If user id not set, owned by the current user.
@param oid experimenter ID
@type oid Long
@return True or False
@rtype Boolean
"""
photo = None
meta = self.getMetadataService()
try:
if oid is None:
ann = meta.loadAnnotations("Experimenter", [self.getEventContext().userId], None, None, None).get(self.getEventContext().userId, [])[0]
else:
ann = meta.loadAnnotations("Experimenter", [long(oid)], None, None, None).get(long(oid), [])[0]
if ann is not None:
return True
else:
return False
except:
return False
def getExperimenterPhoto(self, oid=None):
"""
Get File annotation with the namespace:
"openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO) linked
to the given user ID. If user id not set, owned by the current user.
@param oid experimenter ID
@type oid Long
@return Data from the image.
@rtype String
"""
photo = None
meta = self.getMetadataService()
try:
if oid is None:
ann = meta.loadAnnotations("Experimenter", [self.getEventContext().userId], None, None, None).get(self.getEventContext().userId, [])
else:
ann = meta.loadAnnotations("Experimenter", [long(oid)], None, None, None).get(long(oid), [])
if len(ann) > 0:
ann = ann[0]
store = self.createRawFileStore()
store.setFileId(ann.file.id.val)
photo = store.read(0,long(ann.file.size.val))
else:
photo = self.getExperimenterDefaultPhoto()
except:
logger.error(traceback.format_exc())
photo = self.getExperimenterDefaultPhoto()
if photo == None:
photo = self.getExperimenterDefaultPhoto()
return photo
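    # Sketch (assumption): a view rendering the avatar of experimenter `eid`
    # could simply do
    #     photo_bytes = conn.getExperimenterPhoto(eid)
    # the fallback to the default picture is handled inside the method.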
def getExperimenterPhotoSize(self, oid=None):
"""
Get size of File annotation with the namespace:
"openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO) linked
to the given user ID. If user id not set, owned by the current user.
@param oid experimenter ID
@type oid Long
        @return Tuple including dimensions and size of the file
@rtype Tuple
"""
photo = None
meta = self.getMetadataService()
try:
if oid is None:
ann = meta.loadAnnotations("Experimenter", [self.getEventContext().userId], None, None, None).get(self.getEventContext().userId, [])[0]
else:
ann = meta.loadAnnotations("Experimenter", [long(oid)], None, None, None).get(long(oid), [])[0]
store = self.createRawFileStore()
store.setFileId(ann.file.id.val)
photo = store.read(0,long(ann.file.size.val))
try:
im = Image.open(StringIO(photo))
except:
logger.error(traceback.format_exc())
return None
else:
return (im.size, ann.file.size.val)
except:
return None
def cropExperimenterPhoto(self, box, oid=None):
"""
Crop File annotation with the namespace:
"openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO) linked
to the given user ID. If user id not set, owned by the current user.
        New dimensions are defined by square positions box = (x1,y1,x2,y2).
@param box tuple of new square positions
@type box Tuple
@param oid experimenter ID
@type oid Long
"""
# TODO: crop method could be moved to the server side
photo = None
meta = self.getMetadataService()
ann = None
try:
if oid is None:
ann = meta.loadAnnotations("Experimenter", [self.getEventContext().userId], None, None, None).get(self.getEventContext().userId, [])[0]
else:
ann = meta.loadAnnotations("Experimenter", [long(oid)], None, None, None).get(long(oid), [])[0]
store = self.createRawFileStore()
store.setFileId(ann.file.id.val)
photo = store.read(0,long(ann.file.size.val))
except:
logger.error(traceback.format_exc())
raise IOError("Photo does not exist.")
else:
region = None
try:
im = Image.open(StringIO(photo))
region = im.crop(box)
except IOError:
logger.error(traceback.format_exc())
raise IOError("Cannot open that photo.")
else:
imdata=StringIO()
region.save(imdata, format=im.format)
self.uploadMyUserPhoto(ann.file.name.val, ann.file.mimetype.val, imdata.getvalue())
def getExperimenterDefaultPhoto(self):
"""
If file annotation with the namespace:
"openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO)
        is not linked to the experimenter, this method generates a default picture of the person.
@return Data from the image.
@rtype String
"""
img = Image.open(settings.DEFAULT_USER)
img.thumbnail((32,32), Image.ANTIALIAS)
draw = ImageDraw.Draw(img)
f = cStringIO.StringIO()
img.save(f, "PNG")
f.seek(0)
return f.read()
def getFileFormat(self, format):
"""
Get file annotation format for the given value.
@return Omero File format
@rtype String
"""
query_serv = self.getQueryService()
return query_serv.findByString("Format", "value", format).getValue().val;
################################################
## Counters
def getCollectionCount(self, parent, child, ids):
"""
Counts the number of members in a collection for a given object.
@param parent The fully-qualified classname of the object to be tested
@type parent String
@param child Name of the property on that class, omitting getters and setters.
@type child String
@param ids Set of Longs, the ids of the objects to test
@type ids L{Long}
@return A map from id integer to count integer
@rtype L{(Long, Long)}
"""
container = self.getContainerService()
return container.getCollectionCount(parent, child, ids, None)
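    # Usage sketch (assumed ids and property name): counting dataset links for
    # two projects,
    #     counts = conn.getCollectionCount("Project", "datasetLinks", [1L, 2L])
    # would return a map such as {1L: 3, 2L: 0}. The property name must match a
    # collection on the parent class; "datasetLinks" is only an illustration.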
################################################
## Validators
def checkOmeName(self, ome_name, old_omeName=None):
if ome_name == old_omeName:
return False
query_serv = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["omeName"] = rstring(smart_str(ome_name))
sql = "select e from Experimenter as e where e.omeName = (:omeName)"
exps = query_serv.findAllByQuery(sql, p)
if len(exps) > 0:
return True
else:
return False
def checkGroupName(self, name, old_name=None):
if name == old_name:
return False
query_serv = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["name"] = rstring(smart_str(name))
sql = "select g from ExperimenterGroup as g where g.name = (:name)"
grs = query_serv.findAllByQuery(sql, p)
if len(grs) > 0:
return True
else:
return False
def checkEmail(self, email, old_email=None):
if email == "":
return False
if email == old_email:
return False
query_serv = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["email"] = rstring(smart_str(email))
sql = "select e from Experimenter as e where e.email = (:email)"
exps = query_serv.findAllByQuery(sql, p)
if len(exps) > 0:
return True
else:
return False
def defaultThumbnail(self, size=(120,120)):
if isinstance(size, int):
size = (size,size)
if len(size) == 1:
size = (size[0],size[0])
img = Image.open(settings.DEFAULT_IMG)
img.thumbnail(size, Image.ANTIALIAS)
draw = ImageDraw.Draw(img)
f = cStringIO.StringIO()
img.save(f, "PNG")
f.seek(0)
return f.read()
##############################################
## Sets methods ##
def changeUserPassword(self, omeName, password, my_password):
"""
        Change the password for a given user.
        @param omeName Experimenter omeName
@type omeName String
@param password Must pass validation in the security sub-system.
@type password String
@param my_password Must pass validation in the security sub-system.
@type my_password String
"""
admin_serv = self.getAdminService()
self.c.sf.setSecurityPassword(my_password)
admin_serv.changeUserPassword(omeName, rstring(str(password)))
def changeMyPassword(self, password, old_password):
"""
Change the password for the current user by passing the old password.
@param password Must pass validation in the security sub-system.
@type password String
@param old_password Old password
@type old_password String
@return None or error message if password could not be changed
@rtype String
"""
admin_serv = self.getAdminService()
admin_serv.changePasswordWithOldPassword(rstring(str(old_password)), rstring(str(password)))
def createExperimenter(self, experimenter, defaultGroup, otherGroups, password):
"""
Create and return a new user in the given groups with password.
@param experimenter A new Experimenter instance.
@type experimenter ExperimenterI
@param defaultGroup Instance of ExperimenterGroup selected as a first active group.
@type defaultGroup ExperimenterGroupI
@param otherGroups List of ExperimenterGroup instances. Can be empty.
@type otherGroups L{ExperimenterGroupI}
@param password Must pass validation in the security sub-system.
@type password String
@return ID of the newly created Experimenter Not null.
@rtype Long
"""
admin_serv = self.getAdminService()
return admin_serv.createExperimenterWithPassword(experimenter, rstring(str(password)), defaultGroup, otherGroups)
def updateExperimenter(self, experimenter, defaultGroup, addGroups, rmGroups):
"""
Update an existing user including groups user is a member of.
Password cannot be changed by calling that method.
@param experimenter An existing Experimenter instance.
@type experimenter ExperimenterI
@param defaultGroup Instance of ExperimenterGroup selected as a new active group.
@type defaultGroup ExperimenterGroupI
@param addGroups List of new ExperimenterGroup instances user will be a member of. Can be empty.
@type addGroups L{ExperimenterGroupI}
        @param rmGroups List of old ExperimenterGroup instances the user will no longer be a member of. Can be empty.
@type rmGroups L{ExperimenterGroupI}
"""
admin_serv = self.getAdminService()
admin_serv.updateExperimenter(experimenter)
if len(addGroups) > 0:
admin_serv.addGroups(experimenter, addGroups)
admin_serv.setDefaultGroup(experimenter, defaultGroup)
if len(rmGroups) > 0:
admin_serv.removeGroups(experimenter, rmGroups)
def setMembersOfGroup(self, group, add_exps, rm_exps):
"""
Change members of the group.
@param group An existing ExperimenterGroup instance.
@type group ExperimenterGroupI
        @param add_exps List of new Experimenter instances to add to the group. Can be empty.
        @type add_exps L{ExperimenterI}
        @param rm_exps List of Experimenter instances that will no longer be members of that group. Can be empty.
@type rm_exps L{ExperimenterI}
"""
admin_serv = self.getAdminService()
for e in add_exps:
admin_serv.addGroups(e, [group])
for e in rm_exps:
admin_serv.removeGroups(e, [group])
#def deleteExperimenter(self, experimenter):
# """
# Removes a user by removing the password information for that user as well
# as all GroupExperimenterMap instances.
#
# @param user Experimenter to be deleted. Not null.
# @type user ExperimenterI
# """
# admin_serv = self.getAdminService()
# admin_serv.deleteExperimenter(experimenter)
def createGroup(self, group, group_owners):
"""
Create and return a new group with the given owners.
@param group A new ExperimenterGroup instance.
@type group ExperimenterGroupI
@param group_owners List of Experimenter instances. Can be empty.
@type group_owners L{ExperimenterI}
@return ID of the newly created ExperimenterGroup Not null.
@rtype Long
"""
admin_serv = self.getAdminService()
gr_id = admin_serv.createGroup(group)
new_gr = admin_serv.getGroup(gr_id)
admin_serv.addGroupOwners(new_gr, group_owners)
return gr_id
def updateGroup(self, group, add_exps, rm_exps, perm=None):
"""
        Update an existing group, change its permissions if requested,
        and add or remove group owners.
@param group An existing ExperimenterGroup instance.
@type group ExperimenterGroupI
        @param add_exps List of Experimenter instances to add as group owners. Can be empty.
        @type add_exps L{ExperimenterI}
        @param rm_exps List of Experimenter instances to remove as group owners. Can be empty.
@type rm_exps L{ExperimenterI}
@param perm Permissions set on the given group
@type perm PermissionsI
"""
admin_serv = self.getAdminService()
# Should we update updateGroup so this would be atomic?
admin_serv.updateGroup(group)
if perm is not None:
logger.warning("WARNING: changePermissions was called!!!")
admin_serv.changePermissions(group, perm)
self._user = self.getObject("Experimenter", self._userid)
admin_serv.addGroupOwners(group, add_exps)
admin_serv.removeGroupOwners(group, rm_exps)
def updateMyAccount(self, experimenter, defultGroup):
"""
Allows a user to update his/her own information and set the default group for a given user.
@param experimenter A data transfer object. Only the fields: firstName, middleName,
lastName, email, and institution are checked. Not null.
@type experimenter ExperimenterI
@param defultGroup The group which should be set as default group for this user. Not null
@type defultGroup ExperimenterGroupI
"""
admin_serv = self.getAdminService()
admin_serv.updateSelf(experimenter)
admin_serv.setDefaultGroup(experimenter, defultGroup)
self.changeActiveGroup(defultGroup.id.val)
self._user = self.getObject("Experimenter", self._userid)
def updatePermissions(self, obj, perm):
"""
Allow to change the permission on the object.
@param obj An entity or an unloaded reference to an entity. Not null.
@type obj ObjectI
@param perm The permissions value for this entity. Not null.
@type perm PermissionsI
"""
admin_serv = self.getAdminService()
if perm is not None:
logger.warning("WARNING: changePermissions was called!!!")
admin_serv.changePermissions(obj, perm)
self._user = self.getObject("Experimenter", self._userid)
def saveObject (self, obj):
"""
Provide method for directly updating object graphs. Act recursively on
the entire object graph, replacing placeholders and details where necessary,
and then "merging" the final graph. This means that the objects that are
passed into methods are copied over to new instances which are then returned.
The original objects should be discarded.
@param obj An entity or an unloaded reference to an entity. Not null.
@type obj ObjectI
"""
u = self.getUpdateService()
u.saveObject(obj)
def saveArray (self, objs):
"""
Provide method for directly updating list of object graphs. Act recursively on
the entire object graph, replacing placeholders and details where necessary,
and then "merging" the final graph. This means that the objects that are
passed into methods are copied over to new instances which are then returned.
The original objects should be discarded.
        @param objs List of entities or unloaded references to entities. Not null.
        @type objs L{ObjectI}
"""
u = self.getUpdateService()
u.saveArray(objs)
def saveAndReturnObject (self, obj):
"""
Provide method for directly updating object graphs and return it. Act recursively on
the entire object graph, replacing placeholders and details where necessary,
and then "merging" the final graph. This means that the objects that are
passed into methods are copied over to new instances which are then returned.
The original objects should be discarded.
@param obj An entity or an unloaded reference to an entity. Not null.
@type obj ObjectI
@return Saved object
@rtype ObjectI
"""
u = self.getUpdateService()
res = u.saveAndReturnObject(obj)
res.unload()
obj = omero.gateway.BlitzObjectWrapper(self, res)
return obj
def saveAndReturnId (self, obj):
"""
Provide method for directly updating object graphs and return ID. Act recursively on
the entire object graph, replacing placeholders and details where necessary,
and then "merging" the final graph. This means that the objects that are
passed into methods are copied over to new instances which are then returned.
The original objects should be discarded.
@param obj An entity or an unloaded reference to an entity. Not null.
@type obj ObjectI
@return ID of saved object
@rtype Long
"""
u = self.getUpdateService()
res = u.saveAndReturnObject(obj)
res.unload()
return res.id.val
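    # Minimal sketch of the save helpers above (hypothetical values): renaming a
    # dataset and persisting it might look like
    #     ds = conn.getObject("Dataset", did)._obj
    #     ds.setName(rstring("renamed"))
    #     new_id = conn.saveAndReturnId(ds)
    # `conn` and `did` are assumed; rstring comes from the omero rtypes helpers
    # already used throughout this module.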
def saveAndReturnFile(self, binary, oFile_id):
"""
Provide method for directly updating a file object and return binary.
@param binary Binary. Not null.
@type binary String
@param oFile_id File Id in order to manage the state of the service. Not null.
@type oFile_id Long
@return Shallow copy of file.
"""
store = self.createRawFileStore()
store.setFileId(oFile_id);
pos = 0
rlen = 0
for chunk in binary.chunks():
rlen = len(chunk)
store.write(chunk, pos, rlen)
pos = pos + rlen
return store.save()
##############################################
## IShare
def getShare (self, oid):
"""
Gets share for the given share id.
@param oid: Share ID.
@type oid: Long
@return: ShareWrapper or None
@rtype: L{ShareWrapper}
"""
sh_serv = self.getShareService()
sh = sh_serv.getShare(long(oid))
if sh is not None:
return ShareWrapper(self, sh)
else:
return None
def getOwnShares(self):
"""
Gets all owned shares for the current user.
@return: Shares that user owns
@rtype: L{ShareWrapper} generator
"""
sh = self.getShareService()
for e in sh.getOwnShares(False):
yield ShareWrapper(self, e)
def getMemberShares(self):
"""
Gets all shares where current user is a member.
@return: Shares that user is a member of
@rtype: L{ShareWrapper} generator
"""
sh = self.getShareService()
for e in sh.getMemberShares(False):
yield ShareWrapper(self, e)
def getMemberCount(self, share_ids):
"""
Returns a map from share id to the count of total members (including the
owner). This is represented by ome.model.meta.ShareMember links.
@param share_ids: List of IDs
@type share_ids: List of Longs
@return: Dict of shareId: member-count
@rtype: Dict of long: long
"""
sh = self.getShareService()
return sh.getMemberCount(share_ids)
def getCommentCount(self, share_ids):
"""
Returns a map from share id to comment count.
@param share_ids: List of IDs
@type share_ids: List of Longs
@return: Dict of shareId: comment-count
@rtype: Dict of long: long
"""
sh = self.getShareService()
return sh.getCommentCount(share_ids)
def getContents(self, share_id):
"""
Looks up all items belonging to the share, wrapped in object wrapper
@param share_id: share ID
@type share_id: Long
@return: Share contents
@rtype: L{omero.gateway.BlitzObjectWrapper} generator
"""
sh = self.getShareService()
for e in sh.getContents(long(share_id)):
try:
obj = omero.gateway.BlitzObjectWrapper(self, e)
except:
obj = omero.gateway.BlitzObjectWrapper(self,None)
obj._obj = e
yield obj
def getComments(self, share_id):
"""
Looks up all comments which belong to the share, wrapped in object wrapper
@param share_id: share ID
@type share_id: Long
@return: Share comments
@rtype: L{AnnotationWrapper} generator
"""
sh = self.getShareService()
for e in sh.getComments(long(share_id)):
yield AnnotationWrapper(self, e)
def getAllMembers(self, share_id):
"""
Get all {@link Experimenter users} who are a member of the share.
@param share_id: share ID
@type share_id: Long
@return: Members of share
@rtype: L{ExperimenterWrapper} generator
"""
sh = self.getShareService()
for e in sh.getAllMembers(long(share_id)):
yield ExperimenterWrapper(self, e)
def getAllGuests(self, share_id):
"""
Get the email addresses for all share guests.
@param share_id: share ID
@type share_id: Long
@return: List of e-mail addresses
@rtype: List of Strings
"""
sh = self.getShareService()
return sh.getAllGuests(long(share_id))
def getAllUsers(self, share_id):
"""
Get a single set containing the login names of the users as well email addresses for guests.
@param share_id: share ID
@type share_id: Long
@return: List of usernames and e-mail addresses
@rtype: List of Strings
"""
sh = self.getShareService()
return sh.getAllUsers(long(share_id))
def prepareRecipients(self, recipients):
recps = list()
for m in recipients:
try:
e = (m.email, m.email.val)[isinstance(m.email, omero.RString)]
if e is not None and e!="":
recps.append(e)
except:
logger.error(traceback.format_exc())
logger.info(recps)
if len(recps) == 0:
raise AttributeError("Recipients list is empty")
return recps
def addComment(self, host, blitz_id, share_id, comment):
sh = self.getShareService()
new_cm = sh.addComment(long(share_id), str(comment))
members = list(self.getAllMembers(long(share_id)))
sh = self.getShare(long(share_id))
if self.getEventContext().userId != sh.owner.id.val:
members.append(sh.getOwner())
if sh.active:
try:
for m in members:
try:
if m.id == self.getEventContext().userId:
members.remove(m)
except:
logger.error(traceback.format_exc())
recipients = self.prepareRecipients(members)
except Exception, x:
logger.error(traceback.format_exc())
else:
blitz = settings.SERVER_LIST.get(pk=blitz_id)
t = settings.EMAIL_TEMPLATES["add_comment_to_share"]
message = t['text_content'] % (settings.APPLICATION_HOST, blitz_id)
message_html = t['html_content'] % (settings.APPLICATION_HOST, blitz_id, settings.APPLICATION_HOST, blitz_id)
try:
title = 'OMERO.web - new comment for share %i' % share_id
text_content = message
html_content = message_html
msg = EmailMultiAlternatives(title, text_content, settings.SERVER_EMAIL, recipients)
msg.attach_alternative(html_content, "text/html")
msg.send()
logger.error("Email was sent")
except:
logger.error(traceback.format_exc())
def removeImage(self, share_id, image_id):
sh = self.getShareService()
img = self.getObject("Image", image_id)
sh.removeObject(long(share_id), img._obj)
def createShare(self, host, blitz_id, image, message, members, enable, expiration=None):
sh = self.getShareService()
q = self.getQueryService()
items = list()
ms = list()
p = omero.sys.Parameters()
p.map = {}
#images
if len(image) > 0:
p.map["ids"] = rlist([rlong(long(a)) for a in image])
sql = "select im from Image im join fetch im.details.owner join fetch im.details.group where im.id in (:ids) order by im.name"
items.extend(q.findAllByQuery(sql, p))
#members
if members is not None:
p.map["ids"] = rlist([rlong(long(a)) for a in members])
sql = "select e from Experimenter e " \
"where e.id in (:ids) order by e.omeName"
ms = q.findAllByQuery(sql, p)
sid = sh.createShare(message, rtime(expiration), items, ms, [], enable)
sh.addObjects(sid, items)
        # send email if active
if enable:
try:
recipients = self.prepareRecipients(ms)
except Exception, x:
logger.error(traceback.format_exc())
else:
t = settings.EMAIL_TEMPLATES["create_share"]
message = t['text_content'] % (settings.APPLICATION_HOST, blitz_id, self.getUser().getFullName())
message_html = t['html_content'] % (settings.APPLICATION_HOST, blitz_id, settings.APPLICATION_HOST, blitz_id, self.getUser().getFullName())
try:
title = 'OMERO.web - new share %i' % sid
text_content = message
html_content = message_html
msg = EmailMultiAlternatives(title, text_content, settings.SERVER_EMAIL, recipients)
msg.attach_alternative(html_content, "text/html")
msg.send()
logger.error("Email was sent")
except:
logger.error(traceback.format_exc())
def updateShareOrDiscussion (self, host, blitz_id, share_id, message, add_members, rm_members, enable, expiration=None):
sh = self.getShareService()
sh.setDescription(long(share_id), message)
sh.setExpiration(long(share_id), rtime(expiration))
sh.setActive(long(share_id), enable)
if len(add_members) > 0:
sh.addUsers(long(share_id), add_members)
if len(rm_members) > 0:
sh.removeUsers(long(share_id), rm_members)
        # send email if active
if len(add_members) > 0:
try:
recipients = self.prepareRecipients(add_members)
except Exception, x:
logger.error(traceback.format_exc())
else:
blitz = settings.SERVER_LIST.get(pk=blitz_id)
t = settings.EMAIL_TEMPLATES["add_member_to_share"]
message = t['text_content'] % (settings.APPLICATION_HOST, blitz_id, self.getUser().getFullName())
message_html = t['html_content'] % (settings.APPLICATION_HOST, blitz_id, settings.APPLICATION_HOST, blitz_id, self.getUser().getFullName())
try:
title = 'OMERO.web - update share %i' % share_id
text_content = message
html_content = message_html
msg = EmailMultiAlternatives(title, text_content, settings.SERVER_EMAIL, recipients)
msg.attach_alternative(html_content, "text/html")
msg.send()
logger.error("Email was sent")
except:
logger.error(traceback.format_exc())
if len(rm_members) > 0:
try:
recipients = self.prepareRecipients(rm_members)
except Exception, x:
logger.error(traceback.format_exc())
else:
blitz = settings.SERVER_LIST.get(pk=blitz_id)
t = settings.EMAIL_TEMPLATES["remove_member_from_share"]
message = t['text_content'] % (settings.APPLICATION_HOST, blitz_id)
message_html = t['html_content'] % (settings.APPLICATION_HOST, blitz_id, settings.APPLICATION_HOST, blitz_id)
try:
title = 'OMERO.web - update share %i' % share_id
text_content = message
html_content = message_html
msg = EmailMultiAlternatives(title, text_content, settings.SERVER_EMAIL, recipients)
msg.attach_alternative(html_content, "text/html")
msg.send()
logger.error("Email was sent")
except:
logger.error(traceback.format_exc())
##############################################
## History methods ##
#def getLastAcquiredImages (self):
# tm = self.getTimelineService()
# p = omero.sys.Parameters()
# p.map = {}
# f = omero.sys.Filter()
# f.ownerId = rlong(self.getEventContext().userId)
# f.groupId = rlong(self.getEventContext().groupId)
# f.limit = rint(6)
# p.theFilter = f
# for e in tm.getMostRecentObjects(['Image'], p, False)["Image"]:
# yield ImageWrapper(self, e)
def listLastImportedImages (self):
"""
Retrieve most recent imported images
controlled by the security system.
@return: Generator yielding Images
@rtype: L{ImageWrapper} generator
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(self.getEventContext().userId)
f.groupId = rlong(self.getEventContext().groupId)
f.limit = rint(10)
p.theFilter = f
for e in tm.getMostRecentObjects(['Image'], p, False)["Image"]:
yield ImageWrapper(self, e)
def listMostRecentShares (self):
"""
Retrieve most recent shares
controlled by the security system.
@return: Generator yielding SessionAnnotationLink
@rtype: L{ShareWrapper} generator
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(self.getEventContext().userId)
f.limit = rint(10)
p.theFilter = f
for e in tm.getMostRecentShareCommentLinks(p):
yield ShareWrapper(self, e.parent)
def listMostRecentShareComments (self):
"""
Retrieve most recent share comments
controlled by the security system.
@return: Generator yielding SessionAnnotationLink
@rtype: L{SessionCommentWrapper} generator
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(self.getEventContext().userId)
f.limit = rint(10)
p.theFilter = f
for e in tm.getMostRecentShareCommentLinks(p):
yield AnnotationWrapper(self, e.child, link=ShareWrapper(self, e.parent))
def listMostRecentComments (self):
"""
Retrieve most recent comment annotations
controlled by the security system.
@return: Generator yielding BlitzObjectWrapper
@rtype: L{BlitzObjectWrapper} generator
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(self.getEventContext().userId)
f.groupId = rlong(self.getEventContext().groupId)
f.limit = rint(10)
p.theFilter = f
for e in tm.getMostRecentAnnotationLinks(None, ['CommentAnnotation'], None, p):
yield omero.gateway.BlitzObjectWrapper(self, e)
def listMostRecentTags (self):
"""
Retrieve most recent tag annotations
controlled by the security system.
@return: Generator yielding BlitzObjectWrapper
@rtype: L{BlitzObjectWrapper} generator
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
#f.ownerId = rlong(self.getEventContext().userId)
f.groupId = rlong(self.getEventContext().groupId)
f.limit = rint(200)
p.theFilter = f
for e in tm.getMostRecentAnnotationLinks(None, ['TagAnnotation'], None, p):
yield omero.gateway.BlitzObjectWrapper(self, e.child)
def getDataByPeriod (self, start, end, eid, otype=None, page=None):
"""
Retrieve given data objects by the given period of time
controlled by the security system.
        @param start Starting date
        @type start Long
        @param end Finishing date
@type end Long
@param otype Data type: Project, Dataset, Image
@type otype String
@return: Map of project, dataset and image lists
@rtype: Map
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(eid)
f.groupId = rlong(self.getEventContext().groupId)
if page is not None:
f.limit = rint(PAGE)
f.offset = rint((int(page)-1)*PAGE)
else:
f.limit = rint(100)
p.theFilter = f
im_list = list()
ds_list = list()
pr_list = list()
if otype == 'image':
try:
for e in tm.getByPeriod(['Image'], rtime(long(start)), rtime(long(end)), p, True)['Image']:
im_list.append(ImageWrapper(self, e))
except:
pass
elif otype == 'dataset':
try:
for e in tm.getByPeriod(['Dataset'], rtime(long(start)), rtime(long(end)), p, True)['Dataset']:
ds_list.append(DatasetWrapper(self, e))
except:
pass
elif otype == 'project':
try:
for e in tm.getByPeriod(['Project'], rtime(long(start)), rtime(long(end)), p, True)['Project']:
                    pr_list.append(ProjectWrapper(self, e))
except:
pass
else:
res = tm.getByPeriod(['Image', 'Dataset', 'Project'], rtime(long(start)), rtime(long(end)), p, True)
try:
for e in res['Image']:
im_list.append(ImageWrapper(self, e))
except:
pass
try:
for e in res['Dataset']:
ds_list.append(DatasetWrapper(self, e))
except:
pass
try:
for e in res['Project']:
pr_list.append(ProjectWrapper(self, e))
except:
pass
return {'project': pr_list, 'dataset':ds_list, 'image':im_list}
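    # Example sketch (placeholder values): fetching everything imported by user
    # `eid` during one day, where `start` and `end` are millisecond timestamps,
    #     res = conn.getDataByPeriod(start, end, eid)
    #     images = res['image']
    # None of these names are defined in this file; they are illustrative only.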
def countDataByPeriod (self, start, end, eid, otype=None):
"""
Counts given data objects by the given period of time
controlled by the security system.
        @param start Starting date
        @type start Long
        @param end Finishing date
@type end Long
@param otype Data type: Project, Dataset, Image
@type otype String
@return: Counter
@rtype: Long
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(eid)
f.groupId = rlong(self.getEventContext().groupId)
p.theFilter = f
if otype == 'image':
return tm.countByPeriod(['Image'], rtime(long(start)), rtime(long(end)), p)['Image']
elif otype == 'dataset':
return tm.countByPeriod(['Dataset'], rtime(long(start)), rtime(long(end)), p)['Dataset']
elif otype == 'project':
return tm.countByPeriod(['Project'], rtime(long(start)), rtime(long(end)), p)['Project']
else:
c = tm.countByPeriod(['Image', 'Dataset', 'Project'], rtime(long(start)), rtime(long(end)), p)
return c['Image']+c['Dataset']+c['Project']
def getEventsByPeriod (self, start, end, eid):
"""
Retrieve event log objects by the given period of time
controlled by the security system.
        @param start Starting date
        @type start Long
        @param end Finishing date
@type end Long
@return: List of event logs
@rtype: List
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.limit = rint(100000)
f.ownerId = rlong(eid)
f.groupId = rlong(self.getEventContext().groupId)
p.theFilter = f
return tm.getEventLogsByPeriod(rtime(start), rtime(end), p)
#yield EventLogWrapper(self, e)
omero.gateway.BlitzGateway = OmeroWebGateway
class OmeroWebSafeCallWrapper(OmeroGatewaySafeCallWrapper): #pragma: no cover
"""
Function or method wrapper that handles L{Ice.ObjectNotExistException}
by re-creating the server side proxy.
"""
def handle_exception(self, e, *args, **kwargs):
if e.__class__ is Ice.ObjectNotExistException:
# Restored proxy object re-creation logic from the pre-#5835
# version of # _safeCallWrap() from omero.gateway. (See #6365)
logger.warn('Attempting to re-create proxy and re-call method.')
try:
self.proxyObjectWrapper._obj = \
self.proxyObjectWrapper._create_func()
func = getattr(self.proxyObjectWrapper._obj, self.attr)
return func(*args, **kwargs)
except Exception, e:
self.debug(e.__class__.__name__, args, kwargs)
raise
else:
super(OmeroWebSafeCallWrapper, self).handle_exception(
e, *args, **kwargs)
omero.gateway.SafeCallWrapper = OmeroWebSafeCallWrapper
class OmeroWebObjectWrapper (object):
annotation_counter = None
def countParents (self):
l = self.listParents()
if l is not None:
return len(l)
def countAnnotations (self):
"""
        Count annotations linked to the object and set the value
        on the custom field 'annotation_counter'.
@return Counter
"""
if self.annotation_counter is not None:
return self.annotation_counter
else:
container = self._conn.getContainerService()
m = container.getCollectionCount(self._obj.__class__.__name__, type(self._obj).ANNOTATIONLINKS, [self._oid], None)
if m[self._oid] > 0:
self.annotation_counter = m[self._oid]
return self.annotation_counter
else:
return None
def warpName(self):
"""
        Wraps the name of the object if the name is longer than 30 characters.
        @return Wrapped string.
"""
try:
l = len(self.name)
if l < 30:
return self.name
elif l >= 30:
splited = []
for v in range(0,len(self.name),30):
splited.append(self.name[v:v+30]+"\n")
return "".join(splited)
except:
logger.info(traceback.format_exc())
return self.name
class ExperimenterWrapper (OmeroWebObjectWrapper, omero.gateway.ExperimenterWrapper):
"""
omero_model_ExperimenterI class wrapper overwrite omero.gateway.ExperimenterWrapper
and extend OmeroWebObjectWrapper.
"""
def isEditable(self):
return self.omeName.lower() not in ('guest')
omero.gateway.ExperimenterWrapper = ExperimenterWrapper
class ExperimenterGroupWrapper (OmeroWebObjectWrapper, omero.gateway.ExperimenterGroupWrapper):
"""
omero_model_ExperimenterGroupI class wrapper overwrite omero.gateway.ExperimenterGroupWrapper
and extend OmeroWebObjectWrapper.
"""
def isEditable(self):
return self.name.lower() not in ('guest', 'user')
omero.gateway.ExperimenterGroupWrapper = ExperimenterGroupWrapper
class ProjectWrapper (OmeroWebObjectWrapper, omero.gateway.ProjectWrapper):
"""
omero_model_ProjectI class wrapper overwrite omero.gateway.ProjectWrapper
and extend OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(ProjectWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
omero.gateway.ProjectWrapper = ProjectWrapper
class DatasetWrapper (OmeroWebObjectWrapper, omero.gateway.DatasetWrapper):
"""
omero_model_DatasetI class wrapper overwrite omero.gateway.DatasetWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(DatasetWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
if kwargs.has_key('link'):
self.link = kwargs.has_key('link') and kwargs['link'] or None
omero.gateway.DatasetWrapper = DatasetWrapper
class ImageWrapper (OmeroWebObjectWrapper, omero.gateway.ImageWrapper):
"""
omero_model_ImageI class wrapper overwrite omero.gateway.ImageWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(ImageWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
if kwargs.has_key('link'):
self.link = kwargs.has_key('link') and kwargs['link'] or None
"""
This override standard omero.gateway.ImageWrapper.getChannels
and catch exceptions.
"""
def getChannels (self):
try:
return super(ImageWrapper, self).getChannels()
except Exception, x:
logger.error('Failed to load channels:', exc_info=True)
return None
omero.gateway.ImageWrapper = ImageWrapper
class PlateWrapper (OmeroWebObjectWrapper, omero.gateway.PlateWrapper):
"""
omero_model_PlateI class wrapper overwrite omero.gateway.PlateWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(PlateWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
if kwargs.has_key('link'):
self.link = kwargs.has_key('link') and kwargs['link'] or None
def _loadPlateAcquisitions(self):
p = omero.sys.Parameters()
p.map = {}
p.map["pid"] = self._obj.id
sql = "select pa from PlateAcquisition as pa join fetch pa.plate as p where p.id=:pid"
self._obj._plateAcquisitionsSeq = self._conn.getQueryService().findAllByQuery(sql, p)
self._obj._plateAcquisitionsLoaded = True
def countPlateAcquisitions(self):
if self._obj.sizeOfPlateAcquisitions() < 0:
self._loadPlateAcquisitions()
return self._obj.sizeOfPlateAcquisitions()
def listPlateAcquisitions(self):
if not self._obj._plateAcquisitionsLoaded:
self._loadPlateAcquisitions()
for pa in self._obj.copyPlateAcquisitions():
yield PlateAcquisitionWrapper(self._conn, pa)
def getFields (self, pid=None):
"""
Returns tuple of min and max of indexed collection of well samples
per plate acquisition if exists
"""
q = self._conn.getQueryService()
sql = "select minIndex(ws), maxIndex(ws) from Well w " \
"join w.wellSamples ws where w.plate.id=:oid"
p = omero.sys.Parameters()
p.map = {}
p.map["oid"] = self._obj.id
if pid is not None:
sql += " and ws.plateAcquisition.id=:pid"
p.map["pid"] = rlong(pid)
fields = None
try:
res = [r for r in unwrap(q.projection(sql, p))[0] if r != None]
if len(res) == 2:
fields = tuple(res)
except:
pass
return fields
omero.gateway.PlateWrapper = PlateWrapper
class WellWrapper (OmeroWebObjectWrapper, omero.gateway.WellWrapper):
"""
    omero_model_WellI class wrapper overwrites omero.gateway.WellWrapper
    and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(WellWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
if kwargs.has_key('link'):
self.link = kwargs.has_key('link') and kwargs['link'] or None
omero.gateway.WellWrapper = WellWrapper
class PlateAcquisitionWrapper (OmeroWebObjectWrapper, omero.gateway.BlitzObjectWrapper):
"""
    omero_model_PlateAcquisitionI class wrapper extends omero.gateway.BlitzObjectWrapper
    and OmeroWebObjectWrapper.
"""
annotation_counter = None
def __bstrap__ (self):
self.OMERO_CLASS = 'PlateAcquisition'
def __prepare__ (self, **kwargs):
super(PlateAcquisitionWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
def getName (self):
name = super(PlateAcquisitionWrapper, self).getName()
if name is None:
if self.startTime is not None and self.endTime is not None:
name = "%s - %s" % (datetime.fromtimestamp(self.startTime/1000), datetime.fromtimestamp(self.endTime/1000))
else:
name = "Plate %i" % self.id
return name
name = property(getName)
def getFields (self):
"""
Returns max of indexed collection of well samples
"""
p = omero.sys.Parameters()
p.map = {}
p.map["oid"] = self._obj.id
q = self._conn.getQueryService()
sql = "select maxIndex(pa.wellSamples)+1 from PlateAcquisition as pa "\
"where pa.id=:oid"
try:
index = unwrap(q.projection(sql, p))[0][0]
except:
index = -1
return index
class ScreenWrapper (OmeroWebObjectWrapper, omero.gateway.ScreenWrapper):
"""
omero_model_ScreenI class wrapper overwrite omero.gateway.ScreenWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(ScreenWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
omero.gateway.ScreenWrapper = ScreenWrapper
class EventLogWrapper (omero.gateway.BlitzObjectWrapper):
"""
omero_model_EventLogI class wrapper extends omero.gateway.BlitzObjectWrapper.
"""
LINK_CLASS = "EventLog"
class ShareWrapper (omero.gateway.BlitzObjectWrapper):
"""
omero_model_ShareI class wrapper extends BlitzObjectWrapper.
"""
def getShareType(self):
if self.itemCount == 0:
return "Discussion"
else:
return "Share"
def isEmpty(self):
if self.itemCount == 0:
return True
return False
def getExpireDate(self):
#workaround for problem of year 2038
try:
d = self.started+self.timeToLive
if d > 2051222400000:
return datetime(2035, 1, 1, 0, 0, 0)
return datetime.fromtimestamp(d / 1000)
except:
logger.info(traceback.format_exc())
return None
def getStartDate(self):
"""
Gets the start date of the share
@return: Start Date-time
@rtype: datetime object
"""
return datetime.fromtimestamp(self.getStarted()/1000)
def getExpirationDate(self):
"""
Gets the end date for the share
@return: End Date-time
@rtype: datetime object
"""
#workaround for problem of year 2038
try:
d = self.started+self.timeToLive
            if d > 2051222400000:
return datetime(2035, 1, 1, 0, 0, 0)
return datetime.fromtimestamp(d / 1000)
except:
logger.info(traceback.format_exc())
return None
def isExpired(self):
"""
Returns True if we are past the end date of the share
@return: True if share expired
@rtype: Boolean
"""
#workaround for problem of year 2038
now = time.time()
try:
d = long(self.started+self.timeToLive)
if (d / 1000) > now:
return False
return True
except:
logger.info(traceback.format_exc())
return None
def isOwned(self):
"""
Returns True if share is owned by the current user
@return: True if owned
@rtype: Boolean
"""
try:
if self.owner.id.val == self._conn.getEventContext().userId:
return True
except:
logger.error(traceback.format_exc())
return False
def getOwner(self):
"""
The owner of this share
@return: Owner
@rtype: L{ExperimenterWrapper}
"""
return omero.gateway.ExperimenterWrapper(self, self.owner)
# IMPORTANT to update the map of wrappers 'project', 'dataset', 'image' etc. returned by getObjects()
omero.gateway.refreshWrappers()
omero.gateway.KNOWN_WRAPPERS.update({"plateacquisition":PlateAcquisitionWrapper})
| gpl-2.0 | -7,456,426,245,659,466,000 | 36.570796 | 169 | 0.55935 | false |
ayushagrawal288/zamboni | mkt/commonplace/urls.py | 7 | 3562 | from django.conf import settings
from django.conf.urls import include, patterns, url
import mkt
from . import views
def fireplace_route(path, name=None):
"""
Helper function for building Fireplace URLs. `path` is the URL route,
and `name` (if specified) is the name given to the route.
"""
kwargs = {}
if name:
kwargs['name'] = name
return url('^%s$' % path, views.commonplace, {'repo': 'fireplace'},
**kwargs)
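# Illustrative note (not part of the original file): a call such as
#     fireplace_route('settings', 'account.settings')
# is roughly equivalent to
#     url('^settings$', views.commonplace, {'repo': 'fireplace'}, name='account.settings')
# the route name used here is a made-up example.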
fireplace_reviews_patterns = patterns(
'',
fireplace_route('flag', 'ratings.flag'),
fireplace_route('delete', 'ratings.delete'),
)
fireplace_app_patterns = patterns(
'',
fireplace_route('', 'detail'),
fireplace_route('abuse', 'detail.abuse'),
fireplace_route('privacy', 'detail.privacy'),
fireplace_route('recommended', 'recommended'),
fireplace_route('reviews/', 'ratings.list'),
fireplace_route('reviews/add', 'ratings.add'),
url('^(?P<review_id>\d+)/', include(fireplace_reviews_patterns)),
)
fireplace_website_patterns = patterns(
'',
fireplace_route('', 'website.detail'),
)
urlpatterns = patterns(
'',
# Fireplace:
url('^$', views.commonplace, {'repo': 'fireplace'}, name='home'),
url('^server.html$', views.commonplace, {'repo': 'fireplace'},
name='commonplace.fireplace'),
url('^fxa-authorize$', views.fxa_authorize,
name='commonplace.fxa_authorize'),
(r'^app/%s/' % mkt.APP_SLUG, include(fireplace_app_patterns)),
(r'^website/(?P<pk>\d+)', include(fireplace_website_patterns)),
url(r'^iframe-install.html/?$', views.iframe_install,
name='commonplace.iframe-install'),
url(r'^potatolytics.html$', views.potatolytics,
name='commonplace.potatolytics'),
# Commbadge:
url('^comm/app/%s$' % mkt.APP_SLUG, views.commonplace,
{'repo': 'commbadge'},
name='commonplace.commbadge.app_dashboard'),
url('^comm/thread/(?P<thread_id>\d+)$', views.commonplace,
{'repo': 'commbadge'},
name='commonplace.commbadge.show_thread'),
url('^comm/.*$', views.commonplace, {'repo': 'commbadge'},
name='commonplace.commbadge'),
# Transonic:
url('^curate/.*$', views.commonplace, {'repo': 'transonic'},
name='commonplace.transonic'),
# Stats:
url('^statistics/app/%s$' % mkt.APP_SLUG, views.commonplace,
{'repo': 'marketplace-stats'},
name='commonplace.stats.app_dashboard'),
url('^statistics/.*$', views.commonplace, {'repo': 'marketplace-stats'},
name='commonplace.stats'),
# Operator Dashboard:
url('^operators/.*$', views.commonplace,
{'repo': 'marketplace-operator-dashboard'},
name='commonplace.operatordashboard'),
# Submission:
url('^submission/.*$', views.commonplace,
{'repo': 'marketplace-submission'},
name='commonplace.submission'),
)
if settings.DEBUG:
# More Fireplace stuff, only for local dev:
urlpatterns += patterns(
'',
fireplace_route('category/.*'),
fireplace_route('categories'),
fireplace_route('collection/.*'),
fireplace_route('debug'),
fireplace_route('feed/.*'),
fireplace_route('feedback'),
fireplace_route('fxa-authorize'),
fireplace_route('new'),
fireplace_route('popular'),
fireplace_route('privacy-policy'),
fireplace_route('purchases'),
fireplace_route('search/?'),
fireplace_route('settings'),
fireplace_route('terms-of-use'),
fireplace_route('tests'),
)
| bsd-3-clause | 7,800,972,507,997,840,000 | 32.28972 | 76 | 0.614542 | false |
graingert/isort | kate_plugin/isort_plugin.py | 12 | 3174 | """ Sorts Python import definitions, and groups them based on type (stdlib, third-party, local).
isort/isort_kate_plugin.py
Provides a simple kate plugin that enables the use of isort to sort Python imports
in the currently open kate file.
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import kate
from isort import SortImports
try:
from PySide import QtGui
except ImportError:
from PyQt4 import QtGui
def sort_kate_imports(add_imports=(), remove_imports=()):
"""Sorts imports within Kate while maintaining cursor position and selection, even if length of file changes."""
document = kate.activeDocument()
view = document.activeView()
position = view.cursorPosition()
selection = view.selectionRange()
sorter = SortImports(file_contents=document.text(), add_imports=add_imports, remove_imports=remove_imports,
settings_path=os.path.dirname(os.path.abspath(str(document.url().path()))))
document.setText(sorter.output)
position.setLine(position.line() + sorter.length_change)
if selection:
start = selection.start()
start.setLine(start.line() + sorter.length_change)
end = selection.end()
end.setLine(end.line() + sorter.length_change)
selection.setRange(start, end)
view.setSelection(selection)
view.setCursorPosition(position)
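# Hedged aside (not original code): outside of Kate the same isort API can be
# driven directly, e.g.
#     sorted_code = SortImports(file_contents=source_text).output
# where `source_text` is any Python source string; settings_path is optional.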
@kate.action
def sort_imports():
"""Sort Imports"""
sort_kate_imports()
@kate.action
def add_imports():
"""Add Imports"""
text, ok = QtGui.QInputDialog.getText(None,
'Add Import',
'Enter an import line to add (example: from os import path or os.path):')
if ok:
sort_kate_imports(add_imports=text.split(";"))
@kate.action
def remove_imports():
"""Remove Imports"""
text, ok = QtGui.QInputDialog.getText(None,
'Remove Import',
'Enter an import line to remove (example: os.path or from os import path):')
if ok:
sort_kate_imports(remove_imports=text.split(";"))
| mit | 3,649,261,278,003,242,500 | 38.675 | 118 | 0.692187 | false |
baldengineers/mapper | main.py | 1 | 70755 | #easy cs:go mapper: counter-strike: global offensive port of the easy tf2 mapper
#
#in development, not at a working stage.
#DIFFERENCES:
#more prefabrications
#more sections (subsections?)
#improved UI
#improved file count
#multi-game system
# program boots up and variables are set which change what game the program utilizes
# (set up after dialog with radio button + grid size is chosen)
# grid size of createprefab, how skybox renderings, skybox textures, light vars, window titles, file directories, etc.
#move all prefabs on grid
# if we can make a new grid system widget
#
#important:
#move all variable definitions that need changing based off game selection
#to a separate function which runs after dialog
#make the grid size dialog run before everything else. make it its own separate class that
#runs before mainwindow
import sys
#move this to after initial dialog
import os.path
import os
from PySide.QtCore import *
from PySide.QtGui import *
import importlib
import createPrefab
import pf
from PIL import Image
from PIL.ImageQt import ImageQt
import generateSkybox
import light_create
import export
import subprocess
import pickle
import pprint
import random
import glob
import webbrowser
import wave
import zipfile
import shutil
import winsound
import GridWidget
class GridBtn(QWidget):
def __init__(self, parent, x, y, btn_id):
super(GridBtn, self).__init__()
self.button = QPushButton("", parent)
self.x,self.y = x,y
self.btn_id = btn_id
self.button.resize(32,32)
self.button.setFixedSize(32, 32)
self.button.pressed.connect(lambda: self.click_func(parent, x, y,btn_id))
self.button.installEventFilter(self)
self.button.show()
self.icons = None
parent.progress += 100/(parent.grid_x*parent.grid_y)
parent.progressBar.setValue(parent.progress)
def reset_icon(self):
self.button.setIcon(QIcon(""))
    def click_func(self, parent, x, y, btn_id, clicked=True, h_moduleName="None", h_icon=''): # h_moduleName and h_icon are used when undoing/redoing
current_list = eval('parent.tile_list%s' % str(parent.list_tab_widget.currentIndex()+1))
#format | history.append((x,y,moduleName,self.icon,level))
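        #a history entry is a pair [previous_state, new_state]; a hypothetical example
        #(module object and pixmap shown symbolically):
        #   [(2, 3, None, None, None), (2, 3, ground_prefab, <QPixmap>, None)]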
if clicked:
parent.redo_history=[]
if self.icons:
moduleName = eval(parent.prefab_list[parent.list_tab_widget.currentIndex()][parent.current_list.currentRow()])
templist=[(x,y,moduleName,self.icons,None)]
else:
templist=[(x,y,None,None,None)]
def clear_btn(btn_id):
self.button.setIcon(QIcon())
for l in [parent.totalblocks,parent.entity_list,parent.stored_info_list]:
l[btn_id] = ''
parent.iconlist[btn_id] = ('','')
self.icons = None
if self.checkForCtrl(clicked):
clear_btn(btn_id)
else:
if clicked:
if parent.ymin == None or parent.xmin == None:
parent.ymin,parent.xmin = y,x
else:
if y < parent.ymin:
parent.ymin = y
if x < parent.xmin:
parent.xmin = x
if y > parent.ymax:
parent.ymax = y
if x > parent.xmax:
parent.xmax = x
moduleName = eval(parent.prefab_list[parent.list_tab_widget.currentIndex()][parent.current_list.currentRow()])
else:
moduleName = h_moduleName if h_moduleName != None else clear_btn(btn_id)
if h_moduleName != None:
if clicked:
icon = parent.cur_icon
else:
icon = h_icon
self.button.setIcon(QIcon(icon))
self.button.setIconSize(QSize(32,32))
parent.iconlist[btn_id] = [icon]
parent.stored_info_list[btn_id] = [moduleName,x,y,parent.id_num,parent.world_id_num,parent.entity_num,parent.placeholder_list,parent.rotation]
self.icons = icon
else:
parent.stored_info_list[btn_id] = ""
if "*" not in parent.windowTitle():
parent.setWindowTitle("Easy "+parent.gameVar+" Mapper* - ["+parent.currentfilename+"]")
if clicked:
templist.append((x,y,moduleName,self.icons,None))
parent.history.append(templist)
def checkForCtrl(self, clicked):
if clicked:
modifiers = QApplication.keyboardModifiers()
if modifiers == Qt.ControlModifier:
return True
else:
return False
else:
return False
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
#QApplication.setStyle(QStyleFactory.create("Cleanlooks")) #comment out if unwanted
#define some variables used throughout the class
self.level = 0
self.levels = 0
self.id_num = 1
self.world_id_num = 2
self.rotation = 0
self.entity_num = 1
self.btn_id_count = 0
self.grid_list=[]
self.totalblocks = []
self.skybox_list=[]
self.last_tuple = 'First'
self.skybox_light_list=[]
self.iconlist = []
self.cur_icon = ""
self.rotation_icon_list=[]
self.skybox_angle_list=[]
self.skybox_icon_list=[]
self.gridsize = []
self.count_btns = 0
self.entity_list=[]
self.save_dict = {}
self.load_dict = {}
self.stored_info_list=[]
#tabs should be more reusable
#for example: the following lists should be this instead:
#self.prefab_list = [[] for i in self.tabs] where self.tabs is the # of tabs
#i.e. We should be able to create new tabs whenever we want, just by
#changing the self.tabs variable.
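        #a rough sketch of that generalized form, assuming a self.tabs count existed:
        #   self.tabs = 3
        #   self.prefab_list      = [[] for _ in range(self.tabs)]
        #   self.prefab_text_list = [[] for _ in range(self.tabs)]
        #   self.prefab_icon_list = [[] for _ in range(self.tabs)]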
self.prefab_list = [[],[],[]]
self.prefab_text_list = [[],[],[]]
self.prefab_icon_list = [[],[],[]]
self.openblocks=[]
self.placeholder_list = []
self.history = []
self.redo_history = []
self.currentfilename='Untitled'
self.file_loaded = False
self.current_loaded = ''
self.latest_path='/'
self.isTF = True
self.TLBool = False
self.SLBool = False
self.BRBool = False
#initial startup/gridchange window
initWindow = GridChangeWindow(self, True)
values = initWindow.returnVal()
#tell which game was chosen on launch
if self.isTF:
self.gameVar,self.gameDirVar = "TF2","tf2/"
else:
self.gameVar,self.gameDirVar = "CS:GO","csgo/"
self.TFFormat() if self.isTF else self.CSFormat()
util_list = [createPrefab,light_create,generateSkybox,export]
for util in util_list:
util.setGameDirVar(self.gameDirVar)
#create the main window
self.setGeometry(100, 25, 875, 750)
self.setWindowTitle("Easy "+self.gameVar+" Mapper")
        self.setWindowIcon(QIcon("icons/icon.ico"))
#removed for now to see how gui looks without it
## if self.isTF:
## namelist = ['gravelpit','2fort','upward','mvm']
## palette = QPalette()
## palette.setBrush(QPalette.Background,QBrush(QPixmap(self.gameDirVar+"icons/backgrounds/background_"+namelist[random.randint(0,3)]+".jpg")))
## self.setPalette(palette)
#create menubar
exitAction = QAction("&Exit", self)
exitAction.setShortcut("Ctrl+Q")
exitAction.setStatusTip("Exit Application")
exitAction.triggered.connect(self.close_application)
openAction = QAction("&Open", self)
openAction.setShortcut("Ctrl+O")
openAction.setStatusTip("Open .vmf file")
openAction.triggered.connect(self.file_open)
saveAction = QAction("&Save", self)
saveAction.setShortcut("Ctrl+S")
saveAction.setStatusTip("Save File as .ezm save, allowing for use by others/you later.")
saveAction.triggered.connect(self.file_save)
saveAsAction = QAction("&Save As", self)
saveAsAction.setShortcut("Ctrl+Shift+S")
saveAsAction.setStatusTip("Save File as .ezm save, allowing for use by others/you later.")
saveAsAction.triggered.connect(lambda: self.file_save(False, True))
helpAction = QAction("&Wiki",self)
helpAction.triggered.connect(lambda: webbrowser.open_new_tab('http://github.com/baldengineers/easytf2_mapper/wiki'))
tutorialAction = QAction("&Reference Guide",self)
tutorialAction.setStatusTip("Quick reference guide on the Mapper website.")
tutorialAction.triggered.connect(lambda: webbrowser.open_new_tab('http://tf2mapper.com/tutorial.html'))
newAction = QAction("&New", self)
newAction.setShortcut("Ctrl+n")
newAction.setStatusTip("Create a New File")
newAction.triggered.connect(self.grid_change)
hammerAction = QAction("&Open Hammer",self)
hammerAction.setShortcut("Ctrl+H")
hammerAction.setStatusTip("Opens up Hammer.")
hammerAction.triggered.connect(lambda: self.open_hammer(0,"null"))
changeHammer = QAction("&Change Hammer Directory",self)
changeHammer.setShortcut("Ctrl+Shift+H")
changeHammer.setStatusTip("Changes default hammer directory.")
changeHammer.triggered.connect(lambda: self.open_hammer(0,"null",True))
changeLightAction = QAction("&Change Lighting", self)
changeLightAction.setShortcut("Ctrl+J")
changeLightAction.setStatusTip("Change the environment lighting of the map.")
changeLightAction.triggered.connect(self.change_light)
exportAction = QAction("&as .VMF", self)
exportAction.setShortcut("Ctrl+E")
exportAction.setStatusTip("Export as .vmf")
exportAction.triggered.connect(self.file_export)
undoAction = QAction("&Undo", self)
undoAction.setShortcut("Ctrl+Z")
undoAction.setStatusTip("Undo previous action")
undoAction.triggered.connect(lambda: self.undo(True))
redoAction = QAction("&Redo", self)
redoAction.setShortcut("Ctrl+Shift+Z")
redoAction.setStatusTip("Redo previous action")
redoAction.triggered.connect(lambda: self.undo(False))
gridAction = QAction("&Set Grid Size", self)
gridAction.setShortcut("Ctrl+G")
gridAction.setStatusTip("Set Grid Height and Width. RESETS ALL BLOCKS.")
gridAction.triggered.connect(self.grid_change) #change so it just makes grid bigger/smaller, not erase all blocks, or else it would just do the same exact thing as making a new file
createPrefabAction = QAction("&Create Prefab", self)
createPrefabAction.setShortcut("Ctrl+I")
createPrefabAction.setStatusTip("View the readme for a good idea on formatting Hammer Prefabs.")
createPrefabAction.triggered.connect(self.create_prefab)
consoleAction = QAction("&Open Dev Console", self)
consoleAction.setShortcut("`")
consoleAction.setStatusTip("Run functions/print variables manually")
consoleAction.triggered.connect(self.open_console)
changeSkybox = QAction("&Change Skybox", self)
changeSkybox.setStatusTip("Change the skybox of the map.")
changeSkybox.setShortcut("Ctrl+B")
changeSkybox.triggered.connect(self.change_skybox)
importPrefab = QAction("&Prefab",self)
importPrefab.setStatusTip("Import a prefab in a .zip file. You can find some user-made ones at http://tf2mapper.com")
importPrefab.setShortcut("Ctrl+Shift+I")
importPrefab.triggered.connect(self.import_prefab)
bspExportAction = QAction("&as .BSP",self)
bspExportAction.setStatusTip("Export as .bsp")
bspExportAction.setShortcut("Ctrl+Shift+E")
bspExportAction.triggered.connect(self.file_export_bsp)
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu("&File")
editMenu = mainMenu.addMenu("&Edit")
optionsMenu = mainMenu.addMenu("&Options")
toolsMenu = mainMenu.addMenu("&Tools")
helpMenu = mainMenu.addMenu("&Help")
fileMenu.addAction(newAction)
fileMenu.addAction(openAction)
fileMenu.addAction(saveAction)
fileMenu.addAction(saveAsAction)
fileMenu.addSeparator()
importMenu = fileMenu.addMenu("&Import")
importMenu.addAction(importPrefab)
exportMenu = fileMenu.addMenu("&Export")
exportMenu.addAction(exportAction)
exportMenu.addAction(bspExportAction)
fileMenu.addSeparator()
editMenu.addAction(undoAction)
editMenu.addAction(redoAction)
fileMenu.addAction(exitAction)
optionsMenu.addAction(gridAction)
optionsMenu.addAction(changeSkybox)
optionsMenu.addAction(changeHammer)
toolsMenu.addAction(createPrefabAction)
toolsMenu.addAction(hammerAction)
toolsMenu.addSeparator()
toolsMenu.addAction(consoleAction)
helpMenu.addAction(tutorialAction)
helpMenu.addAction(helpAction)
#create the status bar
self.status = QStatusBar(self)
self.setStatusBar(self.status)
#perform some necessary functions for startup of program
self.home()
self.grid_change_func(values[0], values[1], values[2])
#self.change_skybox()
#self.level_select()
def TFFormat(self):
print('TF2 version of the mapper loading!')
sys.path.append(self.gameDirVar+"prefabs/")
self.currentlight = '''
entity
{
"id" "world_idnum"
"classname" "light_environment"
"_ambient" "255 255 255 100"
"_ambientHDR" "-1 -1 -1 1"
"_AmbientScaleHDR" "1"
"_light" "CURRENT_LIGHT"
"_lightHDR" "-1 -1 -1 1"
"_lightscaleHDR" "1"
"angles" "CURRENT_ANGLE"
"pitch" "0"
"SunSpreadAngle" "0"
"origin" "0 0 73"
editor
{
"color" "220 30 220"
"visgroupshown" "1"
"visgroupautoshown" "1"
"logicalpos" "[0 500]"
}
}
'''
#skybox default needs to be based off game chosen
self.skybox = 'sky_tf2_04'
#skyboxlight = '255 255 255 200'
#skyboxangle = '0 0 0'
#if the user does not change the lighting, it sticks with this.
#if the user does not choose a skybox it sticks with this
#self.prefab_file = open(self.gameDirVar+"prefab_template/prefab_list.txt")
#self.prefab_text_file = open(self.gameDirVar+"prefab_template/prefab_text_list.txt")
#self.prefab_icon_file = open(self.gameDirVar+"prefab_template/prefab_icon_list.txt")
self.prefab_file = pickle.load(open(self.gameDirVar+"prefabs/pfinfo.ezmd","rb"))
self.skybox_file = open(self.gameDirVar+"prefab_template/skybox_list.txt")
self.skybox_icon = open(self.gameDirVar+"prefab_template/skybox_icons.txt")
self.skybox_light = open(self.gameDirVar+"prefab_template/skybox_light.txt")
self.skybox_angle = open(self.gameDirVar+"prefab_template/skybox_angle.txt")
for main_index,file in enumerate(["prefab_list","prefab_icon_list","prefab_text_list"]):
for index,line in enumerate(self.prefab_file[main_index+1]):
eval("self."+file+"""[int(self.prefab_file[0][index])].append(line)""")# need to do this because reading the file generates a \n after every line
section = 0
self.rotation_icon_list = []
self.index_section_list = [0]
self.rotation_icon_list.append([])
#print(rotation_icon_list)
for line in self.skybox_file.readlines():
self.skybox_list.append(line[:-1] if line.endswith("\n") else line)# need to do this because reading the file generates a \n after every line
for line in self.skybox_icon.readlines():
self.skybox_icon_list.append(line[:-1] if line.endswith("\n") else line)
for line in self.skybox_light.readlines():
self.skybox_light_list.append(line[:-1] if line.endswith("\n") else line)
for line in self.skybox_angle.readlines():
self.skybox_angle_list.append(line[:-1] if line.endswith("\n") else line)
for file in [self.skybox_file,self.skybox_icon,self.skybox_angle,self.skybox_light]:
file.close()
print(self.prefab_list)
#imports that need prefab_list to be defined
for sec in self.prefab_list:
for item in sec:
if item:
globals()[item] = importlib.import_module(item)
print("import", item)
self.save_dict[item]=eval(item)
self.load_dict[eval(item)]=item
logo = open('logo.log','r+')
logo_f = logo.readlines()
for i in logo_f:
print(i[:-1])
logo.close()
print("\n~~~~~~~~~~~~~~~~~~~~~\nMapper loaded! You may have to alt-tab to find the input values dialog.\n")
def CSFormat(self):
#for cs area
pass
def open_hammer(self,loaded,file,reloc = False):
self.open_file()
if "loaded_first_time" not in self.files or reloc:
self.file.close()
self.open_file(True)
hammer_location = QFileDialog.getOpenFileName(self, "Find Hammer Location", "/","Hammer Executable (*.exe *.bat)")
hammer_location = str(hammer_location[0])
self.file.write("loaded_first_time\n")
self.file.write(hammer_location)
self.file.close()
if loaded == 1:
subprocess.Popen(hammer_location +" "+ file)
else:
subprocess.Popen(hammer_location)
else:
if os.path.isfile(self.fileloaded[1]):
if loaded == 1:
subprocess.Popen(self.fileloaded[1] + " "+file)
else:
subprocess.Popen(self.fileloaded[1])
else:
                self.notFound = QMessageBox(self)
                self.notFound.setText("ERROR!")
                self.notFound.setInformativeText("Hammer executable/batch moved or renamed! (or something else went wrong...)")
                self.notFound.exec_()
                self.file.close()
                os.remove(self.gameDirVar+"startupcache/startup.su")
                self.open_hammer(0,"null")
def open_file(self,reloc = False):
if reloc:
os.remove(self.gameDirVar+"startupcache/startup.su")
if os.path.isfile(self.gameDirVar+"startupcache/startup.su"):
self.file = open(self.gameDirVar+"startupcache/startup.su", "r+")
else:
self.file = open(self.gameDirVar+"startupcache/startup.su", "w+")
self.fileloaded = self.file.readlines()
self.files = "".join(self.fileloaded)
def closeEvent(self, event):
#closeEvent runs close_application when the x button is pressed
event.ignore()
self.close_application()
def home(self):
global levels, current_list
self.xmin = None
self.ymin = None
self.xmax = 0
self.ymax = 0
self.central_widget = QWidget()
self.setCentralWidget(self.central_widget)
self.scrollArea = QScrollArea()
self.current = QPushButton("",self)
self.current.setIcon(QIcon(''))
self.current.setIconSize(QSize(40,40))
self.current.setFixedSize(QSize(40,40))
self.current.setFlat(True)
self.rotateCW = QToolButton(self)
self.rotateCW.setShortcut(QKeySequence(Qt.Key_Right))
self.rotateCW.setIcon(QIcon('icons/rotate_cw.png'))
self.rotateCW.setIconSize(QSize(40,40))
self.rotateCW.setFixedSize(QSize(40,40))
self.rotateCW.setAutoRaise(True)
self.rotateCCW = QToolButton(self)
self.rotateCCW.setShortcut(QKeySequence(Qt.Key_Left))
self.rotateCCW.setIcon(QIcon('icons/rotate_ccw.png'))
self.rotateCCW.setIconSize(QSize(40,40))
self.rotateCCW.setFixedSize(QSize(40,40))
self.rotateCCW.setAutoRaise(True)
#sets rotation value. 0 = right, 1 = down, 2 = left, 3 = right
self.rotateCW.clicked.connect(self.rotateCW_func)
self.rotateCCW.clicked.connect(self.rotateCCW_func)
self.button_rotate_layout = QHBoxLayout()
self.button_rotate_layout.addWidget(self.rotateCCW)
self.button_rotate_layout.addWidget(self.current)
self.button_rotate_layout.addWidget(self.rotateCW)
self.button_rotate_layout.addStretch(1)
#add the main tool bar
self.skyboxAction = QAction(QIcon('icons/sky.png'), "Change Skybox", self)
self.skyboxAction.triggered.connect(self.loadSkyboxList)
self.tileListAction = QAction(QIcon('icons/tile_list.png'), "Re-open Tile list", self)
self.tileListAction.triggered.connect(self.loadTileList)
self.rotateDockAction = QAction(QIcon('icons/rotate_dock.png'), "Re-open Rotation Dock", self)
self.rotateDockAction.triggered.connect(self.loadButtonRotate)
self.mainToolBar = self.addToolBar("Main")
self.mainToolBar.addAction(self.skyboxAction)
self.mainToolBar.addAction(self.tileListAction)
self.mainToolBar.addAction(self.rotateDockAction)
#add the many sections of the tile_list
self.tile_list1 = QListWidget()
self.tile_list2 = QListWidget()
self.tile_list3 = QListWidget()
self.current_list = self.tile_list1
for l in [self.tile_list1, self.tile_list2, self.tile_list3]:
l.setDragEnabled(True)
self.gui_skybox_list = QListWidget()
#print(self.skybox_icon_list)
self.gui_skybox_list.setIconSize(QSize(140, 20))
self.gui_skybox_list.setMaximumWidth(160)
for index, text in enumerate(self.skybox_list):
item = QListWidgetItem(QIcon(self.gameDirVar+self.skybox_icon_list[index]),'')
self.gui_skybox_list.addItem(item)
self.list_tab_widget = QTabWidget()
self.list_tab_widget.setMaximumWidth(200)
self.list_tab_widget.addTab(self.tile_list1,'Geometry')
self.list_tab_widget.addTab(self.tile_list2,'Map Layout')
self.list_tab_widget.addTab(self.tile_list3,'Fun')
self.list_tab_widget.currentChanged.connect(self.changeCurrentList)
print("len:", self.list_tab_widget.count())
#add the prefab tools
self.up_tool_btn = QToolButton(self)
self.up_tool_btn.setIcon(QIcon('icons/up.png'))
self.up_tool_btn.clicked.connect(self.prefab_list_up)
self.down_tool_btn = QToolButton(self)
self.down_tool_btn.setIcon(QIcon('icons/down.png'))
self.down_tool_btn.clicked.connect(self.prefab_list_down)
self.del_tool_btn = QToolButton(self)
self.del_tool_btn.setIcon(QIcon('icons/delete.png'))
self.del_tool_btn.clicked.connect(lambda: self.prefab_list_del(self.current_list.currentRow()))
self.add_tool_btn = QToolButton(self)
self.add_tool_btn.setIcon(QIcon('icons/add.png'))
self.add_tool_btn.clicked.connect(self.create_prefab)
self.tile_toolbar = QToolBar()
for t in [self.up_tool_btn,self.down_tool_btn,self.del_tool_btn,self.add_tool_btn]:
self.tile_toolbar.addWidget(t)
self.tile_toolbar.addSeparator()
for index, text in enumerate(self.prefab_text_list):
for ind, indiv in enumerate(text):
curr_list = eval("self.tile_list%d" % (index+1))
item = QListWidgetItem(QIcon(self.gameDirVar+self.prefab_icon_list[index][ind]), indiv)
curr_list.addItem(item)
for i in range(self.list_tab_widget.count()):
eval("self.tile_list%d" %(i+1)).currentItemChanged.connect(self.changeIcon)
#contains label and list vertically
self.tile_list_layout = QVBoxLayout()
#self.tile_list_layout.addWidget(self.listLabel)
self.tile_list_layout.addWidget(self.list_tab_widget)
#self.tile_list_layout.addWidget(self.toolsLabel)
self.tile_list_layout.addWidget(self.tile_toolbar)
self.button_grid_layout = QGridLayout()
self.button_grid_layout.setSpacing(0)
self.grid_widget = QWidget()
self.grid_widget.setLayout(self.button_grid_layout)
self.scrollArea.setWidget(self.grid_widget)
self.scrollArea.setWidgetResizable(True)
self.button_rotate_widget = QWidget()
self.button_rotate_widget.setLayout(self.button_rotate_layout)
self.tile_list_widget = QWidget()
self.tile_list_widget.setLayout(self.tile_list_layout)
self.loadTileList(True)
self.loadSkyboxList(True)
self.loadButtonRotate(True)
self.column = QHBoxLayout()
self.column.addWidget(self.scrollArea)
self.row = QVBoxLayout(self.central_widget)
self.row.addLayout(self.column)
#TESTING
from classes import PrefabItem, ListGroup
#grid for placing prefabs
self.grid = GridWidget.GridWidget(20,20,self)
self.grid_container = GridWidget.GridWidgetContainer(self.grid)
self.grid_dock = QDockWidget("Grid", self)
self.grid_dock.setWidget(self.grid_container)
self.grid_dock.setFloating(True)
#define various lists
self.tile_list1 = QListWidget()
self.tile_list2 = QListWidget()
self.tile_list3 = QListWidget()
#add items to self.tab_dict and everything will update
self.tab_dict = {"Geometry":self.tile_list1, "Map Layout":self.tile_list2, "Fun/Other":self.tile_list3}
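        #hypothetical example: one more entry here is all a new tab would need
        #   self.tab_dict["Custom"] = QListWidget()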
self.list_group = ListGroup([l for _, l in self.tab_dict.items()])
def set_cur_prefab(item):
self.grid.cur_prefab = item.prefab
for _, tile_list in self.tab_dict.items():
tile_list.itemClicked.connect(set_cur_prefab)
#add prefabs to the lists
with open("tf2/prefabs.dat", "rb") as f:
l = pickle.load(f)
for p in l:
prefab = pf.Prefab(p)
self.tab_dict[prefab.section].addItem(PrefabItem(prefab))
#create tabwidget for the lists
self.list_tab_widget = QTabWidget()
self.list_tab_widget.addTab(self.tab_dict['Geometry'],'Geometry')
self.list_tab_widget.addTab(self.tab_dict['Map Layout'],'Map Layout')
self.list_tab_widget.addTab(self.tab_dict['Fun/Other'],'Fun/Other')
#create dock for the tab widget
self.prefab_dock = QDockWidget("Prefabs", self)
self.prefab_dock.setWidget(self.list_tab_widget)
self.prefab_dock.setFloating(True)
#create buttons for the tools
self.grid_tools_ag = QActionGroup(self)
self.add_prefab_action = QAction(QIcon("icons/add_prefab.png"), "Add a prefab to the grid", self.grid_tools_ag)
self.add_prefab_action.toggled.connect(self.grid.enableAddPrefab)
self.select_action = QAction(QIcon("icons/select_move.png"), "Select Prefabs", self.grid_tools_ag)
self.select_action.toggled.connect(self.grid.enableSelect)
self.grid_tools = QToolBar()
self.grid_tools.setOrientation(Qt.Vertical)
self.addToolBar(Qt.LeftToolBarArea, self.grid_tools)
for act in [self.add_prefab_action,self.select_action]:
act.setCheckable(True)
self.grid_tools.addAction(act)
self.add_prefab_action.setChecked(True) #set the default button checked
def file_export():
for p in self.grid.prefabs:
                p.prefab.create(p.posx, p.posy, self.grid.prefab_scale, self.rotation)
## self.grid_tool_dock = QDockWidget("Tools", self)
## self.grid_tool_dock.setWidget(self.grid_tools)
## self.grid_tool_dock.setFloating(True)
self.addDockWidget(Qt.LeftDockWidgetArea, self.skybox_list_dock)
#END TESTING
if os.path.isfile(self.gameDirVar+'startupcache/firsttime.su'):
f = open(self.gameDirVar+'startupcache/firsttime.su', 'r+')
lines = f.readlines()
else:
f = open(self.gameDirVar+'startupcache/firsttime.su','w+')
lines = f.readlines()
if "startup" not in lines:
QMessageBox.information(self, "First Launch", "First Launch!\n\nYou haven't launched this before! Try looking at the <a href=\"https://github.com/baldengineers/easytf2_mapper/wiki/Texture-bug\">wiki</a> for help!")
f.write("startup")
f.close()
#WILL ONLY WORK IN REDIST FORM
else:
pass
self.show()
def loadSkyboxList(self,startup=False):
if not self.SLBool:
self.skybox_list_dock = QDockWidget("Skybox List", self)
self.skybox_list_dock.visibilityChanged.connect(self.toggleSLBool)
self.skybox_list_dock.setWidget(self.gui_skybox_list)
self.skybox_list_dock.setFloating(False)
self.addDockWidget(Qt.LeftDockWidgetArea, self.skybox_list_dock)
def toggleSLBool(self):
if self.SLBool:
self.SLBool = False
else:
self.SLBool = True
def loadTileList(self,startup=False):
if not self.TLBool:
self.tile_list_dock = QDockWidget("Prefab List", self)
self.tile_list_dock.visibilityChanged.connect(self.toggleTLBool)
self.tile_list_dock.setWidget(self.tile_list_widget)
self.tile_list_dock.setFloating(False)
self.addDockWidget(Qt.RightDockWidgetArea, self.tile_list_dock)
#if startup:
#self.TLBool = True
def toggleTLBool(self):
if self.TLBool:
self.TLBool = False
else:
self.TLBool = True
def loadButtonRotate(self,startup = False):
if not self.BRBool:
self.button_rotate_dock = QDockWidget("Current Prefab", self)
self.button_rotate_dock.visibilityChanged.connect(self.toggleBRBool)
self.button_rotate_dock.setWidget(self.button_rotate_widget)
self.button_rotate_dock.setFloating(False)
self.addDockWidget(Qt.LeftDockWidgetArea,self.button_rotate_dock)
#if startup:
#self.BRBool = True
#i am.... the top dock
# ^
# |
#this comment is perfect and i will leave it in because the pun is wasted because it's no longer on the top dock widget area
def toggleBRBool(self):
if self.BRBool:
self.BRBool = False
else:
self.BRBool = True
def changeCurrentList(self):
print("current list: tile_list%s" % str(self.list_tab_widget.currentIndex()+1))
self.current_list = eval('self.tile_list%s' % str(self.list_tab_widget.currentIndex()+1))
def rotateCW_func(self):
if self.rotation < 3:
self.rotation = self.rotation + 1
else:
self.rotation = 0
self.changeIcon()
def rotateCCW_func(self):
if self.rotation == 0:
self.rotation = 3
else:
self.rotation = self.rotation - 1
self.changeIcon()
def prefab_list_up(self):
self.current_list = eval('self.tile_list%s' % str(self.list_tab_widget.currentIndex()+1))
currentRow = self.current_list.currentRow()
if currentRow > 0:
currentItem = self.current_list.takeItem(currentRow)
self.current_list.insertItem(currentRow - 1, currentItem)
self.current_list.setCurrentRow(currentRow - 1)
self.update_list_file(currentRow, currentRow - 1)
self.changeIcon()
def prefab_list_down(self):
self.current_list = eval('self.tile_list%s' % str(self.list_tab_widget.currentIndex()+1))
currentRow = self.current_list.currentRow()
if currentRow < self.current_list.count() - 1:
currentItem = self.current_list.takeItem(currentRow)
self.current_list.insertItem(currentRow + 1, currentItem)
self.current_list.setCurrentRow(currentRow + 1)
self.update_list_file(currentRow, currentRow + 1)
self.changeIcon()
def update_list_file(self, old_index, new_index):
file_list = [self.gameDirVar+"prefab_template/prefab_list.txt", self.gameDirVar+"prefab_template/prefab_icon_list.txt", self.gameDirVar+"prefab_template/prefab_text_list.txt"]
        list_list = [self.prefab_list, self.prefab_icon_list, self.prefab_text_list]
for l in list_list:
l.insert(new_index, l.pop(old_index))
with open(file_list[list_list.index(l)], "w") as file:
if list_list.index(l) == 0:
rot_file = open(self.gameDirVar+"prefab_template/rot_prefab_list.txt", "w")
for item in l:
file.write(item + "\n")
if list_list.index(l) == 0:
rot_file.write(item + "_icon_list.txt" + "\n")
#stupid icon lists, making me add more lines of code to my already concise function
def prefab_list_del(self, currentprefab):
#NEEDS TO BE REDONE based off what mode
        choice = QMessageBox.question(self,"Delete Prefab (DO NOT DELETE STOCK PREFABS)","Are you sure you want to delete \"%s\"?\nThis is mainly for developers." %(self.prefab_text_list[self.list_tab_widget.currentIndex()][currentprefab]),
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if choice == QMessageBox.Yes:
text_list = [self.gameDirVar+'prefab_template/prefab_text_list.txt',self.gameDirVar+'prefab_template/rot_prefab_list.txt',
self.gameDirVar+'prefab_template/prefab_list.txt', self.gameDirVar+'prefab_template/prefab_icon_list.txt']
for cur in text_list:
file = open(cur, 'r+')
cur_list = file.readlines()
file.seek(0)
file.truncate()
                print(cur_list[self.index_section_list[self.list_tab_widget.currentIndex()]+currentprefab+1])
                del cur_list[self.index_section_list[self.list_tab_widget.currentIndex()]+currentprefab+1]
cur_str = "".join(cur_list)
file.write(cur_str)
file.close()
restart_btn = QPushButton("Restart")
later_btn = QPushButton("Later")
choice = QMessageBox(self)
choice.setIcon(QMessageBox.Question)
choice.setWindowTitle("Prefab Successfully Deleted")
choice.setText("Program must be restarted for changes to take effect.")
choice.setInformativeText("Restart? You will lose any unsaved progress.")
choice.addButton(restart_btn, QMessageBox.YesRole)
choice.addButton(later_btn, QMessageBox.NoRole)
choice.setDefaultButton(later_btn)
#needs to be redone-- final redist will not be called easytf2mapper as it is no longer just that
if choice.exec_() == 0:
if os.path.isfile('EasyTF2Mapper.exe'):
subprocess.Popen('EasyTF2Mapper.exe')
else:
subprocess.Popen('python main.py')
sys.exit()
else:
pass
else:
del choice
def changeIcon(self):
pixmap = QPixmap(self.gameDirVar+self.prefab_icon_list[self.list_tab_widget.currentIndex()][self.current_list.currentRow()])
transform = QTransform().rotate(90*self.rotation)
self.cur_icon = pixmap.transformed(transform, Qt.SmoothTransformation)
self.current.setIcon(QIcon(self.cur_icon))
self.current.setIconSize(QSize(32,32))
def file_open(self, tmp = False, first = False):
global stored_info_list, totalblocks,entity_list, currentfilename, file_loaded, latest_path,save_dict,load_dict
if not tmp:
name = QFileDialog.getOpenFileName(self, "Open File", latest_path,"*.ezm")
latest_path,file = str(name[0]),open(name[0], "rb")
self.level = 0
self.iconlist=[]
while True:
header = pickle.load(file)
if "levels" in header:
openlines = pickle.load(file)
levelcountload = openlines
elif "grid_size" in header:
openlines = pickle.load(file)
self.grid_change_func(openlines[0],openlines[1],openlines[2])
#print('grid changed')
elif "stored_info_list" in header:
stored_info_list=[]
stored_info_list_temp=[]
openlines = pickle.load(file)
for item in openlines:
stored_info_list_temp.append(item)
for index,lvl in enumerate(stored_info_list_temp):
stored_info_list.append([])
for info in lvl:
try:
temp = save_dict[info[0]]
info[0] = temp
stored_info_list[index].append(info)
except:
stored_info_list[index].append('')
elif "icon_list" in header:
self.iconlist=[]
openlines = pickle.load(file)
for item in openlines:
self.iconlist.append(item)
elif "GSList" in header:
openlines = pickle.load(file)
self.gui_skybox_list.setCurrentRow(openlines)
else:
break
for i in range(levelcountload):
file = open(self.gameDirVar+"leveltemp/level" + str(i)+".tmp", "wb")
pickle.dump(self.iconlist[i], file)
file.close()
#self.change_skybox()
file.close()
self.setWindowTitle("Easy "+gameVar+" Mapper - [" + str(name[0]) + "]")
currentfilename = str(name[0])
file_loaded = True
self.upd_icns()
else:
file = open(self.gameDirVar+"leveltemp/level.tmp", "rb")
self.iconlist = pickle.load(file)
file.close()
for index, icon in enumerate(self.iconlist):
self.grid_list[index].button.setIcon(QIcon(icon))
self.grid_list[index].button.setIconSize(QSize(32,32))
def upd_icns(self):
for index, icon in enumerate(self.iconlist[0]):
#if "icons" in icon:
#print(grid_list)
if icon != '':
#print("index: "+str(index)+" icon name: "+icon[0])
ptrans = QTransform().rotate(90*icon[1])
pmap = QPixmap(icon[0]).transformed(ptrans,Qt.SmoothTransformation)
self.grid_list[index].button.setIcon(QIcon(pmap))
self.grid_list[index].button.setIconSize(QSize(32,32))
else:
#print(str(e))
self.grid_list[index].button.setIcon(QIcon(''))
self.grid_list[index].button.setIconSize(QSize(32,32))
def file_save(self, tmp = False, saveAs = False):
global grid_x, grid_y, iconlist, levels, level, currentfilename, file_loaded, latest_path, stored_info_list, save_dict,load_dict,skybox2_list
print(latest_path)
self.gridsize = (grid_x,grid_y)
skybox_sav = self.gui_skybox_list.currentRow()
if not tmp:
if not file_loaded or saveAs:
name = QFileDialog.getSaveFileName(self, "Save File", latest_path, "*.ezm")[0]
latest_path = name
else:
if "*" in currentfilename:
name = currentfilename[:-1]
else:
name = currentfilename
file = open(name, "wb")
pickle.dump("<levels>",file)
pickle.dump(self.levels,file)
pickle.dump("<grid_size>", file)
pickle.dump(self.gridsize, file)
pickle.dump("<stored_info_list>", file)
stored_info_list_temp=[]
for index,lvl in enumerate(stored_info_list):
stored_info_list_temp.append([])
for info in lvl:
#print(info)
if info:
temp = load_dict[info[0]]
info[0] = temp
stored_info_list_temp[index].append(info)
else:
stored_info_list_temp[index].append('')
pickle.dump(stored_info_list_temp, file)
pickle.dump("<icon_list>", file)
pickle.dump(self.iconlist, file)
pickle.dump("<GSList>", file)
pickle.dump(skybox_sav, file)
file.close()
QMessageBox.information(self, "File Saved", "File saved as %s" %(name))
self.setWindowTitle("Easy "+gameVar+" Mapper - [" + name + "]")
currentfilename = name
file_loaded = True
else:
#writes tmp file to save the icons for each level
file = open(self.gameDirVar+"leveltemp/level.tmp", "wb")
pickle.dump(self.iconlist, file)
file.close()
def file_export(self,bsp=False):
global cur_vmf_location,id_num,stored_info_list, grid_y, grid_x, world_id_num, count_btns, currentlight, skybox, skybox2_list, entity_list, skybox_light_list, skybox_angle_list, latest_path
skyboxgeolist = []
#make recommended height based off tallest prefab in the map
        skyboxz = QInputDialog.getInt(self, "Set Skybox Height", "Skybox Height (hammer units, %d minimum recommended):" % 1024, 1024)
        skyboxz = int(skyboxz[0])
#generate skybox stuff now
#needs to be redone to change how skyboxes are rendered
create = generateSkybox.createSkyboxLeft(grid_x,grid_y,skyboxz,self.id_num,world_id_num)
skyboxgeolist.append(create[0])
self.id_num = create[1]
self.world_id_num = create[2]
create = generateSkybox.createSkyboxNorth(grid_x,grid_y,skyboxz,self.id_num,world_id_num)
skyboxgeolist.append(create[0])
self.id_num = create[1]
self.world_id_num = create[2]
create = generateSkybox.createSkyboxRight(grid_x,grid_y,skyboxz,self.id_num,world_id_num)
skyboxgeolist.append(create[0])
self.id_num = create[1]
self.world_id_num = create[2]
create = generateSkybox.createSkyboxTop(grid_x,grid_y,skyboxz,self.id_num,world_id_num)
skyboxgeolist.append(create[0])
self.id_num = create[1]
self.world_id_num = create[2]
create = generateSkybox.createSkyboxSouth(grid_x,grid_y,skyboxz,self.id_num,world_id_num)
skyboxgeolist.append(create[0])
        if self.gui_skybox_list.currentRow() < 0:
            #no skybox selected: warn, let the user pick one, and fall back to the old defaults
            QMessageBox.critical(self, "Error", "Please choose a skybox.")
            self.change_skybox()
            skybox = 'sky_tf2_04'
            skyboxlight = '216 207 194 700'
            skyboxangle = '0 145 0'
        else:
            skybox = self.skybox_list[self.gui_skybox_list.currentRow()]
            skyboxlight = self.skybox_light_list[self.gui_skybox_list.currentRow()]
            skyboxangle = self.skybox_angle_list[self.gui_skybox_list.currentRow()]
        currentlight = currentlight.replace("world_idnum",str(world_id_num))
        currentlight = currentlight.replace("CURRENT_LIGHT",skyboxlight)
        currentlight = currentlight.replace("CURRENT_ANGLE",skyboxangle)
        light = currentlight
latest_path = latest_path.replace(".ezm",".vmf")
self.totalblocks =[]
self.entity_list=[]
for lvl in stored_info_list:
for prfb in lvl:
if prfb != '':
create = prfb[0].createTile(prfb[1], prfb[2], prfb[3], prfb[4], prfb[5], prfb[6], prfb[7], prfb[8])
self.id_num = create[1]
self.world_id_num = create[2]
self.totalblocks.append(create[0])
self.entity_num = create[3]
self.placeholder_list = create[5]
self.entity_list.append(create[4])
import export #export contains the code to compile/export the map
        wholething = export.execute(self.totalblocks, self.entity_list, skybox, skyboxgeolist, light)
if bsp:
            with open(self.gameDirVar+'output/'+self.gameVar+'mapperoutput.vmf','w+') as f:
f.write(wholething)
            self.cur_vmf_location = self.gameDirVar+'output/'+self.gameVar+'mapperoutput.vmf'
else:
name = QFileDialog.getSaveFileName(self, "Export .vmf", latest_path, "Valve Map File (*.vmf)")
with open(name[0], "w+") as f:
f.write(wholething)
            popup = QMessageBox(self)
popup.setWindowTitle("File Exported")
popup.setText("The .vmf has been outputted to %s" %(name[0]))
popup.setInformativeText(" Open it in hammer to compile as a .bsp and/or make some changes.")
hammerButton = popup.addButton("Open Hammer",QMessageBox.ActionRole)
exitButton = popup.addButton("OK",QMessageBox.ActionRole)
popup.exec_()
if popup.clickedButton() == hammerButton:
self.open_hammer(1,name[0])
if popup.clickedButton() == exitButton:
popup.deleteLater()
self.cur_vmf_location = name[0]
def file_export_bsp(self):
self.file_export(True)
#need to change for multi-game
#this is fine and can be used, just make an if/then with the cs:go version
        vbsp_cache = self.gameDirVar+'startupcache/vbsp.su'
        tf2BinLocFile = ''
        if os.path.isfile(vbsp_cache):
            with open(vbsp_cache, 'r') as tf2BinLoc:
                #the stored path may use Windows backslashes; normalize them so the subprocess calls below work
                tf2BinLocFile = tf2BinLoc.readlines()[0].replace('\\','/')
        if not os.path.isfile(tf2BinLocFile+'/vbsp.exe'):
            tf2BinLocFile = QFileDialog.getExistingDirectory(self,'LOCATE Team Fortress 2/bin, NOT IN DEFAULT LOCATION!')
            tf2BinLocFile = str(tf2BinLocFile.replace('\\','/'))
            with open(vbsp_cache, 'w+') as tf2BinLoc:
                tf2BinLoc.write(tf2BinLocFile)
subprocess.call('"'+tf2BinLocFile+'/vbsp.exe" "'+self.cur_vmf_location+'"')
subprocess.call('"'+tf2BinLocFile+'/vvis.exe" "'+self.cur_vmf_location.replace('.vmf','.bsp')+'"')
subprocess.call('"'+tf2BinLocFile+'/vrad.exe" "'+self.cur_vmf_location.replace('.vmf','.bsp')+'"')
        shutil.copyfile(self.cur_vmf_location.replace('.vmf','.bsp'),tf2BinLocFile.replace('/bin','/tf/maps/tf2mapperoutput.bsp'))
popup = QMessageBox(self)
popup.setWindowTitle("File Exported")
popup.setText("The .vmf has been outputted to %s" %(tf2BinLocFile.replace('/bin','/tf/maps/tf2mapperoutput.bsp')))
popup.setInformativeText("Open TF2 and in load up 'tf2outputmapper.bsp'! You can do this by typing 'map tf2mapperoutput' or by creating a server with that map.\n\nThere also is a .vmf file of your map stored in output/tf2mapperoutput.vmf.")
hammerButton = popup.addButton("Open TF2",QMessageBox.ActionRole)
exitButton = popup.addButton("OK",QMessageBox.ActionRole)
popup.exec_()
if popup.clickedButton() == hammerButton:
subprocess.Popen('"'+tf2BinLocFile.replace('steamapps/common/Team Fortress 2/bin','')+'steam.exe" "steam://run/440"')
if popup.clickedButton() == exitButton:
popup.deleteLater()
def removeButtons(self):
for i in reversed(range(self.button_grid_layout.count())):
widget = self.button_grid_layout.takeAt(i).widget()
if widget is not None:
widget.deleteLater()
def grid_change(self):
grid_dialog = GridChangeWindow(self)
values = grid_dialog.returnVal()
self.grid_change_func(values[0], values[1], values[2])
def grid_change_func(self,x,y,z):
#needs to be changed to accomodate grid widget
#basically: reset entitylist, totalblocks, and iconlist
#reset grid widget
#set mins and maxs to None
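        #rough sketch of the grid-widget based reset described above (the method names are
        #assumptions for illustration, not the current GridWidget API):
        #   self.grid.clear_prefabs()
        #   self.grid.set_size(x, y)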
self.entity_list = []
self.iconlist = []
self.totalblocks = []
self.grid_list = []
self.xmin = None
self.ymin = None
self.xmax = None
self.ymax = None
#self.level = 0
self.count_btns = 0
self.file_loaded = False
self.grid_y = y
self.grid_x = x
self.levels = z
self.removeButtons()
#create the progress bar
self.progressBar = QProgressBar()
self.progress = 0 #how much progress is on the progressBar
self.status.addWidget(self.progressBar)
#self.totalblocks.append([])
#self.entity_list.append([])
#self.iconlist.append([])
self.stored_info_list.append([])
self.btn_id_count=0
self.count_btns=0
for x in range(self.grid_x):
for y in range(self.grid_y):
self.totalblocks.append("") #This is so that there are no problems with replacing list values
self.entity_list.append("")
self.iconlist.append(('',''))
self.stored_info_list.append('')
for x in range(self.grid_x):
for y in range(self.grid_y):
grid_btn = GridBtn(self, x, y, self.btn_id_count)
self.button_grid_layout.addWidget(grid_btn.button,y,x)
self.btn_id_count += 1
self.grid_list.append(grid_btn)
self.button_grid_layout.setRowStretch(self.grid_y + 1, 1)
self.button_grid_layout.setColumnStretch(self.grid_x + 1, 1)
self.entity_list.append("lighting slot")
self.count_btns = self.grid_x*self.grid_y
self.status.removeWidget(self.progressBar)
self.setWindowTitle("Easy "+self.gameVar+" Mapper ")
def change_light(self):
r_input = QInputDialog.getInt(self, ("Red light level 0-255"),
("Put in the red light ambiance level, 0-255:"))
g_input = QInputDialog.getInt(self, ("Green light level 0-255"),
("Put in the green light ambiance level, 0-255:"))
b_input = QInputDialog.getInt(self, ("Blue light level 0-255"),
("Put in the blue light ambiance level, 0-255:"))
light_input = QInputDialog.getInt(self, ("Brightness level"),
("Put in the brightness level desired:"))
r_input = int(r_input[0])
g_input = int(g_input[0])
b_input = int(b_input[0])
light_input = int(light_input[0])
if r_input > 255 or g_input > 255 or b_input > 255:
print("Error. Put in a number below 256 for each color input")
        self.currentlight = light_create.replacevalues(r_input,g_input,b_input,light_input,self.world_id_num)
def change_skybox(self):
self.window = QDialog(self)
skybox2_list = QListWidget()
skybox2_list.setIconSize(QSize(200, 25))
for index, text in enumerate(self.skybox_list):
item = QListWidgetItem(QIcon(self.gameDirVar+self.skybox_icon_list[index]), text)
skybox2_list.addItem(item)
self.layout = QHBoxLayout()
self.layout.addWidget(skybox2_list)
self.window.setGeometry(150,150,400,300)
self.window.setWindowTitle("Choose a skybox")
        self.window.setWindowIcon(QIcon("icons/icon.ico"))
self.window.setLayout(self.layout)
skybox2_list.itemClicked.connect(self.window.close)
self.window.exec_()
def close_application(self, restart = False):
if not restart:
close = True
if "*" in self.windowTitle():
print('are you sure')
choice = QMessageBox.warning(self, "Exit TF2Mapper",
"Some changes have not been saved.\nDo you really want to quit?",
QMessageBox.Ok | QMessageBox.Cancel,
QMessageBox.Cancel)
if choice != QMessageBox.Ok:
close = False
if close:
folder = self.gameDirVar+'leveltemp/'
for f in os.listdir(folder):
if "level" in f:
print("removing", f)
os.remove(folder+f)
sys.exit()
if restart:
choice = QMessageBox.question(self, "Restart",
"Are you sure you want to restart?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if choice == QMessageBox.Yes:
folder = self.gameDirVar+'leveltemp/'
for f in os.listdir(folder):
if "level" in f:
print("removing", f)
os.remove(folder+f)
#again the exe references need to be changed
                if os.path.isfile('./EasyTF2Mapper.exe'):
subprocess.Popen('EasyTF2Mapper.exe')
else:
subprocess.Popen('python main.py')
sys.exit()
def create_prefab(self):
self.window = QDialog(self)
self.textLineEdit = QLineEdit()
self.nameLineEdit = QLineEdit()
self.vmfTextEdit = QLineEdit()
self.iconTextEdit = QLineEdit()
self.vmfBrowse = QPushButton("Browse",self)
self.vmfBrowse.clicked.connect(lambda: self.vmfTextEdit.setText(QFileDialog.getOpenFileName(self, "Choose .vmf File", "/","*.vmf")[0]))
self.iconBrowse = QPushButton("Browse",self)
self.iconBrowse.clicked.connect(lambda: self.iconTextEdit.setText(QFileDialog.getOpenFileName(self, "Choose .jpg File", "/","*.jpg")[0]))
self.vmfLayout = QHBoxLayout()
self.vmfLayout.addWidget(self.vmfTextEdit)
self.vmfLayout.addWidget(self.vmfBrowse)
self.vmfBrowse.setWindowModality(Qt.NonModal)
self.iconLayout = QHBoxLayout()
self.iconLayout.addWidget(self.iconTextEdit)
self.iconLayout.addWidget(self.iconBrowse)
self.okay_btn = QPushButton("Create Prefab", self)
self.blankstring = QWidget()
self.okay_btn_layout = QHBoxLayout()
self.okay_btn_layout.addStretch(1)
self.okay_btn_layout.addWidget(self.okay_btn)
self.okay_btn.clicked.connect(self.create_run_func)
#self.rotCheckBox = QCheckBox()
self.expCheckBox = QCheckBox()
self.buggyText = QLabel("This is a pretty buggy tool at this point, and is mostly used by developers. Are you sure you want to do this? \n(exported prefabs can be found in the main directory, where the executable is.)")
self.sectionSelect = QComboBox()
#needs to have a cs:go version
if self.isTF:
self.sectionSelect.addItems(["Geometry","Map Layout","Fun/Other"])
else:
pass
self.radioLayout = QHBoxLayout()
self.radioTF2 = QRadioButton("TF2",self)
self.radioCSGO = QRadioButton("CS:GO",self)
        self.group = QButtonGroup()
        self.group.addButton(self.radioTF2)
self.group.addButton(self.radioCSGO)
self.group.setExclusive(True)
self.radioLayout.addWidget(self.radioTF2)
self.radioLayout.addWidget(self.radioCSGO)
self.form = QFormLayout()
self.form.addRow(self.buggyText)
self.form.addRow("Prefab Text:", self.textLineEdit)
self.form.addRow("Prefab Name:", self.nameLineEdit)
self.form.addRow("VMF file (.vmf):", self.vmfLayout)
self.form.addRow("Icon (.jpg):", self.iconLayout)
#self.form.addRow("Make Rotations?", self.rotCheckBox)
self.form.addRow("Export prefab?", self.expCheckBox)
self.form.addRow("Which section?",self.sectionSelect)
self.form.addRow("Which game?", self.radioLayout)
for i in range(5):
self.form.addRow(self.blankstring)
self.form.addRow(self.okay_btn_layout)
self.window.setGeometry(150,150,400,300)
self.window.setWindowTitle("Create Prefab")
        self.window.setWindowIcon(QIcon("icons/icon.ico"))
self.window.setLayout(self.form)
self.window.exec_()
def create_run_func(self):
if self.sectionSelect.currentIndex() == 2:
input_number = 'END'
else:
            input_number = self.index_section_list[self.sectionSelect.currentIndex()+1]
name_str = self.nameLineEdit.displayText().replace(' ','_')
form_list,t_list = [self.vmfTextEdit.displayText(),self.textLineEdit.displayText(),self.iconTextEdit.displayText(),self.nameLineEdit.displayText()],[]
form_dict = {1:'Prefab Text',2:'Prefab Name',3:'VMF file',4:'Icon'}
if self.vmfTextEdit.displayText() != '' and self.textLineEdit.displayText() != '' and self.iconTextEdit.displayText() != '' and self.nameLineEdit.displayText() != '':
QMessageBox.information(self, "Files Created, restart to see the prefab.",createPrefab.create(self.vmfTextEdit.displayText(), name_str, self.textLineEdit.displayText(), self.iconTextEdit.displayText(),self.expCheckBox.isChecked(),input_number,self.sectionSelect.currentIndex(),self.radioTF2.isChecked()))
restart_btn = QPushButton("Restart")
later_btn = QPushButton("Later")
choice = QMessageBox(self)
choice.setIcon(QMessageBox.Question)
choice.setWindowTitle("Prefab Successfully Created")
choice.setText("Program must be restarted for changes to take effect.")
choice.setInformativeText("Restart? You will lose any unsaved progress.")
choice.addButton(restart_btn, QMessageBox.YesRole)
choice.addButton(later_btn, QMessageBox.NoRole)
choice.setDefaultButton(later_btn)
#exe name change
if choice.exec_() == 0:
                if os.path.isfile('./EasyTF2Mapper.exe'):
subprocess.Popen('EasyTF2Mapper.exe')
else:
subprocess.Popen('python main.py')
sys.exit()
else:
for index,box in enumerate(form_list):
if box == '':
t_list.append(form_dict[index+1])
err = ", ".join(t_list)
QMessageBox.critical(self, "Error", "Fill out all sections of the form. ("+err+")")
#self.importprefabs()
def import_prefab(self):
        name = QFileDialog.getOpenFileName(self, "Import Zipped Prefab", self.latest_path,"*.zip")[0]
        zipfile.ZipFile(name).extractall("")
        with open(self.gameDirVar+'prefabs/pfinfo.ezmd', 'rb') as pfile:
            lists = pickle.load(pfile)
        with open('info.pfb', 'rb') as pfb:
            lns = pickle.load(pfb)
#there need to be 4 items in the list that is info.pfb
#1) what section it is (int) [eg. 0]
#2) prefab name (str) [eg. "ground_prefab"]
#3) prefab icon dir (str) [eg. "icons/ground_prefab.png"]
#4) prefab text name (str) [eg. Ground Prefab]
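        #a hypothetical example of the unpickled list (values are illustrative only):
        #   lns = [0, "ground_prefab", "icons/ground_prefab.png", "Ground Prefab"]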
for list_index,line in enumerate(lns):
lists[list_index].append(line)
os.remove('info.pfb')
        with open(self.gameDirVar+'prefabs/pfinfo.ezmd', "wb") as tfile:
            pickle.dump(lists,tfile)
restart_btn = QPushButton("Restart")
later_btn = QPushButton("Later")
choice = QMessageBox(self)
choice.setIcon(QMessageBox.Question)
choice.setWindowTitle("Prefab Successfully Imported")
choice.setText("Program must be restarted for changes to take effect.")
choice.setInformativeText("Restart? You will lose any unsaved progress.")
choice.addButton(restart_btn, QMessageBox.YesRole)
choice.addButton(later_btn, QMessageBox.NoRole)
choice.setDefaultButton(later_btn)
#rename exe
if choice.exec_() == 0:
            if os.path.isfile('./EasyTF2Mapper.exe'):
subprocess.Popen('EasyTF2Mapper.exe')
else:
subprocess.Popen('python main.py')
sys.exit()
def open_console(self):
#contains dev console where you can manually run functions
self.console = QDialog()
self.console.setWindowTitle("Developer Console")
self.prev_text = QTextEdit("<Bald Engineers Developer Console>")
        self.prev_text.setText('''Developer console for Easy '''+self.gameVar+''' Mapper version r 1.0.1. Current commands are:
print <variable>, setlevel <int>, help, restart, exit, func <function>, wiki, py <python function>.\n''')
self.prev_text.setReadOnly(True)
self.curr_text = QLineEdit()
self.curr_text_btn = QPushButton("Enter")
self.curr_text_btn.clicked.connect(self.console_enter)
self.curr_text_layout = QHBoxLayout()
self.curr_text_layout.addWidget(self.curr_text)
self.curr_text_layout.addWidget(self.curr_text_btn)
self.console_close_btn = QPushButton("Close")
self.console_close_btn.clicked.connect(self.console.close)
self.console_form = QFormLayout()
self.console_form.addRow(self.prev_text)
self.console_form.addRow(self.curr_text_layout)
self.console_form.addRow(self.console_close_btn)
self.console.setLayout(self.console_form)
self.console.show()
def console_enter(self):
global level, levels
command = ""
char_num = 0
text = self.curr_text.displayText()
text_prefix = text + " --> "
command = text.split()[0]
try:
value = text.split()[1]
except IndexError:
value = ""
if command == "print":
try:
new_text = text_prefix + str(eval(value))
except Exception as e:
new_text = text_prefix + str(e)
elif command == "setlevel":
try:
if int(value)-1 < int(self.levels):
                    self.level = int(value)-1
                    new_text = text_prefix + "Level set to "+str(value)+"."
else:
new_text = text_prefix + "Level "+str(value+" is out of range.")
except Exception as e:
new_text = text_prefix + str(e)
elif command == "help":
            new_text = text_prefix + '''Developer console for Easy '''+self.gameVar+''' Mapper version r 1.0.1. Current commands are: print <variable>, func <function>, setlevel <int>, help, restart, exit, wiki, py <python function>'''
elif command == "exit":
self.close_application()
elif command == "restart":
self.close_application(True)
elif command == "pootis":
new_text = '<img src="icons/thedoobs.jpg">'
elif command == "sterries" or command == "jerries":
new_text = text_prefix + "Gimme all those berries, berries, berries!"
elif command == "sideshow":
new_text = ''
self.sideshow()
elif command == "func":
try:
eval("self."+value + "()")
new_text = text_prefix + "Function "+value+" has been run."
except Exception as e:
new_text = text_prefix + str(e)
elif command == "wiki":
try:
webbrowser.open("http://github.com/baldengineers/easytf2_mapper/wiki")
new_text = text_prefix + "Wiki has been opened in your default browser"
except Exception as e:
print(str(e))
elif command == "py":
try:
new_text = text_prefix + str(eval(value))
except Exception as e:
new_text = text_prefix + str(e)
else:
new_text = text_prefix + "\"" + command + "\" is not a valid command"
self.prev_text.append(new_text)
self.curr_text.setText("")
def undo(self, undo):
if self.history if undo else self.redo_history:
x = self.history[-1][0][0] if undo else self.redo_history[-1][1][0]
y = self.history[-1][0][1] if undo else self.redo_history[-1][1][1]
h_moduleName = self.history[-1][0][2] if undo else self.redo_history[-1][1][2]
h_icon = self.history[-1][0][3] if undo else self.redo_history[-1][1][3]
h_level = self.history[-1][0][4] if undo else self.redo_history[-1][1][4]
if h_level == None:
for button in self.grid_list:
if button.x == x and button.y == y:
button.click_func(self, x, y, button.btn_id, False, h_moduleName, h_icon)
break
else:
#self.level.setText("Level: " + str(h_level+1))
self.levellist.setCurrentRow(h_level)
#self.change_level(False, False, True)
self.redo_history.append(self.history.pop(-1)) if undo else self.history.append(self.redo_history.pop(-1))
else:
winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)
#format | click_func(parent, x, y, btn_id, clicked=True, h_moduleName="None", h_icon='')
#format | history.append((x,y,moduleName,self.icon,level), (x,y,moduleName,self.icon,level))
def sideshow(self):
self.gif("icons/sideshow.gif", (350,262,154,103), "SIDESHOW", "icons/ss.ico")
def heavy(self):
self.gif("icons/heavy.gif", (350,262,150,99), "DANCE HEAVY DANCE!")
    def gif(self, file, geo, title, icon="icons/icon.ico"):
        #store the label on a separate attribute so this method is not shadowed after the first call
        self.gif_label = QLabel()
        movie = QMovie(file)
        self.gif_label.setMovie(movie)
        self.gif_label.setGeometry(geo[0],geo[1],geo[2],geo[3])
        self.gif_label.setWindowTitle(title)
        self.gif_label.setWindowIcon(QIcon(icon))
        self.gif_label.show()
        movie.start()
class GridChangeWindow(QDialog):
def __init__(self, parent, startup = False):
super(GridChangeWindow,self).__init__()
#parent - references the main window's attributes
#startup | Boolean | - if the window is being run when program starts up
self.startup = startup
if not self.startup:
parent.entity_list = []
parent.iconlist = []
parent.totalblocks = []
parent.grid_list = []
self.widthSpin = QSpinBox()
self.heightSpin = QSpinBox()
for spin in [self.widthSpin, self.heightSpin]:
spin.setRange(0,1000)
spin.setSingleStep(5)
spin.setValue(5)
self.okay_btn = QPushButton("OK",self)
self.okay_btn.clicked.connect(lambda: self.clickFunction(parent))
self.form = QFormLayout()
self.form.addRow("Set Grid Width:",self.widthSpin)
self.form.addRow("Set Grid Height:",self.heightSpin)
#self.form.addRow("Set Amount of Levels:",self.text3)
if self.startup:
self.radioTF2 = QRadioButton("&TF2",self)
self.radioTF2.setChecked(True)
self.radioTF2.setWhatsThis("TF2- The best game xd")
self.radioCSGO = QRadioButton("&CS:GO",self)
self.group = QButtonGroup()
self.group.addButton(self.radioTF2)
self.group.addButton(self.radioCSGO)
self.group.setExclusive(True)
self.radioLayout = QHBoxLayout()
self.radioLayout.addWidget(self.radioTF2)
self.radioLayout.addWidget(self.radioCSGO)
self.form.addRow("Choose game:",self.radioLayout)
self.form.addRow(self.okay_btn)
self.setLayout(self.form)
self.setWindowTitle("Set Grid Size")
        self.setWindowIcon(QIcon("icons/icon.ico"))
self.exec_()
def clickFunction(self, parent):
self.hide()
self.deleteLater()
if self.startup:
parent.isTF = self.radioTF2.isChecked()
def returnVal(self):
return (self.widthSpin.value(), self.heightSpin.value(), 1)
def closeEvent(self, event):
if self.startup:
sys.exit()
if __name__ == '__main__':
#Main Program
app = QApplication(sys.argv)
main = MainWindow()
sys.exit(app.exec_())
| gpl-3.0 | 8,250,403,391,271,463,000 | 40.474209 | 316 | 0.587591 | false |
javachengwc/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf/table.py | 96 | 9406 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import TABLENS
from element import Element
# Autogenerated
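# A small usage sketch (not part of the autogenerated code): each factory below simply builds
# a generic Element with a table-namespace qualified name, e.g.
#
#   nr = NamedRange(name="MyRange", cellrangeaddress="$Sheet1.$A$1:$B$2")
#
# The attribute names shown above are assumptions for illustration; consult the ODF
# specification for the attributes each element actually allows.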
def Body(**args):
return Element(qname = (TABLENS,'body'), **args)
def CalculationSettings(**args):
return Element(qname = (TABLENS,'calculation-settings'), **args)
def CellAddress(**args):
return Element(qname = (TABLENS,'cell-address'), **args)
def CellContentChange(**args):
return Element(qname = (TABLENS,'cell-content-change'), **args)
def CellContentDeletion(**args):
return Element(qname = (TABLENS,'cell-content-deletion'), **args)
def CellRangeSource(**args):
return Element(qname = (TABLENS,'cell-range-source'), **args)
def ChangeDeletion(**args):
return Element(qname = (TABLENS,'change-deletion'), **args)
def ChangeTrackTableCell(**args):
return Element(qname = (TABLENS,'change-track-table-cell'), **args)
def Consolidation(**args):
return Element(qname = (TABLENS,'consolidation'), **args)
def ContentValidation(**args):
return Element(qname = (TABLENS,'content-validation'), **args)
def ContentValidations(**args):
return Element(qname = (TABLENS,'content-validations'), **args)
def CoveredTableCell(**args):
return Element(qname = (TABLENS,'covered-table-cell'), **args)
def CutOffs(**args):
return Element(qname = (TABLENS,'cut-offs'), **args)
def DataPilotDisplayInfo(**args):
return Element(qname = (TABLENS,'data-pilot-display-info'), **args)
def DataPilotField(**args):
return Element(qname = (TABLENS,'data-pilot-field'), **args)
def DataPilotFieldReference(**args):
return Element(qname = (TABLENS,'data-pilot-field-reference'), **args)
def DataPilotGroup(**args):
return Element(qname = (TABLENS,'data-pilot-group'), **args)
def DataPilotGroupMember(**args):
return Element(qname = (TABLENS,'data-pilot-group-member'), **args)
def DataPilotGroups(**args):
return Element(qname = (TABLENS,'data-pilot-groups'), **args)
def DataPilotLayoutInfo(**args):
return Element(qname = (TABLENS,'data-pilot-layout-info'), **args)
def DataPilotLevel(**args):
return Element(qname = (TABLENS,'data-pilot-level'), **args)
def DataPilotMember(**args):
return Element(qname = (TABLENS,'data-pilot-member'), **args)
def DataPilotMembers(**args):
return Element(qname = (TABLENS,'data-pilot-members'), **args)
def DataPilotSortInfo(**args):
return Element(qname = (TABLENS,'data-pilot-sort-info'), **args)
def DataPilotSubtotal(**args):
return Element(qname = (TABLENS,'data-pilot-subtotal'), **args)
def DataPilotSubtotals(**args):
return Element(qname = (TABLENS,'data-pilot-subtotals'), **args)
def DataPilotTable(**args):
return Element(qname = (TABLENS,'data-pilot-table'), **args)
def DataPilotTables(**args):
return Element(qname = (TABLENS,'data-pilot-tables'), **args)
def DatabaseRange(**args):
return Element(qname = (TABLENS,'database-range'), **args)
def DatabaseRanges(**args):
return Element(qname = (TABLENS,'database-ranges'), **args)
def DatabaseSourceQuery(**args):
return Element(qname = (TABLENS,'database-source-query'), **args)
def DatabaseSourceSql(**args):
return Element(qname = (TABLENS,'database-source-sql'), **args)
def DatabaseSourceTable(**args):
return Element(qname = (TABLENS,'database-source-table'), **args)
def DdeLink(**args):
return Element(qname = (TABLENS,'dde-link'), **args)
def DdeLinks(**args):
return Element(qname = (TABLENS,'dde-links'), **args)
def Deletion(**args):
return Element(qname = (TABLENS,'deletion'), **args)
def Deletions(**args):
return Element(qname = (TABLENS,'deletions'), **args)
def Dependencies(**args):
return Element(qname = (TABLENS,'dependencies'), **args)
def Dependency(**args):
return Element(qname = (TABLENS,'dependency'), **args)
def Detective(**args):
return Element(qname = (TABLENS,'detective'), **args)
def ErrorMacro(**args):
return Element(qname = (TABLENS,'error-macro'), **args)
def ErrorMessage(**args):
return Element(qname = (TABLENS,'error-message'), **args)
def EvenColumns(**args):
return Element(qname = (TABLENS,'even-columns'), **args)
def EvenRows(**args):
return Element(qname = (TABLENS,'even-rows'), **args)
def Filter(**args):
return Element(qname = (TABLENS,'filter'), **args)
def FilterAnd(**args):
return Element(qname = (TABLENS,'filter-and'), **args)
def FilterCondition(**args):
return Element(qname = (TABLENS,'filter-condition'), **args)
def FilterOr(**args):
return Element(qname = (TABLENS,'filter-or'), **args)
def FirstColumn(**args):
return Element(qname = (TABLENS,'first-column'), **args)
def FirstRow(**args):
return Element(qname = (TABLENS,'first-row'), **args)
def HelpMessage(**args):
return Element(qname = (TABLENS,'help-message'), **args)
def HighlightedRange(**args):
return Element(qname = (TABLENS,'highlighted-range'), **args)
def Insertion(**args):
return Element(qname = (TABLENS,'insertion'), **args)
def InsertionCutOff(**args):
return Element(qname = (TABLENS,'insertion-cut-off'), **args)
def Iteration(**args):
return Element(qname = (TABLENS,'iteration'), **args)
def LabelRange(**args):
return Element(qname = (TABLENS,'label-range'), **args)
def LabelRanges(**args):
return Element(qname = (TABLENS,'label-ranges'), **args)
def LastColumn(**args):
return Element(qname = (TABLENS,'last-column'), **args)
def LastRow(**args):
return Element(qname = (TABLENS,'last-row'), **args)
def Movement(**args):
return Element(qname = (TABLENS,'movement'), **args)
def MovementCutOff(**args):
return Element(qname = (TABLENS,'movement-cut-off'), **args)
def NamedExpression(**args):
return Element(qname = (TABLENS,'named-expression'), **args)
def NamedExpressions(**args):
return Element(qname = (TABLENS,'named-expressions'), **args)
def NamedRange(**args):
return Element(qname = (TABLENS,'named-range'), **args)
def NullDate(**args):
return Element(qname = (TABLENS,'null-date'), **args)
def OddColumns(**args):
return Element(qname = (TABLENS,'odd-columns'), **args)
def OddRows(**args):
return Element(qname = (TABLENS,'odd-rows'), **args)
def Operation(**args):
return Element(qname = (TABLENS,'operation'), **args)
def Previous(**args):
return Element(qname = (TABLENS,'previous'), **args)
def Scenario(**args):
return Element(qname = (TABLENS,'scenario'), **args)
def Shapes(**args):
return Element(qname = (TABLENS,'shapes'), **args)
def Sort(**args):
return Element(qname = (TABLENS,'sort'), **args)
def SortBy(**args):
return Element(qname = (TABLENS,'sort-by'), **args)
def SortGroups(**args):
return Element(qname = (TABLENS,'sort-groups'), **args)
def SourceCellRange(**args):
return Element(qname = (TABLENS,'source-cell-range'), **args)
def SourceRangeAddress(**args):
return Element(qname = (TABLENS,'source-range-address'), **args)
def SourceService(**args):
return Element(qname = (TABLENS,'source-service'), **args)
def SubtotalField(**args):
return Element(qname = (TABLENS,'subtotal-field'), **args)
def SubtotalRule(**args):
return Element(qname = (TABLENS,'subtotal-rule'), **args)
def SubtotalRules(**args):
return Element(qname = (TABLENS,'subtotal-rules'), **args)
def Table(**args):
return Element(qname = (TABLENS,'table'), **args)
def TableCell(**args):
return Element(qname = (TABLENS,'table-cell'), **args)
def TableColumn(**args):
return Element(qname = (TABLENS,'table-column'), **args)
def TableColumnGroup(**args):
return Element(qname = (TABLENS,'table-column-group'), **args)
def TableColumns(**args):
return Element(qname = (TABLENS,'table-columns'), **args)
def TableHeaderColumns(**args):
return Element(qname = (TABLENS,'table-header-columns'), **args)
def TableHeaderRows(**args):
return Element(qname = (TABLENS,'table-header-rows'), **args)
def TableRow(**args):
return Element(qname = (TABLENS,'table-row'), **args)
def TableRowGroup(**args):
return Element(qname = (TABLENS,'table-row-group'), **args)
def TableRows(**args):
return Element(qname = (TABLENS,'table-rows'), **args)
def TableSource(**args):
return Element(qname = (TABLENS,'table-source'), **args)
def TableTemplate(**args):
return Element(qname = (TABLENS,'table-template'), **args)
def TargetRangeAddress(**args):
return Element(qname = (TABLENS,'target-range-address'), **args)
def TrackedChanges(**args):
return Element(qname = (TABLENS,'tracked-changes'), **args)
| apache-2.0 | 5,012,233,178,499,737,000 | 29.635179 | 80 | 0.68453 | false |
sgerhart/ansible | lib/ansible/modules/network/meraki/meraki_device.py | 43 | 15823 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Breit (@kbreit) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_device
short_description: Manage devices in the Meraki cloud
version_added: "2.7"
description:
- Visibility into devices associated to a Meraki environment.
notes:
- This module does not support claiming of devices or licenses into a Meraki organization.
- More information about the Meraki API can be found at U(https://dashboard.meraki.com/api_docs).
- Some of the options are likely only used for developers within Meraki.
options:
state:
description:
- Query an organization.
choices: [absent, present, query]
default: query
org_name:
description:
- Name of organization.
- If C(clone) is specified, C(org_name) is the name of the new organization.
aliases: [ organization ]
org_id:
description:
- ID of organization.
net_name:
description:
- Name of a network.
aliases: [network]
net_id:
description:
- ID of a network.
serial:
description:
- Serial number of a device to query.
hostname:
description:
- Hostname of network device to search for.
aliases: [name]
model:
description:
- Model of network device to search for.
tags:
description:
- Space delimited list of tags to assign to device.
lat:
description:
- Latitude of device's geographic location.
- Use negative number for southern hemisphere.
aliases: [latitude]
lng:
description:
- Longitude of device's geographic location.
- Use negative number for western hemisphere.
aliases: [longitude]
address:
description:
- Postal address of device's location.
move_map_marker:
description:
- Whether or not to set the latitude and longitude of a device based on the new address.
- Only applies when C(lat) and C(lng) are not specified.
type: bool
serial_lldp_cdp:
description:
- Serial number of device to query LLDP/CDP information from.
lldp_cdp_timespan:
description:
- Timespan, in seconds, used to query LLDP and CDP information.
- Must be less than 1 month.
serial_uplink:
description:
- Serial number of device to query uplink information from.
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Query all devices in an organization.
meraki_device:
auth_key: abc12345
org_name: YourOrg
state: query
delegate_to: localhost
- name: Query all devices in a network.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
state: query
delegate_to: localhost
- name: Query a device by serial number.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
serial: ABC-123
state: query
delegate_to: localhost
- name: Lookup uplink information about a device.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
serial_uplink: ABC-123
state: query
delegate_to: localhost
- name: Lookup LLDP and CDP information about devices connected to specified device.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
serial_lldp_cdp: ABC-123
state: query
delegate_to: localhost
- name: Lookup a device by hostname.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
hostname: main-switch
state: query
delegate_to: localhost
- name: Query all devices of a specific model.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
model: MR26
state: query
delegate_to: localhost
- name: Update information about a device.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
state: present
serial: '{{serial}}'
name: mr26
address: 1060 W. Addison St., Chicago, IL
lat: 41.948038
lng: -87.65568
tags: recently-added
delegate_to: localhost
- name: Claim a device into a network.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
serial: ABC-123
state: present
delegate_to: localhost
- name: Remove a device from a network.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
serial: ABC-123
state: absent
delegate_to: localhost
'''
RETURN = r'''
response:
description: Data returned from Meraki dashboard.
type: dict
returned: info
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils._text import to_native
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
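# Pad the tag string with spaces; the dashboard appears to return device tags as one
# space-delimited, space-padded string, so this keeps the later comparison in
# is_update_required() consistent (inferred from the module's usage, not API docs).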
def format_tags(tags):
return " {tags} ".format(tags=tags)
def is_device_valid(meraki, serial, data):
for device in data:
if device['serial'] == serial:
return True
return False
def get_org_devices(meraki, org_id):
path = meraki.construct_path('get_all_org', org_id=org_id)
response = meraki.request(path, method='GET')
if meraki.status != 200:
meraki.fail_json(msg='Failed to query all devices belonging to the organization')
return response
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(state=dict(type='str', choices=['absent', 'present', 'query'], default='query'),
net_name=dict(type='str', aliases=['network']),
net_id=dict(type='str'),
serial=dict(type='str'),
serial_uplink=dict(type='str'),
serial_lldp_cdp=dict(type='str'),
lldp_cdp_timespan=dict(type='int'),
hostname=dict(type='str', aliases=['name']),
model=dict(type='str'),
tags=dict(type='str'),
lat=dict(type='float', aliases=['latitude']),
lng=dict(type='float', aliases=['longitude']),
address=dict(type='str'),
move_map_marker=dict(type='bool'),
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
meraki = MerakiModule(module, function='device')
if meraki.params['serial_lldp_cdp'] and not meraki.params['lldp_cdp_timespan']:
meraki.fail_json(msg='lldp_cdp_timespan is required when querying LLDP and CDP information')
if meraki.params['net_name'] and meraki.params['net_id']:
meraki.fail_json(msg='net_name and net_id are mutually exclusive')
meraki.params['follow_redirects'] = 'all'
query_urls = {'device': '/networks/{net_id}/devices'}
query_org_urls = {'device': '/organizations/{org_id}/inventory'}
query_device_urls = {'device': '/networks/{net_id}/devices/'}
claim_device_urls = {'device': '/networks/{net_id}/devices/claim'}
bind_org_urls = {'device': '/organizations/{org_id}/claim'}
update_device_urls = {'device': '/networks/{net_id}/devices/'}
delete_device_urls = {'device': '/networks/{net_id}/devices/'}
meraki.url_catalog['get_all'].update(query_urls)
meraki.url_catalog['get_all_org'] = query_org_urls
meraki.url_catalog['get_device'] = query_device_urls
meraki.url_catalog['create'] = claim_device_urls
meraki.url_catalog['bind_org'] = bind_org_urls
meraki.url_catalog['update'] = update_device_urls
meraki.url_catalog['delete'] = delete_device_urls
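    # Register this module's endpoint templates in the shared URL catalog;
    # meraki.construct_path() fills in the {net_id}/{org_id} placeholders later.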
payload = None
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
# FIXME: Work with Meraki so they can implement a check mode
if module.check_mode:
meraki.exit_json(**meraki.result)
# execute checks for argument completeness
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if org_id is None:
org_id = meraki.get_org_id(meraki.params['org_name'])
nets = meraki.get_nets(org_id=org_id)
net_id = None
if meraki.params['net_id'] or meraki.params['net_name']:
net_id = meraki.params['net_id']
if net_id is None:
net_id = meraki.get_net_id(net_name=meraki.params['net_name'], data=nets)
if meraki.params['state'] == 'query':
if meraki.params['net_name'] or meraki.params['net_id']:
device = []
if meraki.params['serial']:
path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial']
request = meraki.request(path, method='GET')
device.append(request)
meraki.result['data'] = device
elif meraki.params['serial_uplink']:
path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial_uplink'] + '/uplink'
meraki.result['data'] = (meraki.request(path, method='GET'))
elif meraki.params['serial_lldp_cdp']:
if meraki.params['lldp_cdp_timespan'] > 2592000:
meraki.fail_json(msg='LLDP/CDP timespan must be less than a month (2592000 seconds)')
path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial_lldp_cdp'] + '/lldp_cdp'
path = path + '?timespan=' + str(meraki.params['lldp_cdp_timespan'])
device.append(meraki.request(path, method='GET'))
meraki.result['data'] = device
elif meraki.params['hostname']:
path = meraki.construct_path('get_all', net_id=net_id)
devices = meraki.request(path, method='GET')
for unit in devices:
if unit['name'] == meraki.params['hostname']:
device.append(unit)
meraki.result['data'] = device
elif meraki.params['model']:
path = meraki.construct_path('get_all', net_id=net_id)
devices = meraki.request(path, method='GET')
device_match = []
for device in devices:
if device['model'] == meraki.params['model']:
device_match.append(device)
meraki.result['data'] = device_match
else:
path = meraki.construct_path('get_all', net_id=net_id)
request = meraki.request(path, method='GET')
meraki.result['data'] = request
else:
path = meraki.construct_path('get_all_org', org_id=org_id)
devices = meraki.request(path, method='GET')
if meraki.params['serial']:
for device in devices:
if device['serial'] == meraki.params['serial']:
meraki.result['data'] = device
else:
meraki.result['data'] = devices
elif meraki.params['state'] == 'present':
device = []
if meraki.params['hostname']:
query_path = meraki.construct_path('get_all', net_id=net_id)
device_list = meraki.request(query_path, method='GET')
if is_device_valid(meraki, meraki.params['serial'], device_list):
payload = {'name': meraki.params['hostname'],
'tags': format_tags(meraki.params['tags']),
'lat': meraki.params['lat'],
'lng': meraki.params['lng'],
'address': meraki.params['address'],
'moveMapMarker': meraki.params['move_map_marker'],
}
query_path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial']
device_data = meraki.request(query_path, method='GET')
ignore_keys = ['lanIp', 'serial', 'mac', 'model', 'networkId', 'moveMapMarker', 'wan1Ip', 'wan2Ip']
if meraki.is_update_required(device_data, payload, optional_ignore=ignore_keys):
path = meraki.construct_path('update', net_id=net_id) + meraki.params['serial']
updated_device = []
updated_device.append(meraki.request(path, method='PUT', payload=json.dumps(payload)))
meraki.result['data'] = updated_device
meraki.result['changed'] = True
else:
if net_id is None:
device_list = get_org_devices(meraki, org_id)
if is_device_valid(meraki, meraki.params['serial'], device_list) is False:
payload = {'serial': meraki.params['serial']}
path = meraki.construct_path('bind_org', org_id=org_id)
created_device = []
created_device.append(meraki.request(path, method='POST', payload=json.dumps(payload)))
meraki.result['data'] = created_device
meraki.result['changed'] = True
else:
query_path = meraki.construct_path('get_all', net_id=net_id)
device_list = meraki.request(query_path, method='GET')
if is_device_valid(meraki, meraki.params['serial'], device_list) is False:
if net_id:
payload = {'serial': meraki.params['serial']}
path = meraki.construct_path('create', net_id=net_id)
created_device = []
created_device.append(meraki.request(path, method='POST', payload=json.dumps(payload)))
meraki.result['data'] = created_device
meraki.result['changed'] = True
elif meraki.params['state'] == 'absent':
device = []
query_path = meraki.construct_path('get_all', net_id=net_id)
device_list = meraki.request(query_path, method='GET')
if is_device_valid(meraki, meraki.params['serial'], device_list) is True:
path = meraki.construct_path('delete', net_id=net_id)
path = path + meraki.params['serial'] + '/remove'
request = meraki.request(path, method='POST')
meraki.result['changed'] = True
# in the event of a successful module execution, you will want to
# simple AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
| mit | 6,094,491,076,118,871,000 | 37.498783 | 122 | 0.5966 | false |
TheWardoctor/Wardoctors-repo | script.module.liveresolver/lib/liveresolver/__init__.py | 10 | 43372 | # -*- coding: utf-8 -*-
import re
from modules import client,webutils,cloudflare,decryptionUtils,cache,liveresolver_utils,convert
from modules.constants import resolver_dict
from modules.log_utils import log
from modules.liveresolver_utils import *
import urlparse,urllib,base64
from BeautifulSoup import BeautifulSoup as bs
global limit
limit=0
from modules import constants
FLASH = constants.flash_ver()
'''
Pass any url containing a video to this function.
It will try to find the embedded video and resolve it, returning a resolved,
playable video link.
cache_timeout (in hours) - how long to cache the found stream link for the given page.
html - optionally pass the page's html content; the resolver will then search it for
embedded links instead of requesting the given url and searching the response.
'''
def resolve(url, cache_timeout=3, html=None, title='Video',icon='x'):
try:
log("Resolver called with url: " + url)
resolved=None
if html==None:
resolved=resolve_it(url,title=title)
if resolved==None:
if html==None and cache_timeout!=0:
#semi-cached resolving
url=cache.get(find_link,cache_timeout,url)
else:
url = find_link(url,html=html)
resolved=url
url=resolve_it(url,title=title,icon=icon)
if url!=None:
resolved=url
log("Resolved url: " + resolved)
return resolved
except:
log("Failed to find link.")
return url
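# Illustrative usage sketch (the page URL below is a placeholder, not a real stream):
#   import liveresolver
#   page = 'http://example.com/some-live-stream-page'
#   if liveresolver.isValid(page):
#       stream = liveresolver.resolve(page, cache_timeout=3, title='Match')
#   'stream' is a playable link (m3u8/rtmp/flv/plugin://...); on failure the function
#   logs and falls back to returning the url unresolved (see the except branch above).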
'''
Check if your video link is resolvable through the liveresolver module.
'''
def isValid(url):
return prepare(urlparse.urlparse(url).netloc) in resolver_dict.keys()
'''
Flush the liveresolver cache.
'''
def delete_cache():
cache.clear()
'''
Not intended for external use.
This method is used internally for resolving the found link.
'''
def resolve_it(url, title='Video',icon='x'):
if '.m3u8' in url or 'rtmp:' in url or '.flv' in url or '.mp4' in url or '.ts' in url or url.startswith('plugin://'):
if '.m3u8' in url and '|' not in url:
url += '|%s' % urllib.urlencode({'User-Agent': client.agent()})
if '.ts' in url:
url = 'plugin://plugin.video.f4mTester/?name=%s&iconImage=%s&streamtype=TSDOWNLOADER&url='%(urllib.quote(title),urllib.quote(icon)) + urllib.quote(url)
return url
if '.f4m' in url:
from resolvers import f4m
resolved = f4m.resolve(url)
return resolved
if url.startswith('acestream://') or url.startswith('sop://') or '.acelive' in url:
from resolvers import sop_ace
resolved = sop_ace.resolve(url, title)
return resolved
netloc = prepare(urlparse.urlparse(url).netloc)
if netloc in resolver_dict.keys():
resolver = resolver_dict[netloc]
log("Calling resolver: " + resolver)
exec "from resolvers import %s"%resolver
resolved = eval(resolver+".resolve(url)")
return resolved
else:
return
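# find_link() fetches the page (unless html is supplied) and hands the markup to every
# module-level function whose name contains 'finder'; the first finder that returns a
# value wins and its result is passed back up to resolve().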
def find_link(url, html=''):
log('Finding in : %s'%url)
try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
except: referer = 'http://' + urlparse.urlparse(url).netloc
url = manual_url_fix(url)
host = urlparse.urlparse(url).netloc
headers = {'Referer':referer, 'Host':host, 'User-Agent' : client.agent(), 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' : 'en-US,en;q=0.5'}
if html=='' or html is None:
html = client.request(url, headers=headers)
ws = ['livetvcdn','shadow','blog']
if any(w in url for w in ws) and 'goto/' not in url :
import requests
s = requests.Session()
s.headers = headers
r = s.get(url)
html = r.text
ref=url
fs=list(globals().copy())
for f in fs:
if 'finder' in f:
resolved = eval (f+"(html,ref)")
if resolved:
log('Resolved with %s: %s'%(f,resolved))
return resolved
break
return
# embedded iframes
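# finder1 recurses into embedded iframes via find_link(); the module-level 'limit'
# counter caps nested iframe visits at 25 to avoid redirect loops.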
def finder1(html,url):
html = html.replace('/adplus/adplus.html?id=','')
try:html = urllib.unquote(html)
except:pass
global limit
limit+=1
ref=url
try:
urls = re.findall('<i?frame\s*.+?src=(?:\'|\")(.+?)(?:\'|\")',html,flags=re.IGNORECASE)
urly = client.parseDOM(html, "iframe", ret="src")
urlc = re.findall('top.location.href\s*=\s*[\'\"](.+?axe-tv[^\'\"]+)[\'\"]',html)
for url in urlc:
if 'sky-sports-1' not in url and 'fox1ushd' not in url:
urls.append(url)
urls += urly
try:
urls.append(re.findall("playStream\('iframe', '(.+?)'\)",html)[0])
except: pass
urls += re.findall('<a.+?href=[\'\"](/live-.+?stream.+?)[\'\"]',html)
urls += re.findall('(http://www.hdmyt.info/(?:channel|player).php\?file=[^"\']+)["\']',html)
from random import shuffle
for url in urls:
url = url.replace('https','http')
if 'c4.zedo' in url or 'ProtectFile.File' in url or 'adServe' in url or 'facebook' in url or 'banner' in url:
continue
elif "micast" in url or 'turbocast' in url:
return finder47(html,ref)
elif 'lshstream' in url:
return finder2(url,url)
rr = resolve_it(url)
if rr:
return rr
uri = manual_fix(url,ref)
if limit>=25:
log("Exiting - iframe visit limit reached")
return
resolved = find_link(uri)
if resolved:
break
headers = {'User-Agent': client.agent(), 'Referer': ref}
if '.m3u8' in resolved and '|' not in resolved:
headers.update({'X-Requested-With':constants.get_shockwave(), 'Host':urlparse.urlparse(resolved).netloc, 'Connection':'keep-alive'})
resolved += '|%s' % urllib.urlencode(headers)
return resolved
except:
return
#lsh stream
def finder2(html,url):
try:
reg = re.compile('(http://(?:www.)?lshstream.com[^\"\']+)')
url = re.findall(reg,html)[0]
return url
except:
try:
reg = re.compile('fid=[\"\'](.+?)[\"\'].+?lshstream.+?.com/embed.js')
fid = re.findall(reg,html)[0]
url = 'http://www.lshstreams.com/embed.php?u=%s&vw=720&vh=420&live.realstreamunited.com=%s'%(fid,url)
return url
except:
return
#castalba
def finder3(html,url):
try:
reg=re.compile('id=[\"\']([^\"\']+)[\"\'];.+?castalba.tv/.+?.js')
id=re.findall(reg,html)[0]
url = 'http://castalba.tv/embed.php?cid=%s&wh=600&ht=380&referer=%s'%(id,url)
return url
except:
return
#jw_config
def finder4(html,url):
ref = url
try:
links = re.compile('file\s*:\s*[\"\']([^\"\']+)[\"\']').findall(html)
for link in links:
if '.png' in link or link == '.flv':
continue
if '.f4m' in link:
link = link+'?referer=%s'%url
if '.m3u8' in link and '|' not in link:
link += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':constants.get_shockwave(), 'Host':urlparse.urlparse(link).netloc, 'Connection':'keep-alive','Accept':'*/*'})
return link
except:
return
#vlc_config
def finder5(html,url):
try:
soup=bs(html)
try:
link=soup.find('embed',{'id':'vlc'})
link=link['target']
except:
link=soup.find('embed',{'name':'vlc'})
link=link['target']
return link
except:
return
#sawlive
def finder6(html,url):
try:
uri = re.compile("[\"']([^\"\']*sawlive.tv\/embed\/[^\"'\/]+)\"").findall(html)[0]
page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(uri)[0]
host = urlparse.urlparse(uri).netloc
uri = 'http://sawlive.tv/embed/%s?referer=%s&host=%s' % (page,url,host)
return uri
except:
try:
uri = re.compile("src=(?:\'|\")(http:\/\/(?:www\.)?sawlive.tv\/embed\/.+?)(?:\'|\")").findall(html)[0]
page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(uri)[0]
host = urlparse.urlparse(uri).netloc
uri = 'http://sawlive.tv/embed/%s?referer=%s&host=%s' % (page,url,host)
return uri
except:
return
#yocast
def finder7(html,url):
try:
reg=re.compile('<script>fid\s*=\s*(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://www.yocast.tv/.+?.js(?:\'|\")')
id = re.findall(reg,html)[0]
url='http://www.yocast.tv/embed.php?live=%s&vw=600&vh=450'%id
return url
except:
return
#miplayer
def finder8(html,url):
try:
reg = re.compile("(http://(?:www\.)?miplayer.net/embed[^'\"]+)")
url = re.findall(reg,html)[0]
return url
except:
return
#castamp
def finder9(html,url):
try:
reg = re.compile("(http://(?:www.)?castamp.com/embed.php\?c=[^\"&]+)")
url = re.findall(reg,html)[0]
return url
except:
return
#04 stream
def finder10(html,url):
try:
reg = re.compile('04stream.com/\w+\.js\?stream=([^ "\'&]+)')
url = re.findall(reg,html)[0]
url = 'http://www.04stream.com/weed.js?stream=%s&width=600&height=460&str=is&link=1&cat=3'%url
return url
except:
return
#leton
def finder11(html,url):
try:
html = urllib.unquote(html)
reg = re.compile('leton.tv/player.php\?streampage=([^&]+)&')
url = re.findall(reg,html)[0]
url = 'http://leton.tv/player.php?streampage=%s&width=600&height=450'%url
return url
except:
return
#yotv.co
def finder12(html,url):
try:
ref=url
reg = re.compile("<script type='text/javascript'>\s*fid=(?:\'|\")(.+?)(?:\'|\");\s*v_width=.+?;\s*v_height=.+?;</script><script type='text/javascript' src='http://www.yotv.co/player.js'></script>")
url = re.findall(reg,html)[0]
url = 'http://www.yotv.co/embed.php?live=%s&vw=620&vh=490&referer=%s'%(url,ref)
return url
except:
return
#hdcast
def finder13(html,url):
try:
url = re.compile('src="(http://(?:www\.)?hdcast.me/embed[^\'"]+)').findall(html)[0]
return url
except:
pass
#zerocast
def finder14(html,url):
try:
ref=url
url = re.compile('zerocast\.(?:tv|in)/(?:channel|embed)?\.php\?a=(\d+)').findall(html)[0]
url = 'http://zerocast.tv/channel.php?a=%s&width=640&height=480&autostart=true'%url
return url
except:
pass
#castup
def finder15(html,url):
try:
ref = url
reg = '<script type="text/javascript">\s*fid=(?:\'|\")(.+?)(?:\'|\");.+?src="http://www.castup.tv/js/.+?.js">'
url = re.findall(reg,html)[0]
url = 'http://www.castup.tv/embed_2.php?channel=%s&vw=650&vh=410&referer=%s'%(url,ref)
return url
except:
return
#mybeststream
def finder16(html,url):
try:
ref=url
try:
id = re.findall('id=(?:\'|\")(\d+)(?:\'|\");width=.*?pt987.googlecode.com',html)[0]
except:
id = re.findall('id=[\"\']([^\"\']+)[\"\'];.+?mybeststream.xyz',html)[0]
url = 'http://mybeststream.xyz/gen_s.php?id=%s&width=640&height=385&referer=%s'%(id,ref)
return url
except:
pass
#sunhd
def finder17(html,url):
try:
ref=url
url = re.findall('src="(http://www.sunhd.info/channel.+?.php\?file=.+?)"',html)[0]
return url+'&referer=%s'%ref
except:
pass
#youtube
def finder18(html,url):
try:
url = re.findall('src="?(https?://(?:www.|)youtube(?:-nocookie)?.com.+?[^\'\"]+)',html)[0]
return url.replace('amp;','').replace('-nocookie','')
except:
return
#livestream
def finder19(html,url):
try:
url = re.findall('(http://(?:new\.)?livestream.com[^"]+)',html)[0]
if 'player' in url:
return url
except:
return
#privatestream
def finder20(html,url):
try:
try:
id = re.findall('privatestream.tv/player\?streamname=([^&]+)&', html)[0]
except:
id = re.findall('privatestream.tv/((?!player)[^\.&\?\=]+)',html)[0]
if id != 'js/jquery-1':
url = 'http://privatestream.tv/player?streamname=%s&width=640&height=490'%id
return url
else:
return
except:
return
#airq.tv
def finder21(html,url):
try:
id = re.findall('(?:SRC|src)="http://airq.tv/(\w+)',html)[0]
url = 'http://airq.tv/%s/'%id
return url
except:
return
#aliez
def finder22(html,url):
try:
ref = url
try:
id = re.findall('emb.aliez[\w\.]+?/player/live.php\?id=([^&"]+)',html)[0]
return 'http://emb.aliez.me/player/live.php?id=%s&w=728&h=480&referer=%s'%(id,ref)
except:
try:
id = re.findall('(?:94.242.255.35|195.154.44.194|aliez\.\w+)/player/(?:live|embed).php\?id=(\d+)',html)[0]
except:
id = re.findall('http://aliez.(?:me|tv)/live/(.+?)(?:/|"|\')',html)[0]
return 'http://emb.aliez.me/player/live.php?id=%s&w=728&h=480&referer=%s'%(id,ref)
return
except:
return
#p3g
def finder23(html,url):
try:
id = re.findall("channel='(.+?)',\s*g='.+?';</script><script type='text/javascript' src='http://p3g.tv/resources/scripts/p3g.js'",html)[0]
url = 'http://www.p3g.tv/embedplayer/%s/2/600/420'%id
return url
except:
return
#dinozap (not implemented)
def finder24(html,url):
try:
url = re.findall('(http://(?:www\.)?dinozap.info/redirect/channel.php\?id=[^"\']+)',html)[0]
return url
except:
return
#liveflashplayer
def finder25(html,url):
try:
id = re.findall("channel='(.+?)', g='.+?';</script><script type='text/javascript' src='http://www.liveflashplayer.net/resources/scripts/liveFlashEmbed.js'>",html)[0]
url = 'http://www.liveflashplayer.net/membedplayer/%s/1/620/430'%id
return url
except:
return
#laola1
def finder26(html,url):
try:
url = re.findall('(http://www.laola1.tv[^"]+)', html)[0]
return url
except:
pass
#ehftv
def finder27(html,url):
try:
url = re.findall('src=(?:\'|\")(http:\/\/(?:www\.)?ehftv.com(?:/|//)player\.php[^\'\"]+)',html)[0]
return url
except:
return
#zoomtv
def finder28(html,url):
try:
ref=url
try:
fid = re.findall('fid="(.+?)".+?zome.zoomtv.me/.+?.js',html)[0]
except:
f = re.findall('fid=([^;]+)',html)[0]
fid = re.findall('%s\s*=\s*[\"\']([^\"\']+)'%f,html)[0]
pid = re.findall('pid\s*=\s*(.+?);',html)[0]
url = 'http://www.zoomtv.me/embed.php?v=' + fid + '&vw=660&vh=450&referer=%s&pid=%s'%(ref,pid)
return url
except:
return
#streamlive
def finder29(html,url):
try:
ref = url
url = re.findall('src="(http://(?:www.)?streamlive.to/embed/[^"]+)"',html)[0]
url = url + '&referer=%s'%ref
return url
except:
return
#roja redirect links
def finder30(html,url):
try:
html = client.request(url, referer=urlparse.urlparse(url).netloc)
url = re.findall('href="(.+?)">click here...',html)[0]
resolved = find_link(url+'&referer=http://rojedirecta.me')
return resolved
except:
return
#iguide
def finder31(html,url):
try:
ref=url
url = re.findall('(http://(?:www.)?iguide.to/embed/[^"\']+)"',html)[0]
return url+'&referer='+ref
except:
return
#letgo
def finder32(html,url):
try:
id = re.findall('fid="(.+?)"; v_width=.+?; v_height=.+?;</script><script type="text/javascript" src="http://www.letgo.tv/js/embed.js"',html)[0]
url = 'http://www.letgo.tv/embed.php?channel=%s&vw=630&vh=450'%id
return url
except:
return
#streamup
def finder33(html,url):
ref = url
try:
id = re.findall("streamup.com/rooms/([^/\'\"?&\s]+)",html)[0]
url = 'http://streamup.com/%s'%id
return url
except:
try:
id = re.findall('streamup.com/([^/\'\"?&\s]+)/embed',html)[0]
url = 'http://streamup.com/%s'%(id)
return url
except:
return
#p2pcast
def finder34(html,url):
try:
ref = url
try:
id = re.findall('http://p2pcast.tv/(?:p2pembed|stream).php\?id=([^&]+)',html)[0]
except:
id = re.findall("id=[\"\'](.+?)[\"\'];.+?src=[\"\']http://js.p2pcast.+?.js",html)[0]
url = 'http://p2pcast.tv/stream.php?id=%s&referer=%s'%(id,ref)
return url
except:
return
def finder35(html,url):
try:
try:
id = re.findall('cast3d.tv/embed.php\?(?:u|channel)=([^&]+)&',html)[0]
except:
id = re.findall('fid\s*=\s*(?:\'|\")(.+?)(?:\'|\");.*\s*.+?src=(?:\'|\")http://www.cast3d.tv/js/.+?.js',html)[0]
url = 'http://www.cast3d.tv/embed.php?channel=%s&vw=600&vh=400'%id
return url
except:
return
#xvtr
def finder36(html,url):
try:
ref = url
id = re.findall("fid=\"(.+?)\".+?</script><script type='text/javascript' src='http://www.xvtr.pw/embed.js'></script>",html)[0]
url = 'http://www.xvtr.pw/channel/%s.htm?referer=%s'%(id,ref)
return url
except:
return
#acestream
def finder37(html,url):
try:
try:
ace = re.findall('this.load(?:Player|Torrent)\((?:\'|\")(.+?)(?:\'|\")',html)[0]
except:
ace = re.findall('"http://torrentstream.net/p/(.+?)"',html)[0]
url = 'plugin://program.plexus/?mode=1&url=%s&name=Video'%(ace)
return url
except:
return
#sopcast
def finder38(html,url):
try:
sop = re.findall("(sop://[^\"\']+)['\"]",html)[0]
url = 'plugin://program.plexus/?mode=2&url=%s&name=Video'%(sop)
return url
except:
return
#turbocast
def finder39(html,url):
try:
url = re.findall('(http://www.turbocast.tv[^\'\"]+)',html)[0]
return url
except:
try:
url = re.findall('(.+?turbocast.tv.+?)',url)[0]
return url
except:
return
#directstream
def finder40(html,url):
try:
ref=url
fid = re.findall('fid=(?:\'|\")(.+?)(?:\'|\").+?</script><script type="text/javascript" src="http://direct-stream.org/embedStream.js"',html)[0]
url = 'http://direct-stream.org/e.php?id=%s&vw=740&vh=490&referer=%s'%(fid,ref)
return url
except:
return
#pxstream
def finder42(html,url):
try:
ref=url
id = re.findall("file=(?:\'|\")(.+?)(?:\'|\");.+?src='http://pxstream.tv/.+?.js",html)[0]
url = 'http://pxstream.tv/embedrouter.php?file=%s&width=730&height=430&jwplayer=flash&referer=%s'%(id,ref)
return url
except:
return
#pushpublish
def finder43(html,url):
try:
ref=url
id = re.findall('fid="(.+?)";.+?</script><script type="text/javascript" src="http://www.pushpublish.tv/js/embed.js"',html)[0]
loc = (urlparse.urlparse(url).netloc).replace('www.','')
url ='http://www.pushpublish.tv/player.php?channel=%s&vw=650&vh=400&domain=%s&referer=%s'%(id,loc,ref)
return url
except:
return
#ucaster
def finder44(html,url):
try:
ref=url
id = re.findall('channel=[\'"]([^\'"]+)[\'"].*?ucaster.(?:eu|com)', html)[0]
url = 'http://www.embeducaster.com/membedplayer/%s/1/595/500?referer=%s'%(id,ref)
return url
except:
return
#rocktv
def finder45(html,url):
try:
ref=url
id = re.findall("fid=[\'\"]([^\'\"]+)[\'\"];.+?src=[\'\"]http://www.rocktv.co/player.+?.js",html)[0]
url = 'http://rocktv.co/embed.php?live=%s&vw=620&vh=490&referer=%s'%(id,ref)
return url
except:
return
#ezcast
def finder46(html,url):
try:
ref=url
id = re.findall("channel=(?:\'|\")(.+?)(?:\'|\").+?src=(?:\'|\")http://www.ezcast.tv/static/scripts/ezcast.js(?:\'|\")>",html)[0]
url = 'http://www.embedezcast.com/embedplayer/%s/1/790/420?referer=%s'%(id,ref)
return url
except:
return
#micast
def finder47(html,url):
try:
ref=url
try:
id = re.findall('micast.tv/.*?\.php\?ch=([^"\']+)',html)[0]
except:
try:
id = re.findall('turbocast.tv/.*?\.php\?ch=([^"]+)',html)[0]
except:
id = re.findall('(?:ca|ch)=(?:\'|\")(.+?)(?:\'|\").+?micast.tv/embed.js(?:\'|\")',html)[0]
url = 'http://micast.tv/iframe.php?ch=%s&referer=%s'%(id,ref)
return url
except:
return
#openlive
def finder48(html,url):
try:
ref=url
id = re.findall("file=(?:\'|\")(.+?)(?:\'|\").+?src=(?:\'|\")http://openlive.org/live.js(?:\'|\")>",html)[0]
url = 'http://openlive.org/embed.php?file=%s&width=640&height=380&referer=%s'%(id,ref)
return url
except:
return
#helper
def finder49(html,url):
try:
ch = re.findall('fid=(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://www.webspor.pw/HD/TV/info/channel.js(?:\'|\")>',html)[0]
url = 'http://worldsport.me/%s'%ch
return find_link(url)
except:
return
#sostart
def finder50(html,url):
try:
ref=url
id = re.findall("id=(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://.+?sostart.([^/]+)/.+?.js(?:\'|\")>",html)[0]
url = 'http://sostart.%s/stream.php?id=%s&width=630&height=450&referer=%s'%(id[1],id[0],ref)
return url
except:
return
#lsh
def finder52(html,url):
try:
ref=url
        id = re.findall('fid=(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://cdn.lshstream.com/embed.js(?:\'|\")>',html)[0]
        url = 'http://cdn.lshstream.com/embed.php?u=%s&referer=%s'%(id,ref)
return url
except:
return
#hqstream
def finder53(html,url):
try:
ref=url
id = re.findall('http://hqstream.tv/.+?\?streampage=([^&/ ]+)',html)[0]
url = 'http://hqstream.tv/player.php?streampage=%s&height=480&width=700&referer=%s'%(id,ref)
return url
except:
return
#jw rtmp
def finder54(html,url):
try:
rtmp = re.findall('jwplayer.+?file.?\s*:\s*[\"\']((?:rtmp|http)?://[^\"\']+)[\"\']',html)[0]
return rtmp
except:
return
#tutele
def finder55(html,url):
try:
ref = url
id = re.findall("channel=(?:\'|\")(.+?)(?:\'|\").+?src='http://tutelehd.com/embedPlayer.js'>",html)[0]
url = 'http://tutelehd.com/embed/embed.php?channel=%s&referer=%s'%(id,ref)
return url
except:
return
#janjua
def finder56(html,url):
try:
ref = url
id = re.findall("channel=(?:\'|\")(.+?)(?:\'|\").+?src=(?:\'|\")http://www.janjua.tv/resources/scripts/janjua.js(?:\'|\")>",html)[0]
url = 'http://www.janjua.tv/embedplayer/%s/1/500/400?referer=%s'%(id,ref)
return url
except:
return
#abcast
def finder57(html,url):
try:
ref = url
id = re.findall("file=(?:\'|\")(.+?)(?:\'|\").+?src=(?:\'|\")http://abcast.net/simple.js(?:\'|\")",html)[0]
url = 'http://abcast.net/embed.php?file=%s&referer=%s'%(id,ref)
return url
except:
return
#castfree
def finder58(html,url):
try:
ref = url
id = re.findall('castfree.me/channel.php\?a=(\d+)',html)[0]
url = 'http://www.castfree.me/embed.php?a=%s&id=&width=640&height=460&autostart=true&referer=%s'%(id,ref)
return url
except:
return
#dinozap
def finder59(html,url):
try:
ref = url
url = re.findall('[\"\'](http://(?:www.)?player(?:hd|app)\d+.pw/channel(?:fr)?.php\?file=[^"\']+)',html)[0]
return url + '&referer=' + ref
except:
return
#dinozap
def finder60(html,url):
try:
ref = url
id = re.findall('(?:www\.)?sitenow.me/channel.php\?file=([^"\']+)',html)[0]
        return 'http://www.sitenow.me/channel.php?file=%s&width=670&height=470&autostart=true&referer=%s'%(id,ref)
except:
return
#streamcasttv
def finder61(html,url):
try:
id = re.findall("file=(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://streamcasttv.biz/.+?.js",html)[0]
url ='http://streamcasttv.biz/embed.php?file=%s&referer=%s'%(id,url)
return url
except:
return
#rtmp
def finder63(html,url):
try:
swf = re.findall('src=(?:\'|\")(.+?.swf)',html)[0]
file, rtmp = re.findall('flashvars=(?:\'|\")file=(.+?)&.+?streamer=(.+?)&',html)[0]
url = rtmp + ' playpath=' + file +' swfUrl=' + swf + ' flashver=WIN\\2019,0,0,226 live=true timeout=15 swfVfy=true pageUrl=' + url
return url
except:
return
def finder64(html,url):
try:
url = re.findall('(http://vaughnlive.tv/embed/video/[^/\'"?&\s]+)',html)[0]
return url
except:
return
def finder65(html,url):
try:
referer = url
url = re.findall('src=(?:\'|\")(.+?)(?:\'|\").+?type="video/mp4"',html)[0]
if len(url)<10:
raise
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': referer})
return url
except:
return
#hdcast.org
def finder66(html,url):
try:
ref = url
id,id2 = re.findall('fid="(.+?)";.+?src="http://hdcast.org/(.+?).js">',html)[0]
url = 'http://www.hdcast.org/%s.php?u=%s&vw=854&vh=480&domain=%s&referer=%s'%(id2,id,urlparse.urlparse(ref).netloc,ref)
return url
except:
return
#serbiaplus
def finder67(html,url):
try:
if 'serbiaplus' not in url:
return
id = re.findall('fid="(.+?)";.+?src="/live.js"',html)[0]
url = 'http://serbiaplus.com/' + id
resolved = find_link(url)
return resolved
except:
pass
#streamking
def finder68(html,url):
try:
ref = url
url = re.findall('(http://streamking.cc/[^"\']+)(?:\'|\")',html)[0]
return url+'&referer=%s'%ref
except:
return
#beba
def finder69(html,url):
try:
url = re.findall('http://beba.ucoz.com/playerlive.html\?id=(.+?)$',url)[0]
return find_link(url)
except:
return
#stream-sports
def finder70(html,url):
try:
ref = url
url = re.findall('http://www.stream\-sports.eu/uploads/video.html\?id=(.+?)$',url)[0]
return url+'&referer=%s'%ref
except:
return
#ustream
def finder71(html,url):
try:
ref=url
url=re.findall('(https?://(?:www.)?ustream.tv/embed/.+?[^\'\"]+)',html)[0]
url+='&referer='+ref
return url
except:
return
#config finder
def finder72(html,ref):
try:
url = re.findall('src\s*:\s*\'(.+?(?:.m3u8)?)\'',html)[0]
if 'images/' in url:
return
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref})
return url
except:
pass
#config finder
def finder73(html,url):
try:
ref = url
url = re.findall('Player\(\{\n\s*source\:\s*[\'\"](.+?)[\'\"]\,',html)[0]
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref})
if 'ace/manifest' in url:
url = finder102(html,url)
return url
except:
return
#cast4u
def finder74(html,url):
try:
ref = url
id = re.findall('id=[\'\"](.+?)[\'\"].+?src=[\'\"]http://www.cast4u.tv/.+?.js',html)[0]
url = 'http://www.cast4u.tv/embed.php?live=%s&vw=620&vh=490&referer=%s'%(id,ref)
return url
except:
return
#m3u8 config finder
def finder75(html,url):
try:
ref = url
url = re.findall('file: window.atob\(\'(.+?)\'\),', html)[0]
file = base64.b64decode(url)
file += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':constants.get_shockwave(), 'Host':urlparse.urlparse(file).netloc, 'Connection':'keep-alive','Accept':'*/*'})
return file
except:
return
#direct stream 2nd finder
def finder76(html,url):
ref = url
try:
id = re.findall('fid=[\"\'](.+?)[\"\'];.+?data-rocketsrc="http://direct-stream.org/.+?.js',html)[0]
url ="http://direct-stream.org/e.php?id=%s&vw=700&vh=400&referer=%s"%(id,ref)
return url
except:
return
#zona priority
def finder77(html,url):
try:
html = urllib.unquote(html)
url = finder4(html,url)
if client.request(url) != None:
return url
return
except:
return
#weplayer
def finder78(html,url):
try:
id = re.findall("id=['\"](.+?)['\"];.+?src=['\"]http://weplayer.([^/]+)/.+?.js([^\s]+)",html)[0]
url = 'http://weplayer.%s/stream.php?id=%s&width=640&height=480&referer=%s'%(id[1],id[0],url)
if '-->' in id[2]:
return
return find_link(url)
except:
return
def finder79(html,url):
try:
ref = url
url = re.findall("playStream\('hls', '(.+?)'",html)[0]
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':constants.get_shockwave(), 'Host':urlparse.urlparse(url).netloc, 'Connection':'keep-alive','Accept':'*/*'})
return url
except:
return
#tvope
def finder80(html,ref):
try:
id = re.findall('c="(.+?)";.+?</script>\s*<script.+?src="http://i.tvope.com/js/.+?.js',html)[0]
url = 'http://tvope.com/emb/player.php?c=%s&w=700&h=480&referer=%s&d=www.popofthestreams.xyz'%(id,ref)
return url
except:
return
#dinozap
def finder81(html,url):
try:
ref = url
url = re.findall('[\"\'](https?://(?:www\.)?dinozap.info/redirect/channel.php\?id=[^"\']+)',html)[0]
return url + '&referer=' + ref
except:
return
#dinozap
def finder82(html,url):
try:
ref = url
url = re.findall('[\"\'](https?://(?:www\.)?tv.verdirectotv.org/channel.php\?file=[^"\']+)',html)[0]
return url + '&referer=' + ref
except:
return
#dinozap
def finder83(html,url):
try:
ref = url
url = re.findall('[\"\'](https?://(?:www\.)?dinostream.pw/channel.php\?file=[^"\']+)',html)[0]
return url + '&referer=' + ref
except:
return
#dinozap
def finder84(html,url):
try:
ref = url
url = re.findall('[\"\'](https?://(?:www\.)?(?:serverhd.eu|cast3d.me)/channel\w*\.php\?file=[^"\']+)',html)[0]
return url + '&referer=' + ref
except:
return
#dinozap
def finder85(html,url):
try:
ref = url
url = re.findall('[\"\'](https?://(?:www\.)?sstream.pw/channel.php\?file=[^"\']+)',html)[0]
return url + '&referer=' + ref
except:
return
#dinozap
def finder86(html,url):
try:
ref = url
url = re.findall('[\"\'](https?://(?:www\.)?ponlatv.com/channel.php\?file=[^"\']+)',html)[0]
return url + '&referer=' + ref
except:
return
#acestream
def finder90(html,ref):
try:
url = re.findall('(acestream://[^"\']+)["\']',html)[0]
return url
except:
return
#sopcast
def finder91(html,ref):
try:
url = re.findall('(sop://[^"\']+)["\']',html)[0]
return url
except:
return
#shadownet
def finder92(html,ref):
try:
url = re.findall('src=[\"\']([^\"\']+)[\"\'].+?mpeg',html)[0]
if 'rtmp' in url:
url+=' swfUrl=http://www.shadow-net.biz/javascript/videojs/flashls/video-js.swf flashver=%s live=true timeout=18 swfVfy=1 pageUrl=http://www.shadow-net.biz/'%FLASH
elif 'm3u8' in url:
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':constants.get_shockwave(), 'Host':urlparse.urlparse(url).netloc, 'Connection':'keep-alive','Accept':'*/*', 'Origin':'http://shadow.go.ro'})
return url
except:
return
#filmon
def finder93(html,ref):
try:
id = re.findall('filmon.(?:com|tv)/tv/channel/export\?channel_id=(\d+)',html)[0]
url = 'http://www.filmon.com/channel/' + id
return url
except:
return
#castto
def finder94(html,ref):
try:
id = re.findall('fid=["\'](.+?)["\'];.+?src=["\'](http://static.castto.me/js/.+?.js)', html)[0]
url = id[1]+'?id=%s&referer=%s'%(id[0],ref)
return url
except:
return
#redirect
def finder95(html,url):
try:
url = re.findall('<meta http-equiv="refresh".+?; url=(.+?)"',html)[0]
return find_link(url)
except:
return
#acelive
def finder96(html,url):
try:
url = re.findall('[\"\'](.+?.acelive.+?)[\"\']',html)[0]
return url
except:
return
#castasap
def finder97(html,url):
try:
ref = url
import requests
html = requests.get(url).text
chars = re.findall('&#(\d+)',html)
for c in chars:
html = html.replace('&#%s'%c, chr(int(c)))
html = html.replace(';','')
url = re.findall('src=[\"\'](http://www.(?:castasap|castflash|flashlive|fastflash).pw/embed.+?)[\"\']',html)[0]
url = add_args(url,{'referer':ref})
return url
except:
return
#deltatv
def finder98(html,ref):
try:
x,y = re.findall('id=[\'\"](.+?)[\'\"].+?src=[\'\"]http://deltatv.([^/]+)/.+?.js',html)[0]
url = 'http://deltatv.%s/stream.php?id=%s&width=640&height=480&referer=%s'%(y,x,ref)
return url
except:
return
#hdcast.info
def finder99(html,ref):
try:
id,rr = re.findall('fid=[\'\"](.+?)[\'\"].+?src=[\'\"]http://(?:www.)?hdcast.info/([^\.]+).js',html)[0]
url = 'http://www.hdcast.info/%s.php?live=%s&vw=620&vh=490&referer=%s'%(rr,id,ref)
return url
except:
return
#deltatv
def finder100(html,ref):
try:
url = re.findall('(http://deltatv.(?:pw|xyz)?/stream.php\?.+?[^"\']+)',html)[0]
url = url + '&referer=' + ref
return url
except:
return
#mybest
def finder103(html,ref):
try:
url = re.findall('(http://mybeststream.xyz.+?[^"\']+)',html)[0]
url = url + '&referer=' + ref
return url
except:
return
#blowfish decrypt
def finder104(html,ref):
try:
if 'Blowfish' not in html:
return
key = re.findall('new Blowfish\([\"\'](.+?)[\"\']\)',html)[0]
if len(key)>56:
key=key[:56]
crypted = re.findall('.decrypt\([\"\'](.+?)[\"\']\)',html)[0].decode("hex")
from modules import blowfish
cipher = blowfish.Blowfish(key)
decrypted = cipher.decrypt(crypted)
return find_link(ref,html=decrypted)
except:
return
#theactionlive
def finder101(html,ref):
try:
id = re.findall('id=[\"\'](.+?)[\"\'];.+?src=[\"\']http://theactionlive.com.+?.js',html)[0]
url = 'http://theactionlive.com?id=%s&referer=%s'%(id,ref)
return url
except:
return
#acestream
def finder102(html,ref):
try:
        url = 'acestream://' + re.findall('ace/manifest.m3u8\?id\=([^\'\"]+)[\'\"]',html)[0]
return url
except:
return
#kolstg
def finder105(html,ref):
try:
id = re.findall('fid=["\'](.+?)["\'];.+?src=["\']http://(?:www.)?kolstg.pw/.+?.js', html)[0]
url = 'http://www.hornos.moy.su/channel/'+ id+'.htm?referer=' + ref
return url
except:
return
#mips
def finder106(html,ref):
try:
try:
ch,e = re.findall('channel=[\'\"](.+?)[\'\"]\s*,\s*e=[\'\"](.+?)[\'\"].+?src=[\'\"]http://(?:www.)?mipsplayer.com/.+?.js',html)[0]
except:
e,ch = re.findall('[,\s]e=[\'\"](.+?)[\'\"]\s*,\s*channel=[\'\"](.+?)[\'\"].+?src=[\'\"]http://(?:www.)?mipsplayer.com/.+?.js',html)[0]
url = 'http://www.mipsplayer.com/membedplayer/'+ch+'/'+e+'/675/400?referer=' + ref
return url
except:
return
#m3u8
def finder107(html,ref):
try:
m3u8 = re.findall('playlist_url:\s*[\"\']([^\"\']+)',html)[0]
host = re.findall('cdn_host:\s*[\"\']([^\"\']+)',html)[0]
url = 'http://' + host + m3u8
url+='|%s' % urllib.urlencode({'Referer':ref, 'User-agent':client.agent()})
return url
except:
return
#streamsus
def finder108(html,ref):
try:
url = re.findall('Watch Live\s*<a href=[\"\'](.+?)[\"\']>Here',html)[0]
return find_link(url)
except:
return
#f4m
def finder109(html,ref):
try:
f4m = re.findall('name=[\"\']flashvars[\"\'].+?value=[\"\']src=([^&]+)&',html)[0]
url = urllib.unquote(f4m)
return url
except:
return
return
#zona4vip
def finder110(html,ref):
try:
if 'zona4vip' not in ref:
return
fid = re.findall('fid=[\"\'](.+?)[\"\'].+?src=[\"\']/live.js',html)[0]
url = 'http://www.zona4vip.com/'+ fid
return find_link(url)
except:
return
#veetle livetvcdn
def finder111(html,ref):
try:
id = re.findall('veetle&c=([^&]+)',ref)[0]
url = 'http://veetle.com/v/' + id
return url
except:
return
#vlc new
def finder112(html,ref):
try:
url = re.findall('version=[\"\']VideoLAN.VLCPlugin.2[\"\'].+?target=[\"\']([^\"\']+)',html)[0]
return url
except:
return
#lsh stream embed
def finder113(html,ref):
try:
fid = re.findall('fid=[\"\'](.+?)[\"\'].+?src=[\"\'].+?lshstream.com/embed.js',html)[0]
loc = urlparse.urlparse(ref).netloc
url = 'http://www.lshstream.com/embed.php?u=%s&vw=640&vh=360&domain=%s'%(fid,loc)
return find_link(url)
except:
return
#castamp
def finder114(html,ref):
try:
fid = re.findall('channel=[\"\'](.+?)[\"\'].+?src=[\"\'].+?castamp.com/embed.js',html)[0]
url = 'http://castamp.com/embed.php?c=%s&vwidth=640&vheight=380&referer=%s'%(fid,ref)
return url
except:
return
#bro.adca.st
def finder115(html,ref):
try:
id = re.findall('id=[\"\'](.+?)[\"\'].+?src=[\"\'].+?bro.adca.st/.+?.js',html)[0]
url = 'http://bro.adca.st/stream.php?id='+id+'&width=640&height=460&referer=' + ref + '&stretching=uniform'
return url
except:
try:
url = re.findall('(http://bro.adca.st/stream.php[^\"\']+)',html)[0]
url = url + '&referer=' + ref
return url
except:
return
#akamai rtmpe
def finder116(html,ref):
if 'akamai' in ref:
html = decryptionUtils.doDemystify(html)
swf,streamer,file,token = re.findall('flashplayer:[\"\']([^\"\']+)[\"\'],streamer:[\"\']([^\"\']+)[\"\'],file:[\"\']([^\"\']+)[\"\'],token:[\"\']([^\"\']+)[\"\']',html)[0]
swf = 'http://akamaistreaming.com/' + swf
url = '%s playpath=%s token=%s swfUrl=%s pageUrl=%s flashver=%s'%(streamer,file,token,swf,ref,constants.flash_ver())
return url
#zunox stream
def finder117(html,ref):
if 'zunox' in ref:
url = 'http://zunox.hk/players/' + re.findall('(proxy.php\?id=[^\"\']+)',html)[0]
h2 = client.request(url)
import json
j = json.loads(h2)
host = urlparse.urlparse(j['url']).netloc.split(':')[0].replace(':80','')
url = j['url'].replace(':80','') +'.flv' + '|%s' % urllib.urlencode({'User-agent':client.agent(),'X-Requested-With':constants.get_shockwave(),'Referer':ref, 'Host':host, 'Connection':'keep-alive','Accept-Encodeing':'gzip, deflate, lzma, sdch'})
return url
#sportstream365
def finder118(html,ref):
try:
try:
id = re.findall('"sportstream365.com.+?game=(\d+)',html)[0]
except:
id = re.findall('"sportstream365.com.+?game=(\d+)',ref)[0]
return 'http://sportstream365.com/?game=%s&referer=%s'%(id,ref)
except:
return
#cndhls
def finder119(html,ref):
try:
id = re.findall('id=[\"\'](.+?)[\"\'].+?src=[\"\'].+?cndhls.+?.js',html)[0]
d = (urlparse.urlparse(ref).netloc).replace('www.','')
url = 'http://www.cndhlsstream.pw/embed.php?channel='+id+'&vw=640&vh=385&domain=' + d + '&referer=' + ref
return url
except:
return
#superplayer
def finder120(html,ref):
try:
id = re.findall("id=['\"](.+?)['\"];.+?src=['\"].+?superplayer.+?.js",html)[0]
url = 'http://nowlive.xyz/embed.php?id=%s&width=640&height=480&referer=%s'%(id,ref)
        if '-->' in id:
return
return find_link(url)
except:
return
#scity
def finder121(html,url):
try:
ref=url
id = re.findall("id=(?:\'|\")(.+?)(?:\'|\");.+?src.+?scity.tv.+?.js",html)[0]
url = 'http://scity.tv/stream.php?id=%s&width=630&height=450&referer=%s'%(id,ref)
return url
except:
return
def finder123(html,ref):
try:
url = re.findall('mpegurl.+?src=[\"\']([^\"\']+)[\"\']',html)[0]
return url + '|%s' % urllib.urlencode({'Referer':ref,'X-Requested-With':constants.get_shockwave(),'User-agent':client.agent()})
except:
return
#streamify
def finder124(html,url):
try:
ref=url
id = re.findall("channel=[\"\']([^\"\']+)[\"\'].+?src.+?streamifyplayer.com.+?.js",html)[0]
url = 'http://www.streamifyplayer.com/embedplayer/%s/1/620/430?referer=%s'%(id,ref)
return url
except:
return
#youtube live
def finder125(html,url):
try:
if 'youtube-live' in html:
url = re.findall("(https?://(?:www.)?youtube.com/[^\"\']+)",html)[0]
return url
except:
return
#streamp2p
def finder126(html,url):
try:
url = re.findall('(http://(?:www.)?streamp2p.com[^\"\']+)[\"\']',html)[0]
return url
except:
return
def finder127(html,url):
try:
try:
html = urllib.unquote(html)
except:
pass
url = re.findall('src=(http.+?m3.+?[^&]+)&',html)[0]
if 'amis' in url:
url = url.strip() +'|User-Agent=Mozilla/5.0'
return url.strip()
except:
return
#akamaistreaming
def finder128(html,ref):
try:
id = re.findall("id=['\"](.+?)['\"].+?src=['\"].+?akamaistreaming.+?.js",html)[0]
url = 'http://akamaistreaming.com/zn.php?id=%s&width=640&height=385&referer=%s'%(id,ref)
return url
except:
return
def finder129(html,ref):
try:
id = re.findall("id=['\"](.+?)['\"].+?src=['\"].+?akamaistreaming.+?.js",html)[0]
url = 'http://akamaistreaming.com/zn.php?id=%s&width=640&height=385&referer=%s'%(id,ref)
return url
except:
return
| apache-2.0 | -6,076,613,945,339,557,000 | 28.768016 | 252 | 0.525224 | false |
jordanaluft/gnome-music | gnomemusic/widgets.py | 1 | 34979 | # Copyright (c) 2013 Vadim Rutkovsky <[email protected]>
# Copyright (c) 2013 Shivani Poddar <[email protected]>
# Copyright (c) 2013 Arnel A. Borja <[email protected]>
# Copyright (c) 2013 Seif Lotfy <[email protected]>
# Copyright (c) 2013 Sai Suman Prayaga <[email protected]>
# Copyright (c) 2013 Jackson Isaac <[email protected]>
# Copyright (c) 2013 Felipe Borges <[email protected]>
# Copyright (c) 2013 Giovanni Campagna <[email protected]>
# Copyright (c) 2013 Guillaume Quintard <[email protected]>
#
# GNOME Music is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GNOME Music is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with GNOME Music; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The GNOME Music authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and GNOME Music. This permission is above and beyond the permissions
# granted by the GPL license by which GNOME Music is covered. If you
# modify this code, you may extend this exception to your version of the
# code, but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version.
from gi.repository import Gtk, Gdk, Gd, GLib, GObject, Pango, Gio
from gi.repository import GdkPixbuf, Grl
from gettext import gettext as _, ngettext
from gnomemusic.grilo import grilo
from gnomemusic.albumArtCache import AlbumArtCache
from gnomemusic.player import DiscoveryStatus
from gnomemusic.playlists import Playlists, StaticPlaylists
from gnomemusic import log
import logging
logger = logging.getLogger(__name__)
ALBUM_ART_CACHE = AlbumArtCache.get_default()
NOW_PLAYING_ICON_NAME = 'media-playback-start-symbolic'
ERROR_ICON_NAME = 'dialog-error-symbolic'
try:
settings = Gio.Settings.new('org.gnome.Music')
MAX_TITLE_WIDTH = settings.get_int('max-width-chars')
except Exception as e:
MAX_TITLE_WIDTH = 20
logger.error("Error on setting widget max-width-chars: %s", str(e))
playlists = Playlists.get_default()
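# StarHandler attaches a clickable star cell renderer to a track list and, when the
# star is toggled, flips the favourite flag both in the tree model and in the Grilo
# database, then refreshes the static Favorites playlist.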
class StarHandler():
def __repr__(self):
return '<StarHandler>'
@log
def __init__(self, parent, star_index):
self.star_index = star_index
self.star_renderer_click = False
self.parent = parent
@log
def _add_star_renderers(self, list_widget, cols, hidden=False):
star_renderer = CellRendererClickablePixbuf(self.parent.view, hidden=hidden)
star_renderer.connect("clicked", self._on_star_toggled)
list_widget.add_renderer(star_renderer, lambda *args: None, None)
cols[0].clear_attributes(star_renderer)
cols[0].add_attribute(star_renderer, 'show_star', self.star_index)
@log
def _on_star_toggled(self, widget, path):
try:
_iter = self.parent.model.get_iter(path)
except TypeError:
return
try:
if self.parent.model.get_value(_iter, 9) == 2:
return
except AttributeError:
return
new_value = not self.parent.model.get_value(_iter, self.star_index)
self.parent.model.set_value(_iter, self.star_index, new_value)
song_item = self.parent.model.get_value(_iter, 5)
grilo.toggle_favorite(song_item) # toggle favorite status in database
playlists.update_static_playlist(StaticPlaylists.Favorites)
# Use this flag to ignore the upcoming _on_item_activated call
self.star_renderer_click = True
class AlbumWidget(Gtk.EventBox):
tracks = []
duration = 0
loadingIcon = ALBUM_ART_CACHE.get_default_icon(256, 256, True)
noArtworkIcon = ALBUM_ART_CACHE.get_default_icon(256, 256, False)
def __repr__(self):
return '<AlbumWidget>'
@log
def __init__(self, player, parentview):
Gtk.EventBox.__init__(self)
self.player = player
self.iterToClean = None
self.parentview = parentview
self.ui = Gtk.Builder()
self.ui.add_from_resource('/org/gnome/Music/AlbumWidget.ui')
self._create_model()
self.view = Gd.MainView(
shadow_type=Gtk.ShadowType.NONE
)
self.view.set_view_type(Gd.MainViewType.LIST)
self.album = None
self.header_bar = None
self.view.connect('item-activated', self._on_item_activated)
view_box = self.ui.get_object('view')
self.ui.get_object('scrolledWindow').set_placement(Gtk.CornerType.
TOP_LEFT)
self.view.connect('selection-mode-request', self._on_selection_mode_request)
child_view = self.view.get_children()[0]
child_view.set_margin_top(64)
child_view.set_margin_bottom(64)
child_view.set_margin_end(32)
self.view.remove(child_view)
view_box.add(child_view)
self.add(self.ui.get_object('AlbumWidget'))
self.star_handler = StarHandler(self, 9)
self._add_list_renderers()
self.get_style_context().add_class('view')
self.get_style_context().add_class('content-view')
self.view.get_generic_view().get_style_context().remove_class('view')
self.show_all()
@log
def _on_selection_mode_request(self, *args):
self.header_bar._select_button.clicked()
@log
def _on_item_activated(self, widget, id, path):
if self.star_handler.star_renderer_click:
self.star_handler.star_renderer_click = False
return
_iter = self.model.get_iter(path)
if self.model.get_value(_iter, 10) != DiscoveryStatus.FAILED:
if (self.iterToClean and self.player.playlistId == self.album):
item = self.model.get_value(self.iterToClean, 5)
title = AlbumArtCache.get_media_title(item)
self.model.set_value(self.iterToClean, 0, title)
# Hide now playing icon
self.model.set_value(self.iterToClean, 6, False)
self.player.set_playlist('Album', self.album, self.model, _iter, 5, 11)
self.player.set_playing(True)
@log
def _add_list_renderers(self):
list_widget = self.view.get_generic_view()
cols = list_widget.get_columns()
cols[0].set_min_width(100)
cols[0].set_max_width(200)
cells = cols[0].get_cells()
cells[2].set_visible(False)
cells[1].set_visible(False)
now_playing_symbol_renderer = Gtk.CellRendererPixbuf(xpad=0,
xalign=0.5,
yalign=0.5)
column_now_playing = Gtk.TreeViewColumn()
column_now_playing.set_fixed_width(48)
column_now_playing.pack_start(now_playing_symbol_renderer, False)
column_now_playing.set_cell_data_func(now_playing_symbol_renderer,
self._on_list_widget_icon_render, None)
list_widget.insert_column(column_now_playing, 0)
type_renderer = Gd.StyledTextRenderer(
xpad=16,
ellipsize=Pango.EllipsizeMode.END,
xalign=0.0
)
list_widget.add_renderer(type_renderer, lambda *args: None, None)
cols[0].clear_attributes(type_renderer)
cols[0].add_attribute(type_renderer, 'markup', 0)
durationRenderer = Gd.StyledTextRenderer(
xpad=16,
ellipsize=Pango.EllipsizeMode.END,
xalign=1.0
)
durationRenderer.add_class('dim-label')
list_widget.add_renderer(durationRenderer, lambda *args: None, None)
cols[0].clear_attributes(durationRenderer)
cols[0].add_attribute(durationRenderer, 'markup', 1)
self.star_handler._add_star_renderers(list_widget, cols)
def _on_list_widget_icon_render(self, col, cell, model, _iter, data):
if not self.player.currentTrackUri:
cell.set_visible(False)
return
if model.get_value(_iter, 10) == DiscoveryStatus.FAILED:
cell.set_property('icon-name', ERROR_ICON_NAME)
cell.set_visible(True)
elif model.get_value(_iter, 5).get_url() == self.player.currentTrackUri:
cell.set_property('icon-name', NOW_PLAYING_ICON_NAME)
cell.set_visible(True)
else:
cell.set_visible(False)
@log
def _create_model(self):
self.model = Gtk.ListStore(
GObject.TYPE_STRING, # title
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GdkPixbuf.Pixbuf, # icon
GObject.TYPE_OBJECT, # song object
GObject.TYPE_BOOLEAN, # item selected
GObject.TYPE_STRING,
GObject.TYPE_BOOLEAN,
GObject.TYPE_INT, # icon shown
GObject.TYPE_BOOLEAN,
GObject.TYPE_INT
)
@log
def update(self, artist, album, item, header_bar, selection_toolbar):
self.selection_toolbar = selection_toolbar
self.header_bar = header_bar
self.album = album
real_artist = item.get_string(Grl.METADATA_KEY_ARTIST)\
or item.get_author()\
or _("Unknown Artist")
self.ui.get_object('cover').set_from_pixbuf(self.loadingIcon)
ALBUM_ART_CACHE.lookup(item, 256, 256, self._on_look_up, None, real_artist, album)
self.duration = 0
self._create_model()
GLib.idle_add(grilo.populate_album_songs, item, self.add_item)
header_bar._select_button.connect(
'toggled', self._on_header_select_button_toggled)
header_bar._cancel_button.connect(
'clicked', self._on_header_cancel_button_clicked)
self.view.connect('view-selection-changed',
self._on_view_selection_changed)
self.view.set_model(self.model)
escaped_artist = GLib.markup_escape_text(artist)
escaped_album = GLib.markup_escape_text(album)
self.ui.get_object('artist_label').set_markup(escaped_artist)
self.ui.get_object('title_label').set_markup(escaped_album)
if (item.get_creation_date()):
self.ui.get_object('released_label_info').set_text(
str(item.get_creation_date().get_year()))
else:
self.ui.get_object('released_label_info').set_text('----')
self.player.connect('playlist-item-changed', self.update_model)
@log
def _on_view_selection_changed(self, widget):
items = self.view.get_selection()
self.selection_toolbar\
._add_to_playlist_button.set_sensitive(len(items) > 0)
if len(items) > 0:
self.header_bar._selection_menu_label.set_text(
ngettext("Selected %d item", "Selected %d items", len(items)) % len(items))
else:
self.header_bar._selection_menu_label.set_text(_("Click on items to select them"))
@log
def _on_header_cancel_button_clicked(self, button):
self.view.set_selection_mode(False)
self.header_bar.set_selection_mode(False)
self.header_bar.header_bar.title = self.album
@log
def _on_header_select_button_toggled(self, button):
if button.get_active():
self.view.set_selection_mode(True)
self.header_bar.set_selection_mode(True)
self.player.actionbar.set_visible(False)
self.selection_toolbar.actionbar.set_visible(True)
self.selection_toolbar._add_to_playlist_button.set_sensitive(False)
self.header_bar.header_bar.set_custom_title(self.header_bar._selection_menu_button)
else:
self.view.set_selection_mode(False)
self.header_bar.set_selection_mode(False)
self.header_bar.title = self.album
self.selection_toolbar.actionbar.set_visible(False)
if(self.player.get_playback_status() != 2):
self.player.actionbar.set_visible(True)
@log
def add_item(self, source, prefs, track, remaining, data=None):
if track:
self.tracks.append(track)
self.duration = self.duration + track.get_duration()
_iter = self.model.append()
escapedTitle = AlbumArtCache.get_media_title(track, True)
self.model.set(_iter,
[0, 1, 2, 3, 4, 5, 9],
[escapedTitle,
self.player.seconds_to_string(
track.get_duration()),
'', '', None, track, bool(track.get_lyrics())])
self.ui.get_object('running_length_label_info').set_text(
_("%d min") % (int(self.duration / 60) + 1))
@log
def _on_look_up(self, pixbuf, path, data=None):
_iter = self.iterToClean
if not pixbuf:
pixbuf = self.noArtworkIcon
self.ui.get_object('cover').set_from_pixbuf(pixbuf)
if _iter:
self.model.set(_iter, [4], [pixbuf])
@log
def update_model(self, player, playlist, currentIter):
# self is not our playlist, return
if (playlist != self.model):
return False
currentSong = playlist.get_value(currentIter, 5)
song_passed = False
_iter = playlist.get_iter_first()
self.duration = 0
while _iter:
song = playlist.get_value(_iter, 5)
self.duration += song.get_duration()
escapedTitle = AlbumArtCache.get_media_title(song, True)
if (song == currentSong):
title = '<b>%s</b>' % escapedTitle
song_passed = True
elif (song_passed):
title = '<span>%s</span>' % escapedTitle
else:
title = '<span color=\'grey\'>%s</span>' % escapedTitle
playlist.set_value(_iter, 0, title)
_iter = playlist.iter_next(_iter)
self.ui.get_object('running_length_label_info').set_text(
_("%d min") % (int(self.duration / 60) + 1))
return False
# @log
# def _on_star_toggled(self, widget, path):
# try:
# _iter = self.model.get_iter(path)
# except TypeError:
# return
# new_value = not self.model.get_value(_iter, 10)
# self.model.set_value(_iter, 10, new_value)
# song_item = self.model.get_value(_iter, 5) # er, will this definitely return MediaAudio obj.?
# grilo.toggle_favorite(song_item) # toggle favorite status in database
# playlists.update_static_playlist(StaticPlaylists.Favorites)
# # Use this flag to ignore the upcoming _on_item_activated call
# self.star_renderer_click = True
class ArtistAlbums(Gtk.Box):
def __repr__(self):
return '<ArtistAlbums>'
@log
def __init__(self, artist, albums, player,
header_bar, selection_toolbar, window, selectionModeAllowed=False):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL)
self.player = player
self.artist = artist
self.albums = albums
self.window = window
self.selectionMode = False
self.selectionModeAllowed = selectionModeAllowed
self.selection_toolbar = selection_toolbar
self.header_bar = header_bar
self.ui = Gtk.Builder()
self.ui.add_from_resource('/org/gnome/Music/ArtistAlbumsWidget.ui')
self.set_border_width(0)
self.ui.get_object('artist').set_label(self.artist)
self.widgets = []
self.model = Gtk.ListStore(GObject.TYPE_STRING, # title
GObject.TYPE_STRING,
Gtk.Image,
GObject.TYPE_BOOLEAN, # icon shown
GObject.TYPE_STRING, # icon
GObject.TYPE_OBJECT, # song object
GObject.TYPE_BOOLEAN,
GObject.TYPE_INT
)
self.row_changed_source_id = None
self._hbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self._albumBox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL,
spacing=48)
self._scrolledWindow = Gtk.ScrolledWindow()
self._scrolledWindow.set_policy(
Gtk.PolicyType.NEVER,
Gtk.PolicyType.AUTOMATIC)
self._scrolledWindow.add(self._hbox)
self._hbox.pack_start(self.ui.get_object('ArtistAlbumsWidget'),
False, False, 0)
self._hbox.pack_start(self._albumBox, False, False, 16)
self._coverSizeGroup = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
self._songsGridSizeGroup = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
self.pack_start(self._scrolledWindow, True, True, 0)
self.hide()
self.window._init_loading_notification()
for album in albums:
is_last_album = False
if album == albums[-1]:
is_last_album = True
self.add_album(album, is_last_album)
self.player.connect('playlist-item-changed', self.update_model)
def _on_last_album_displayed(self, data=None):
self.window.notification.dismiss()
self.show_all()
@log
def add_album(self, album, is_last_album=False):
self.window.notification.set_timeout(0)
widget = ArtistAlbumWidget(
self.artist, album, self.player, self.model,
self.header_bar, self.selectionModeAllowed
)
self._coverSizeGroup.add_widget(widget.cover)
self._songsGridSizeGroup.add_widget(widget.songsGrid)
self._albumBox.pack_start(widget, False, False, 0)
self.widgets.append(widget)
if is_last_album:
widget.connect('tracks-loaded', self._on_last_album_displayed)
@log
def update_model(self, player, playlist, currentIter):
# this is not our playlist, return
if playlist != self.model:
            # TODO, only clean once, but that can wait until we have cleaned
            # up the code a bit, and until the playlist refactoring.
# the overhead is acceptable for now
self.clean_model()
return False
currentSong = playlist.get_value(currentIter, 5)
song_passed = False
itr = playlist.get_iter_first()
while itr:
song = playlist.get_value(itr, 5)
song_widget = song.song_widget
if not song_widget.can_be_played:
itr = playlist.iter_next(itr)
continue
escapedTitle = AlbumArtCache.get_media_title(song, True)
if (song == currentSong):
song_widget.now_playing_sign.show()
song_widget.title.set_markup('<b>%s</b>' % escapedTitle)
song_passed = True
elif (song_passed):
song_widget.now_playing_sign.hide()
song_widget.title.set_markup('<span>%s</span>' % escapedTitle)
else:
song_widget.now_playing_sign.hide()
song_widget.title\
.set_markup('<span color=\'grey\'>%s</span>' % escapedTitle)
itr = playlist.iter_next(itr)
return False
@log
def clean_model(self):
itr = self.model.get_iter_first()
while itr:
song = self.model.get_value(itr, 5)
song_widget = song.song_widget
escapedTitle = AlbumArtCache.get_media_title(song, True)
if song_widget.can_be_played:
song_widget.now_playing_sign.hide()
song_widget.title.set_markup('<span>%s</span>' % escapedTitle)
itr = self.model.iter_next(itr)
return False
@log
def set_selection_mode(self, selectionMode):
if self.selectionMode == selectionMode:
return
self.selectionMode = selectionMode
try:
if self.row_changed_source_id:
self.model.disconnect(self.row_changed_source_id)
self.row_changed_source_id = self.model.connect('row-changed', self._model_row_changed)
except Exception as e:
logger.warning("Exception while tracking row-changed: %s", e)
for widget in self.widgets:
widget.set_selection_mode(selectionMode)
@log
def _model_row_changed(self, model, path, _iter):
if not self.selectionMode:
return
selected_items = 0
for row in model:
if row[6]:
selected_items += 1
self.selection_toolbar\
._add_to_playlist_button.set_sensitive(selected_items > 0)
if selected_items > 0:
self.header_bar._selection_menu_label.set_text(
ngettext("Selected %d item", "Selected %d items", selected_items) % selected_items)
else:
self.header_bar._selection_menu_label.set_text(_("Click on items to select them"))
class AllArtistsAlbums(ArtistAlbums):
def __repr__(self):
return '<AllArtistsAlbums>'
@log
def __init__(self, player, header_bar, selection_toolbar, selectionModeAllowed=False):
ArtistAlbums.__init__(self, _("All Artists"), [], player,
header_bar, selection_toolbar, selectionModeAllowed)
self._offset = 0
self._populate()
@log
def _populate(self, data=None):
if grilo.tracker:
GLib.idle_add(grilo.populate_albums,
self._offset, self.add_item)
@log
def add_item(self, source, param, item, remaining=0, data=None):
if remaining == 0:
self._on_last_album_displayed()
if item:
self._offset += 1
self.add_album(item)
class ArtistAlbumWidget(Gtk.Box):
__gsignals__ = {
'tracks-loaded': (GObject.SignalFlags.RUN_FIRST, None, ()),
}
loadingIcon = AlbumArtCache.get_default().get_default_icon(128, 128, True)
noArtworkIcon = ALBUM_ART_CACHE.get_default_icon(128, 128, False)
def __repr__(self):
return '<ArtistAlbumWidget>'
@log
def __init__(self, artist, album, player, model, header_bar, selectionModeAllowed):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL)
self.player = player
self.album = album
self.artist = artist
self.model = model
self.model.connect('row-changed', self._model_row_changed)
self.header_bar = header_bar
self.selectionMode = False
self.selectionModeAllowed = selectionModeAllowed
self.songs = []
self.ui = Gtk.Builder()
self.ui.add_from_resource('/org/gnome/Music/ArtistAlbumWidget.ui')
GLib.idle_add(self._update_album_art)
self.cover = self.ui.get_object('cover')
self.cover.set_from_pixbuf(self.loadingIcon)
self.songsGrid = self.ui.get_object('grid1')
self.ui.get_object('title').set_label(album.get_title())
if album.get_creation_date():
self.ui.get_object('year').set_markup(
'<span color=\'grey\'>(%s)</span>' %
str(album.get_creation_date().get_year())
)
self.tracks = []
grilo.populate_album_songs(album, self.add_item)
self.pack_start(self.ui.get_object('ArtistAlbumWidget'), True, True, 0)
@log
def add_item(self, source, prefs, track, remaining, data=None):
if remaining == 0:
self.songsGrid.show_all()
self.emit("tracks-loaded")
if track:
self.tracks.append(track)
else:
for i, track in enumerate(self.tracks):
ui = Gtk.Builder()
ui.add_from_resource('/org/gnome/Music/TrackWidget.ui')
song_widget = ui.get_object('eventbox1')
self.songs.append(song_widget)
ui.get_object('num')\
.set_markup('<span color=\'grey\'>%d</span>'
% len(self.songs))
title = AlbumArtCache.get_media_title(track)
ui.get_object('title').set_text(title)
ui.get_object('title').set_alignment(0.0, 0.5)
ui.get_object('title').set_max_width_chars(MAX_TITLE_WIDTH)
self.songsGrid.attach(
song_widget,
int(i / (len(self.tracks) / 2)),
int(i % (len(self.tracks) / 2)), 1, 1
)
track.song_widget = song_widget
itr = self.model.append(None)
song_widget._iter = itr
song_widget.model = self.model
song_widget.title = ui.get_object('title')
song_widget.checkButton = ui.get_object('select')
song_widget.checkButton.set_visible(self.selectionMode)
song_widget.checkButton.connect(
'toggled', self._check_button_toggled, song_widget
)
self.model.set(itr,
[0, 1, 2, 3, 5],
[title, self.artist, self.cover, False, track])
song_widget.now_playing_sign = ui.get_object('image1')
song_widget.now_playing_sign.set_from_icon_name(
NOW_PLAYING_ICON_NAME,
Gtk.IconSize.SMALL_TOOLBAR)
                song_widget.now_playing_sign.set_no_show_all(True)
song_widget.now_playing_sign.set_alignment(1, 0.6)
song_widget.can_be_played = True
song_widget.connect('button-release-event',
self.track_selected)
@log
def _update_album_art(self):
real_artist = self.album.get_string(Grl.METADATA_KEY_ARTIST)\
or self.album.get_author()\
or _("Unknown Artist")
ALBUM_ART_CACHE.lookup(
self.album, 128, 128, self._get_album_cover, None,
real_artist, self.album.get_title())
@log
def _get_album_cover(self, pixbuf, path, data=None):
if not pixbuf:
pixbuf = self.noArtworkIcon
self.cover.set_from_pixbuf(pixbuf)
@log
def track_selected(self, widget, event):
if not widget.can_be_played:
return
if not self.selectionMode and \
(event.button == Gdk.BUTTON_SECONDARY or
(event.button == 1 and event.state & Gdk.ModifierType.CONTROL_MASK)):
if self.selectionModeAllowed:
self.header_bar._select_button.set_active(True)
else:
return
if self.selectionMode:
self.model[widget._iter][6] = not self.model[widget._iter][6]
return
self.player.stop()
self.player.set_playlist('Artist', self.artist,
widget.model, widget._iter, 5, 6)
self.player.set_playing(True)
@log
def set_selection_mode(self, selectionMode):
if self.selectionMode == selectionMode:
return
self.selectionMode = selectionMode
for songWidget in self.songs:
songWidget.checkButton.set_visible(selectionMode)
if not selectionMode:
songWidget.model[songWidget._iter][6] = False
@log
def _check_button_toggled(self, button, songWidget):
if songWidget.model[songWidget._iter][6] != button.get_active():
songWidget.model[songWidget._iter][6] = button.get_active()
@log
def _model_row_changed(self, model, path, _iter):
if not self.selectionMode:
return
if not model[_iter][5]:
return
songWidget = model[_iter][5].song_widget
selected = model[_iter][6]
if model[_iter][11] == DiscoveryStatus.FAILED:
songWidget.now_playing_sign.set_from_icon_name(
ERROR_ICON_NAME,
Gtk.IconSize.SMALL_TOOLBAR)
songWidget.now_playing_sign.show()
songWidget.can_be_played = False
if selected != songWidget.checkButton.get_active():
songWidget.checkButton.set_active(selected)
class PlaylistDialog():
def __repr__(self):
return '<PlaylistDialog>'
@log
def __init__(self, parent):
self.ui = Gtk.Builder()
self.ui.add_from_resource('/org/gnome/Music/PlaylistDialog.ui')
self.dialog_box = self.ui.get_object('dialog1')
self.dialog_box.set_transient_for(parent)
self.view = self.ui.get_object('treeview1')
self.view.set_activate_on_single_click(False)
self.selection = self.ui.get_object('treeview-selection1')
self.selection.connect('changed', self._on_selection_changed)
self._add_list_renderers()
self.view.connect('row-activated', self._on_item_activated)
self.model = self.ui.get_object('liststore1')
self.populate()
self.title_bar = self.ui.get_object('headerbar1')
self.dialog_box.set_titlebar(self.title_bar)
self._cancel_button = self.ui.get_object('cancel-button')
self._select_button = self.ui.get_object('select-button')
self._select_button.set_sensitive(False)
self._cancel_button.connect('clicked', self._on_cancel_button_clicked)
self._select_button.connect('clicked', self._on_selection)
self.playlist = Playlists.get_default()
self.playlist.connect('playlist-created', self._on_playlist_created)
@log
def get_selected(self):
_iter = self.selection.get_selected()[1]
if not _iter or self.model[_iter][1]:
return None
return self.model[_iter][2]
@log
def _add_list_renderers(self):
cols = Gtk.TreeViewColumn()
type_renderer = Gd.StyledTextRenderer(
xpad=8,
ypad=8,
ellipsize=Pango.EllipsizeMode.END,
xalign=0.0
)
type_renderer.connect('editing-started', self._on_editing_started, None)
cols.pack_start(type_renderer, True)
cols.add_attribute(type_renderer, "text", 0)
cols.add_attribute(type_renderer, "editable", 1)
cols.set_cell_data_func(type_renderer, self._on_list_text_render)
self.view.append_column(cols)
@log
def populate(self):
self.add_playlist_iter = self.model.append()
self.model.set(self.add_playlist_iter, [0, 1], [_("New Playlist"), True])
if grilo.tracker:
GLib.idle_add(grilo.populate_playlists, 0, self._add_item)
@log
def _add_item(self, source, param, item, remaining=0, data=None):
if item:
self._add_item_to_model(item)
@log
def _add_item_to_model(self, item):
new_iter = self.model.insert_before(self.add_playlist_iter)
self.model.set(
new_iter,
[0, 1, 2],
[AlbumArtCache.get_media_title(item), False, item]
)
return new_iter
@log
def _on_list_text_render(self, col, cell, model, _iter, data):
editable = model.get_value(_iter, 1)
if editable:
cell.add_class("dim-label")
else:
cell.remove_class("dim-label")
@log
def _on_selection(self, select_button):
self.dialog_box.response(Gtk.ResponseType.ACCEPT)
@log
def _on_cancel_button_clicked(self, cancel_button):
self.dialog_box.response(Gtk.ResponseType.REJECT)
@log
def _on_item_activated(self, view, path, column):
_iter = self.model.get_iter(path)
if self.model.get_value(_iter, 1):
self.view.set_cursor(path, column, True)
else:
self.dialog_box.response(Gtk.ResponseType.ACCEPT)
@log
def _on_selection_changed(self, selection):
model, _iter = self.selection.get_selected()
if _iter == None or self.model.get_value(_iter, 1):
self._select_button.set_sensitive(False)
else:
self._select_button.set_sensitive(True)
@log
def _on_editing_started(self, renderer, editable, path, data=None):
editable.set_text('')
editable.connect('editing-done', self._on_editing_done, None)
@log
def _on_editing_done(self, editable, data=None):
if editable.get_text() != '':
self.playlist.create_playlist(editable.get_text())
@log
def _on_playlist_created(self, playlists, item):
new_iter = self._add_item_to_model(item)
if self.view.get_columns():
self.view.set_cursor(self.model.get_path(new_iter),
self.view.get_columns()[0], False)
self.view.row_activated(self.model.get_path(new_iter),
self.view.get_columns()[0])
class CellRendererClickablePixbuf(Gtk.CellRendererPixbuf):
__gsignals__ = {'clicked': (GObject.SignalFlags.RUN_LAST, GObject.TYPE_NONE,
(GObject.TYPE_STRING,))}
__gproperties__ = {
        'show_star': (GObject.TYPE_INT, 'Show star', 'show star', 0, 2, 1, GObject.ParamFlags.READWRITE)}
starIcon = 'starred-symbolic'
nonStarIcon = 'non-starred-symbolic'
def __repr__(self):
return '<CellRendererClickablePixbuf>'
def __init__(self, view, hidden=False, *args, **kwargs):
Gtk.CellRendererPixbuf.__init__(self, *args, **kwargs)
self.set_property('mode', Gtk.CellRendererMode.ACTIVATABLE)
self.set_property('xpad', 32)
self.set_property('icon_name', '')
self.view = view
self.hidden = hidden
self.show_star = 0
def do_activate(self, event, widget, path, background_area, cell_area, flags):
self.show_star = 0
self.emit('clicked', path)
def do_get_property(self, property):
if property.name == 'show-star':
return self.show_star
def do_set_property(self, property, value):
if property.name == 'show-star':
if self.show_star == 1:
self.set_property('icon_name', self.starIcon)
elif self.show_star == 0:
self.set_property('icon_name', self.nonStarIcon)
else:
self.set_property('icon_name', '')
self.show_star = value
| gpl-2.0 | 936,807,593,558,934,900 | 37.82242 | 105 | 0.591098 | false |
Richard-Mathie/cassandra_benchmark | vendor/github.com/datastax/python-driver/tests/integration/cqlengine/columns/test_value_io.py | 6 | 7015 | # Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from datetime import datetime, timedelta, time
from decimal import Decimal
from uuid import uuid1, uuid4, UUID
import six
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import drop_table
from cassandra.cqlengine.models import Model
from cassandra.util import Date, Time
from tests.integration import PROTOCOL_VERSION
from tests.integration.cqlengine.base import BaseCassEngTestCase
class BaseColumnIOTest(BaseCassEngTestCase):
"""
    Tests that values come out of Cassandra in the format we expect
To test a column type, subclass this test, define the column, and the primary key
and data values you want to test
"""
# The generated test model is assigned here
_generated_model = None
# the column we want to test
column = None
# the values we want to test against, you can
# use a single value, or multiple comma separated values
pkey_val = None
data_val = None
@classmethod
def setUpClass(cls):
super(BaseColumnIOTest, cls).setUpClass()
# if the test column hasn't been defined, bail out
if not cls.column:
return
# create a table with the given column
class IOTestModel(Model):
pkey = cls.column(primary_key=True)
data = cls.column()
cls._generated_model = IOTestModel
sync_table(cls._generated_model)
# tupleify the tested values
if not isinstance(cls.pkey_val, tuple):
cls.pkey_val = cls.pkey_val,
if not isinstance(cls.data_val, tuple):
cls.data_val = cls.data_val,
@classmethod
def tearDownClass(cls):
super(BaseColumnIOTest, cls).tearDownClass()
if not cls.column:
return
drop_table(cls._generated_model)
def comparator_converter(self, val):
""" If you want to convert the original value used to compare the model vales """
return val
def test_column_io(self):
""" Tests the given models class creates and retrieves values as expected """
if not self.column:
return
for pkey, data in zip(self.pkey_val, self.data_val):
# create
m1 = self._generated_model.create(pkey=pkey, data=data)
# get
m2 = self._generated_model.get(pkey=pkey)
assert m1.pkey == m2.pkey == self.comparator_converter(pkey), self.column
assert m1.data == m2.data == self.comparator_converter(data), self.column
# delete
self._generated_model.filter(pkey=pkey).delete()
class TestBlobIO(BaseColumnIOTest):
column = columns.Blob
pkey_val = six.b('blake'), uuid4().bytes
data_val = six.b('eggleston'), uuid4().bytes
class TestBlobIO2(BaseColumnIOTest):
column = columns.Blob
pkey_val = bytearray(six.b('blake')), uuid4().bytes
data_val = bytearray(six.b('eggleston')), uuid4().bytes
class TestTextIO(BaseColumnIOTest):
column = columns.Text
pkey_val = 'bacon'
data_val = 'monkey'
class TestNonBinaryTextIO(BaseColumnIOTest):
column = columns.Text
pkey_val = 'bacon'
data_val = '0xmonkey'
class TestInteger(BaseColumnIOTest):
column = columns.Integer
pkey_val = 5
data_val = 6
class TestBigInt(BaseColumnIOTest):
column = columns.BigInt
pkey_val = 6
data_val = pow(2, 63) - 1
class TestDateTime(BaseColumnIOTest):
column = columns.DateTime
now = datetime(*datetime.now().timetuple()[:6])
pkey_val = now
data_val = now + timedelta(days=1)
class TestUUID(BaseColumnIOTest):
column = columns.UUID
pkey_val = str(uuid4()), uuid4()
data_val = str(uuid4()), uuid4()
def comparator_converter(self, val):
return val if isinstance(val, UUID) else UUID(val)
class TestTimeUUID(BaseColumnIOTest):
column = columns.TimeUUID
pkey_val = str(uuid1()), uuid1()
data_val = str(uuid1()), uuid1()
def comparator_converter(self, val):
return val if isinstance(val, UUID) else UUID(val)
class TestFloatIO(BaseColumnIOTest):
column = columns.Float
pkey_val = 4.75
data_val = -1.5
class TestDoubleIO(BaseColumnIOTest):
column = columns.Double
pkey_val = 3.14
data_val = -1982.11
class TestDecimalIO(BaseColumnIOTest):
column = columns.Decimal
pkey_val = Decimal('1.35'), 5, '2.4'
data_val = Decimal('0.005'), 3.5, '8'
def comparator_converter(self, val):
return Decimal(repr(val) if isinstance(val, float) else val)
class ProtocolV4Test(BaseColumnIOTest):
@classmethod
def setUpClass(cls):
if PROTOCOL_VERSION >= 4:
super(ProtocolV4Test, cls).setUpClass()
@classmethod
def tearDownClass(cls):
if PROTOCOL_VERSION >= 4:
super(ProtocolV4Test, cls).tearDownClass()
class TestDate(ProtocolV4Test):
def setUp(self):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
super(TestDate, self).setUp()
column = columns.Date
now = Date(datetime.now().date())
pkey_val = now
data_val = Date(now.days_from_epoch + 1)
class TestTime(ProtocolV4Test):
def setUp(self):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
super(TestTime, self).setUp()
column = columns.Time
pkey_val = Time(time(2, 12, 7, 48))
data_val = Time(time(16, 47, 25, 7))
class TestSmallInt(ProtocolV4Test):
def setUp(self):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
super(TestSmallInt, self).setUp()
column = columns.SmallInt
pkey_val = 16768
data_val = 32523
class TestTinyInt(ProtocolV4Test):
def setUp(self):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
super(TestTinyInt, self).setUp()
column = columns.TinyInt
pkey_val = 1
data_val = 123
| apache-2.0 | -7,603,066,259,584,916,000 | 24.981481 | 134 | 0.664006 | false |
jerbob92/CouchPotatoServer | libs/guessit/transfo/guess_properties.py | 150 | 1273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.transfo import SingleNodeGuesser
from guessit.patterns import find_properties
import logging
log = logging.getLogger(__name__)
def guess_properties(string):
try:
prop, value, pos, end = find_properties(string)[0]
return { prop: value }, (pos, end)
except IndexError:
return None, None
def process(mtree):
SingleNodeGuesser(guess_properties, 1.0, log).process(mtree)
| gpl-3.0 | 6,948,849,929,272,523,000 | 32.5 | 74 | 0.734485 | false |
spaceone/pyjs | examples/infohierarchy/public/services/jsonrpc/http.py | 24 | 1236 | """
Copyright (c) 2006 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from jsonrpc import SimpleServiceHandler
import urllib2
class HTTPClientConnectionHandler(SimpleServiceHandler):
def __init__(self, url, service,messageDelimiter=""):
self.url = url
SimpleServiceHandler.__init__(self, service,messageDelimiter=messageDelimiter)
def send(self, data):
req = urllib2.Request(self.url, data)
resp = urllib2.urlopen(req)
self.handlePartialData(resp.read())
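# Hedged usage sketch (the local service class name is assumed, not part of
# this module): send() POSTs the already-encoded JSON-RPC data to the URL and
# feeds the HTTP response back into the shared SimpleServiceHandler machinery.
#
#     handler = HTTPClientConnectionHandler('http://localhost:8080/rpc',
#                                           MyLocalService())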
| apache-2.0 | 5,313,945,474,049,252,000 | 36.454545 | 86 | 0.7411 | false |
msultan/mdtraj | mdtraj/utils/test.py | 9 | 6569 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""Tests for some of the utilities
"""
##############################################################################
# imports
##############################################################################
from __future__ import print_function, division
import numpy as np
from mdtraj.utils import ensure_type
from mdtraj.utils.validation import TypeCastPerformanceWarning
from mdtraj.utils.unit import in_units_of, _str_to_unit
from mdtraj.utils import (import_, lengths_and_angles_to_box_vectors,
box_vectors_to_lengths_and_angles)
from mdtraj.testing import raises, eq
import warnings
from itertools import combinations
##############################################################################
# globals
##############################################################################
a = np.ones(10, dtype=np.float32)
b = np.ones((10,10), dtype=np.float64, order='F')
random = np.random.RandomState(0)
##############################################################################
# tests
##############################################################################
def test_unitcell_0():
result = lengths_and_angles_to_box_vectors(1, 1, 1, 90.0, 90.0, 90.0)
expected = (np.array([1, 0, 0]), np.array([ 0., 1., 0.]), np.array([ 0., 0., 1.]))
for (a, b) in zip(result, expected):
np.testing.assert_array_almost_equal(a, b)
def test_unitcell_1():
# try round-tripping some random lengths and angles through
# lengths_and_angles_to_box_vectors and box_vectors_to_lengths_and_angles,
# and make sure we get back to where we started
for _ in range(10):
arg = np.hstack((random.rand(3), random.uniform(70, 110, size=3)))
vectors = lengths_and_angles_to_box_vectors(*arg)
out = box_vectors_to_lengths_and_angles(*vectors)
np.testing.assert_array_almost_equal(arg, out)
def test_ensure_type_1():
ensure_type(a, np.float32, 1, '', length=10)
def test_ensure_type_2():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
val = ensure_type(a, np.float64, 1, '', length=10)
assert val.dtype == np.float64
assert a.dtype == np.float32 # a should not be changed
assert len(w) == 1
assert issubclass(w[-1].category, TypeCastPerformanceWarning)
def test_ensure_type_25():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
val = ensure_type(a, np.float64, 1, '', length=10, warn_on_cast=False)
assert val.dtype == np.float64
assert a.dtype == np.float32 # a should not be changed
assert len(w) == 0 # no warning since we set warn_on_cast to False
@raises(ValueError)
def test_ensure_type_3():
ensure_type(a, np.float32, 1, '', length=11)
def test_ensure_type_4():
ensure_type(None, np.float64, 1, '', length=11, can_be_none=True)
@raises(ValueError)
def test_ensure_type_5():
ensure_type(a, np.float32, 1, '', length=11, can_be_none=True)
def test_ensure_type_6():
val = ensure_type(b, np.float64, 2, '', shape=(10,10))
assert val.flags.c_contiguous is True
def test_ensure_type_7():
c = ensure_type(a, np.float32, ndim=2, name='', add_newaxis_on_deficient_ndim=True)
assert c.shape == (1, len(a))
def test_ensure_type_8():
c = ensure_type(np.zeros((5,10)), np.float32, ndim=2, name='', shape=(None, 10))
assert c.shape == (5, 10)
@raises(ValueError)
def test_ensure_type_9():
c = ensure_type(np.zeros((5,11)), np.float32, ndim=2, name='', shape=(None, 10))
@raises(ValueError)
def test_ensure_type_10():
c = ensure_type([0,1], np.float32, ndim=2, name='')
def test_ensure_type_11():
c = ensure_type(0, np.float32, ndim=1, name='', add_newaxis_on_deficient_ndim=True)
assert c.shape == (1,)
@raises(TypeError)
def test_ensure_type_12():
ensure_type(np.zeros((2,2)), np.float32, ndim=3)
@raises(ValueError)
def test_ensure_type_13():
ensure_type(np.zeros((2,2)), np.float32, ndim=2, name='', shape=(None, None, None))
def test_ensure_type_14():
# test that the generators work
value = ensure_type(combinations(range(10), 2), int, ndim=2, name='')
assert isinstance(value, np.ndarray)
ref = np.array(list(combinations(range(10), 2)))
eq(value, ref)
def test_ensure_type_15():
# test that lists
x = [1, 2, 3]
value = ensure_type(x, int, ndim=1, name='')
ref = np.array(x)
eq(value, ref)
@raises(ImportError)
def test_delay_import_fail_1():
import_('sdfsdfsfsfdsdf')
def test_delay_import():
import_('scipy.sparse')
def test_unit_0():
a = np.array([1.0])
b = in_units_of(a, 'nanometers', 'angstroms', inplace=False)
c = in_units_of(a, 'angstroms', 'nanometers', inplace=False)
eq(b, np.array([10.0]))
eq(c, np.array([0.1]))
assert a.ctypes.data != b.ctypes.data
assert a.ctypes.data != c.ctypes.data
def test_unit_1():
a = np.array([1.0])
b = in_units_of(a, 'nanometers', 'angstroms', inplace=True)
eq(a, np.array([10.0]))
eq(b, np.array([10.0]))
# a and b point to the same memory
assert a.ctypes.data == b.ctypes.data
def test_unit_2():
a = np.array([1.0])
a.flags['WRITEABLE'] = False
b = in_units_of(a, 'nanometers', 'angstroms', inplace=True)
eq(b, np.array([10.0]))
# a and b do not point to the same memory, since a isn't writeable
assert a.ctypes.data != b.ctypes.data
def test_unit_3():
eq(1000000.0, in_units_of(1, 'meter**2/second', 'nanometers**2/picosecond'))
| lgpl-2.1 | 3,972,271,449,462,869,500 | 32.860825 | 90 | 0.594763 | false |
jmathai/elodie | elodie/tests/media/media_test.py | 1 | 5268 | # Project imports
import os
import sys
import hashlib
import random
import re
import shutil
import string
import tempfile
import time
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))))
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import helper
from elodie.media.audio import Audio
from elodie.media.media import Media
from elodie.media.photo import Photo
from elodie.media.video import Video
os.environ['TZ'] = 'GMT'
setup_module = helper.setup_module
teardown_module = helper.teardown_module
def test_get_file_path():
media = Media(helper.get_file('plain.jpg'))
path = media.get_file_path()
assert 'plain.jpg' in path, path
def test_get_class_by_file_photo():
media = Media.get_class_by_file(helper.get_file('plain.jpg'), [Photo, Video])
assert media.__name__ == 'Photo'
def test_get_class_by_file_video():
media = Media.get_class_by_file(helper.get_file('video.mov'), [Photo, Video])
assert media.__name__ == 'Video'
def test_get_class_by_file_unsupported():
media = Media.get_class_by_file(helper.get_file('text.txt'), [Photo, Video])
assert media is None
def test_get_class_by_file_ds_store():
media = Media.get_class_by_file(helper.get_file('.DS_Store'),
[Photo, Video, Audio])
assert media is None
def test_get_class_by_file_invalid_type():
media = Media.get_class_by_file(None,
[Photo, Video, Audio])
assert media is None
media = Media.get_class_by_file(False,
[Photo, Video, Audio])
assert media is None
media = Media.get_class_by_file(True,
[Photo, Video, Audio])
assert media is None
def test_get_original_name():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/%s' % (folder, 'with-original-name.jpg')
file = helper.get_file('with-original-name.jpg')
shutil.copyfile(file, origin)
media = Media.get_class_by_file(origin, [Photo])
original_name = media.get_original_name()
assert original_name == 'originalfilename.jpg', original_name
def test_get_original_name_invalid_file():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/%s' % (folder, 'invalid.jpg')
file = helper.get_file('invalid.jpg')
shutil.copyfile(file, origin)
media = Media.get_class_by_file(origin, [Photo])
original_name = media.get_original_name()
assert original_name is None, original_name
def test_set_original_name_when_exists():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/%s' % (folder, 'with-original-name.jpg')
file = helper.get_file('with-original-name.jpg')
shutil.copyfile(file, origin)
media = Media.get_class_by_file(origin, [Photo])
result = media.set_original_name()
assert result is None, result
def test_set_original_name_when_does_not_exist():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/%s' % (folder, 'plain.jpg')
file = helper.get_file('plain.jpg')
shutil.copyfile(file, origin)
media = Media.get_class_by_file(origin, [Photo])
metadata_before = media.get_metadata()
result = media.set_original_name()
metadata_after = media.get_metadata()
assert metadata_before['original_name'] is None, metadata_before
assert metadata_after['original_name'] == 'plain.jpg', metadata_after
assert result is True, result
def test_set_original_name_with_arg():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/%s' % (folder, 'plain.jpg')
file = helper.get_file('plain.jpg')
shutil.copyfile(file, origin)
new_name = helper.random_string(15)
media = Media.get_class_by_file(origin, [Photo])
metadata_before = media.get_metadata()
result = media.set_original_name(new_name)
metadata_after = media.get_metadata()
assert metadata_before['original_name'] is None, metadata_before
assert metadata_after['original_name'] == new_name, metadata_after
assert result is True, result
def test_set_original_name():
files = ['plain.jpg', 'audio.m4a', 'photo.nef', 'video.mov']
for file in files:
ext = os.path.splitext(file)[1]
temporary_folder, folder = helper.create_working_folder()
random_file_name = '%s%s' % (helper.random_string(10), ext)
origin = '%s/%s' % (folder, random_file_name)
file_path = helper.get_file(file)
if file_path is False:
file_path = helper.download_file(file, folder)
shutil.copyfile(file_path, origin)
media = Media.get_class_by_file(origin, [Audio, Media, Photo, Video])
metadata = media.get_metadata()
media.set_original_name()
metadata_updated = media.get_metadata()
shutil.rmtree(folder)
assert metadata['original_name'] is None, metadata['original_name']
assert metadata_updated['original_name'] == random_file_name, metadata_updated['original_name']
def is_valid():
media = Media()
assert not media.is_valid()
| apache-2.0 | -1,791,670,685,919,713,300 | 29.988235 | 131 | 0.655087 | false |
valkjsaaa/sl4a | python/src/Lib/encodings/mac_centeuro.py | 593 | 14358 | """ Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-centeuro',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
u'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
u'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
u'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
u'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
u'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
u'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
u'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
u'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
u'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
u'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
u'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
u'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
u'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
u'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
u'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
u'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
u'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
u'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
u'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
u'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
u'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
u'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
u'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
u'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
u'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
u'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 | -4,002,140,888,249,020,000 | 45.76873 | 118 | 0.56122 | false |
adamrp/qiime | qiime/nmds.py | 15 | 1417 | #!/usr/bin/env python
from __future__ import division
import numpy
import os.path
import cogent.cluster.nmds as nmds_module
from qiime.format import format_nmds_coords
from qiime.parse import parse_distmat
__author__ = "Justin Kuzynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "[email protected]"
def nmds(file, dimensions=2):
samples, distmtx = parse_distmat(file)
nmds_res = nmds_module.NMDS(distmtx, verbosity=0, dimension=dimensions)
pts = nmds_res.getPoints()
stress = nmds_res.getStress()
return format_nmds_coords(samples, pts, stress)
def multiple_file_nmds(input_dir, output_dir, dimensions=2):
"""perform PCoAs on all distance matrices in the input_dir
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
file_names = os.listdir(input_dir)
file_names = [fname for fname in file_names if not fname.startswith('.')]
for fname in file_names:
base_fname, ext = os.path.splitext(fname)
infile = os.path.join(input_dir, fname)
lines = open(infile, 'U')
nmds_res_string = nmds(lines, dimensions)
outfile = os.path.join(output_dir, 'nmds_' + base_fname + '.txt')
outfile = open(outfile, 'w')
outfile.write(nmds_res_string)
outfile.close()
| gpl-2.0 | 3,933,455,474,679,528,000 | 31.953488 | 77 | 0.666196 | false |
andrebellafronte/stoq | stoqlib/gui/test/test_qualitytesteditor.py | 3 | 1183 | # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
from stoqlib.gui.editors.producteditor import QualityTestEditor
from stoqlib.gui.test.uitestutils import GUITest
class TestQualityTestEditor(GUITest):
def test_create(self):
editor = QualityTestEditor(self.store)
self.check_editor(editor, 'editor-qualitytest-create')
| gpl-2.0 | -7,406,430,197,945,387,000 | 34.848485 | 71 | 0.731192 | false |
leodavesne/leodavesne.net | leodavesne/settings.py | 1 | 3452 | """
Django settings for leodavesne project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import django_heroku  # required by the django_heroku.settings() call at the end of this file
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 0)))
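# Illustrative local-development environment for the two settings above
# (placeholder values, not part of the original project):
#   export SECRET_KEY='change-me'
#   export DEBUG=1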
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'leodavesne.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'leodavesne.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
},
},
'root': {
'level': 'WARNING',
'handlers': ['console'],
},
}
django_heroku.settings(
locals(), logging=False
)
| mit | 5,920,516,162,070,657,000 | 23.834532 | 91 | 0.664832 | false |
MobinRanjbar/hue | desktop/core/ext-py/ndg_httpsclient-0.4.0/ndg/httpsclient/subj_alt_name.py | 79 | 6131 | """NDG HTTPS Client package
Use pyasn1 to provide support for parsing ASN.1 formatted subjectAltName
content for SSL peer verification. Code based on:
http://stackoverflow.com/questions/5519958/how-do-i-parse-subjectaltname-extension-data-using-pyasn1
"""
__author__ = "P J Kershaw"
__date__ = "01/02/12"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = '$Id$'
try:
from pyasn1.type import univ, constraint, char, namedtype, tag
except ImportError as e:
import_error_msg = ('Error importing pyasn1, subjectAltName check for SSL '
'peer verification will be disabled. Import error '
'is: %s' % e)
import warnings
warnings.warn(import_error_msg)
class Pyasn1ImportError(ImportError):
"Raise for pyasn1 import error"
raise Pyasn1ImportError(import_error_msg)
MAX = 64
class DirectoryString(univ.Choice):
"""ASN.1 Directory string class"""
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType(
'printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType(
'universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType(
'utf8String', char.UTF8String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType(
'bmpString', char.BMPString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType(
'ia5String', char.IA5String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
)
class AttributeValue(DirectoryString):
"""ASN.1 Attribute value"""
class AttributeType(univ.ObjectIdentifier):
"""ASN.1 Attribute type"""
class AttributeTypeAndValue(univ.Sequence):
"""ASN.1 Attribute type and value class"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('value', AttributeValue()),
)
class RelativeDistinguishedName(univ.SetOf):
'''ASN.1 Realtive distinguished name'''
componentType = AttributeTypeAndValue()
class RDNSequence(univ.SequenceOf):
'''ASN.1 RDN sequence class'''
componentType = RelativeDistinguishedName()
class Name(univ.Choice):
'''ASN.1 name class'''
componentType = namedtype.NamedTypes(
namedtype.NamedType('', RDNSequence()),
)
class Extension(univ.Sequence):
'''ASN.1 extension class'''
componentType = namedtype.NamedTypes(
namedtype.NamedType('extnID', univ.ObjectIdentifier()),
namedtype.DefaultedNamedType('critical', univ.Boolean('False')),
namedtype.NamedType('extnValue', univ.OctetString()),
)
class Extensions(univ.SequenceOf):
'''ASN.1 extensions class'''
componentType = Extension()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class AnotherName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type-id', univ.ObjectIdentifier()),
namedtype.NamedType('value', univ.Any().subtype(
explicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 0)))
)
class GeneralName(univ.Choice):
'''ASN.1 configuration for X.509 certificate subjectAltNames fields'''
componentType = namedtype.NamedTypes(
namedtype.NamedType('otherName', AnotherName().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 0))),
namedtype.NamedType('rfc822Name', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 1))),
namedtype.NamedType('dNSName', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 2))),
# namedtype.NamedType('x400Address', ORAddress().subtype(
# implicitTag=tag.Tag(tag.tagClassContext,
# tag.tagFormatSimple, 3))),
namedtype.NamedType('directoryName', Name().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 4))),
# namedtype.NamedType('ediPartyName', EDIPartyName().subtype(
# implicitTag=tag.Tag(tag.tagClassContext,
# tag.tagFormatSimple, 5))),
namedtype.NamedType('uniformResourceIdentifier', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 6))),
namedtype.NamedType('iPAddress', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 7))),
namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 8))),
)
class GeneralNames(univ.SequenceOf):
'''Sequence of names for ASN.1 subjectAltNames settings'''
componentType = GeneralName()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class SubjectAltName(GeneralNames):
'''ASN.1 implementation for subjectAltNames support'''
| apache-2.0 | 4,532,104,727,171,321,300 | 39.071895 | 100 | 0.608384 | false |
stefan-jonasson/home-assistant | homeassistant/components/device_tracker/keenetic_ndms2.py | 6 | 3728 | """
Support for Zyxel Keenetic NDMS2 based routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.keenetic_ndms2/
"""
import logging
from collections import namedtuple
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME
)
_LOGGER = logging.getLogger(__name__)
# Interface name to track devices for. Most likely one will not need to
# change it from the default 'Home'. This prevents tracking guest Wi-Fi
# clients and the router itself.
CONF_INTERFACE = 'interface'
DEFAULT_INTERFACE = 'Home'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_INTERFACE, default=DEFAULT_INTERFACE): cv.string,
})
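# Illustrative configuration.yaml entry for this platform (host, credentials
# and interface are placeholders, not from the original source):
#
#   device_tracker:
#     - platform: keenetic_ndms2
#       host: 192.168.1.1
#       username: admin
#       password: YOUR_PASSWORD
#       interface: Home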
def get_scanner(_hass, config):
"""Validate the configuration and return a Nmap scanner."""
scanner = KeeneticNDMS2DeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
Device = namedtuple('Device', ['mac', 'name'])
class KeeneticNDMS2DeviceScanner(DeviceScanner):
"""This class scans for devices using keenetic NDMS2 web interface."""
def __init__(self, config):
"""Initialize the scanner."""
self.last_results = []
self._url = 'http://%s/rci/show/ip/arp' % config[CONF_HOST]
self._interface = config[CONF_INTERFACE]
self._username = config.get(CONF_USERNAME)
self._password = config.get(CONF_PASSWORD)
self.success_init = self._update_info()
_LOGGER.info("Scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, mac):
"""Return the name of the given device or None if we don't know."""
filter_named = [device.name for device in self.last_results
if device.mac == mac]
if filter_named:
return filter_named[0]
return None
def _update_info(self):
"""Get ARP from keenetic router."""
_LOGGER.info("Fetching...")
last_results = []
# doing a request
try:
from requests.auth import HTTPDigestAuth
res = requests.get(self._url, timeout=10, auth=HTTPDigestAuth(
self._username, self._password
))
except requests.exceptions.Timeout:
_LOGGER.error(
"Connection to the router timed out at URL %s", self._url)
return False
if res.status_code != 200:
_LOGGER.error(
"Connection failed with http code %s", res.status_code)
return False
try:
result = res.json()
except ValueError:
# If json decoder could not parse the response
_LOGGER.error("Failed to parse response from router")
return False
# parsing response
for info in result:
if info.get('interface') != self._interface:
continue
mac = info.get('mac')
name = info.get('name')
# No address = no item :)
if mac is None:
continue
last_results.append(Device(mac.upper(), name))
self.last_results = last_results
_LOGGER.info("Request successful")
return True
| mit | 8,196,167,684,461,196,000 | 29.809917 | 75 | 0.628219 | false |
ICromwell/OCaSimTest | 10 - Dentist Screening Appointment.py | 1 | 1935 | # -*- coding: utf-8 -*-
"""
A screening process wherein a person returns for regular checkups at a dentist
"""
# `random` and `simpy` are needed below; `entity` is assumed to be supplied
# by the simulation harness that runs this script.
import random
import simpy
# Time interval between appointments
appInt = 6*30
# Create a counter for the number of dental appointments, if one doesn't already exist
if getattr(entity, "count_DentAppt", 0) == 0:
entity.count_DentAppt = 0
def devOPL(entity, env):
while True:
t_OPL = random.normalvariate(500, 100)
yield env.timeout(t_OPL)
#entity.OPL.append(env.now)
print(env.now, 'Developed OPL')
entity.OPLStatus = 1
entity.time_OPL = env.now
env.exit()
def appointment_process(entity, env):
while True:
if entity.OPLStatus ==0:
yield env.timeout(appInt)
print(env.now, 'Everything looks fine, see you in %2.0f days'%appInt)
elif entity.OPLStatus == 1:
print(env.now, 'Found something at %2.0f'%env.now)
entity.time_detectOPL = env.now # The time at which an OPL is detected
entity.allTime = entity.allTime + entity.time_detectOPL # Update total simulation runtime
entity.currentState = "Detected OPL, undergoing evaluation" # Update state
entity.stateNum = 1.1
env.exit()
entity.count_DentAppt = entity.count_DentAppt +1 # Add running count to the number of dental appointments
# Run simulation
env = simpy.Environment()
env.process(devOPL(entity, env))
env.process(appointment_process(entity, env))
env.run()
# VARIABLES CREATED IN THIS STEP:
# count_DentAppt - a counter for how many dentist's appointments an entity has had
# time_OPL - the time that the entity develops an OPL
# time_detectOPL - the time that an OPL is detected by a dentist
# OPLStatus - a flag for whether or not an entity has an OPL
| gpl-3.0 | 3,353,878,584,481,434,000 | 35.941176 | 132 | 0.620672 | false |
hwu25/AppPkg | Applications/Python/Python-2.7.2/Lib/hmac.py | 66 | 4664 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
trans_5C = "".join ([chr (x ^ 0x5C) for x in xrange(256)])
trans_36 = "".join ([chr (x ^ 0x36) for x in xrange(256)])
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
# A unique object passed by HMAC.copy() to the HMAC constructor, in order
# that the latter return very quickly. HMAC("") in contrast is quite
# expensive.
_secret_backdoor_key = []
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object.
Defaults to hashlib.md5.
"""
if key is _secret_backdoor_key: # cheap
return
if digestmod is None:
import hashlib
digestmod = hashlib.md5
if hasattr(digestmod, '__call__'):
self.digest_cons = digestmod
else:
self.digest_cons = lambda d='': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
# Very low blocksize, most likely a legacy value like
# Lib/sha.py and Lib/md5.py have.
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = self.__class__(_secret_backdoor_key)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
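# Illustrative usage sketch (not part of the original module; assumes
# hashlib.sha256 as the digest constructor and byte-string inputs):
#   import hashlib
#   h = new(b"secret-key", b"message", digestmod=hashlib.sha256)
#   tag = h.hexdigest()      # hex-encoded HMAC-SHA256 tag
#   h2 = h.copy()            # independent copy; further updates don't affect h
#   h2.update(b" more data")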
| bsd-2-clause | -7,037,095,824,735,547,000 | 33.067669 | 78 | 0.57783 | false |
fxsjy/pybrain | examples/rl/environments/cartpole/cart_all.py | 30 | 2089 | #!/usr/bin/env python
#########################################################################
# Reinforcement Learning with several optimization algorithms
# on the CartPoleEnvironment
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
#########################################################################
__author__ = "Thomas Rueckstiess, Frank Sehnke"
from pybrain.tools.example_tools import ExTools
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.environments.cartpole import CartPoleEnvironment, BalanceTask
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import PGPE #@UnusedImport
from pybrain.optimization import ExactNES #@UnusedImport
from pybrain.optimization import FEM #@UnusedImport
from pybrain.optimization import CMAES #@UnusedImport
from pybrain.rl.experiments import EpisodicExperiment
batch=2 #number of samples per learning step
prnts=100 #number of learning steps after results are printed
epis=4000/batch/prnts #number of rollouts
numbExp=40 #number of experiments
et = ExTools(batch, prnts) #tool for printing and plotting
expList = ["PGPE(storeAllEvaluations = True)", "ExactNES(storeAllEvaluations = True)", "FEM(storeAllEvaluations = True)", "CMAES(storeAllEvaluations = True)"]
for e in expList:
for runs in range(numbExp):
# create environment
env = CartPoleEnvironment()
# create task
task = BalanceTask(env, 200, desiredValue=None)
# create controller network
net = buildNetwork(4, 1, bias=False)
# create agent with controller and learner (and its options)
agent = OptimizationAgent(net, eval(e))
et.agent = agent
# create the experiment
experiment = EpisodicExperiment(task, agent)
#Do the experiment
for updates in range(epis):
for i in range(prnts):
experiment.doEpisodes(batch)
et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
et.addExps()
et.nextExps()
et.showExps()
| bsd-3-clause | -8,766,883,172,891,091,000 | 39.960784 | 158 | 0.675443 | false |
videetssinghai/Blog-Rest-Api | lib/python2.7/site-packages/pip/_vendor/distlib/util.py | 327 | 52991 | #
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = '(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
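# Illustrative sketch of the expected parse (for exposition only):
#   r = parse_requirement('foo [bar, baz] (>= 1.2, < 2.0)')
#   r.name        -> 'foo'
#   r.extras      -> ['bar', 'baz']
#   r.constraints -> [('>=', '1.2'), ('<', '2.0')]
#   r.requirement -> 'foo (>= 1.2, < 2.0)'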
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on macOS
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise DistlibException if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self): # pragma: no cover
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException("Invalid specification "
"'%s'" % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
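# Illustrative sketch (hypothetical entry point, for exposition only):
#   e = get_export_entry('console = mypkg.cli:main [extra1, extra2]')
#   e.name   -> 'console'
#   e.prefix -> 'mypkg.cli'
#   e.suffix -> 'main'
#   e.flags  -> ['extra1', 'extra2']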
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
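# Illustrative example on a POSIX system (assumed path):
#   path_to_cache_dir('/home/user/project') -> '--home--user--project.cache'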
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
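# Illustrative sketch (assumed values):
#   parse_credentials('user:[email protected]') -> ('user', 'pass', 'example.com')
#   parse_credentials('example.com')           -> (None, None, 'example.com')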
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
filename = unquote(filename).replace(' ', '-')
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
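# Illustrative sketch (hypothetical filename):
#   split_filename('foo-bar-1.0.2-py2.7', 'foo-bar') -> ('foo-bar', '1.0.2', '2.7')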
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
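# Illustrative sketch:
#   parse_name_and_version('foo (1.0)') -> ('foo', '1.0')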
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get('Content-Type')
if not ct.startswith('application/json'):
logger.debug('Unexpected response for JSON request: %s', ct)
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
def get_project_data(name):
url = '%s/%s/project.json' % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish a event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
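# Illustrative usage sketch (hypothetical subscriber, for exposition only):
#   class Loader(EventMixin):
#       pass
#   loader = Loader()
#   loader.add('progress', lambda event, frac: frac)  # subscriber echoes its argument
#   loader.publish('progress', 0.5)                   # -> [0.5]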
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
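# Illustrative usage sketch (hypothetical steps, for exposition only):
#   seq = Sequencer()
#   seq.add('build', 'test')
#   seq.add('test', 'deploy')
#   list(seq.get_steps('deploy')) -> ['build', 'test', 'deploy']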
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else: # pragma: no cover
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
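# Illustrative usage (hypothetical layout, for exposition only):
#   list(iglob('src/**/*.py'))             # recursive glob
#   list(iglob('docs/{api,guide}/*.rst'))  # brace-set expansion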
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else: # pragma: no cover
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
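    # Illustrative usage sketch (not part of the original module; the CA bundle
    # path is a placeholder and build_opener comes from urllib2 / urllib.request):
    #   opener = build_opener(HTTPSHandler('/etc/ssl/certs/ca-bundle.pem'))
    #   opener.open('https://example.com/').read()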
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
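#
# Illustrative sketch (not part of the original module): the timeout-aware
# transports above are only selected when a timeout is passed; the URL is a
# hypothetical placeholder.
#
#   proxy = ServerProxy('https://pypi.example.org/pypi', timeout=5.0)
#   # XML-RPC calls on `proxy` now go through SafeTransport with a 5s timeout
#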
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
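#
# Illustrative sketch (not part of the original module): a RECORD-style round
# trip with the helpers above; the file name and row values are hypothetical.
#
#   with CSVWriter('RECORD') as writer:
#       writer.writerow(['pkg/__init__.py', 'sha256=...', '123'])
#   with CSVReader(path='RECORD') as reader:
#       for row in reader:
#           print(row)   # each row comes back as a list of text values
#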
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
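#
# Illustrative sketch (not part of the original module): the 'inc' converter
# replaces 'inc://<file>' values (resolved relative to `base`) with the parsed
# JSON contents of that file when values flow through the converter machinery.
# Names and paths below are hypothetical.
#
#   cfg = Configurator({'thing': {'()': 'myapp.Thing',
#                                 'settings': 'inc://db.json'}},
#                      base='/etc/myapp')
#   thing = cfg['thing']   # instantiates myapp.Thing with 'settings' set to
#                          # the parsed contents of /etc/myapp/db.json
#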
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
        Read lines from a subprocess' output stream and either pass them to a
        progress callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
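#
# Illustrative sketch (not part of the original module): typical use of the
# mixin; the command is a hypothetical placeholder.
#
#   class Builder(SubprocessMixin):
#       pass
#
#   b = Builder(verbose=True)
#   p = b.run_command(['python', 'setup.py', 'build'])
#   # p.returncode holds the exit status; both streams were drained by reader()
#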
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
| mit | 7,577,552,194,039,158,000 | 31.893234 | 88 | 0.535732 | false |
drbild/boto | tests/unit/cloudsearch/test_search.py | 114 | 13726 | #!/usr/bin/env python
from tests.compat import mock, unittest
from httpretty import HTTPretty
import json
import requests
from boto.cloudsearch.search import SearchConnection, SearchServiceException
from boto.compat import six, map
HOSTNAME = "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com"
FULL_URL = 'http://%s/2011-02-01/search' % HOSTNAME
class CloudSearchSearchBaseTest(unittest.TestCase):
hits = [
{
'id': '12341',
'title': 'Document 1',
},
{
'id': '12342',
'title': 'Document 2',
},
{
'id': '12343',
'title': 'Document 3',
},
{
'id': '12344',
'title': 'Document 4',
},
{
'id': '12345',
'title': 'Document 5',
},
{
'id': '12346',
'title': 'Document 6',
},
{
'id': '12347',
'title': 'Document 7',
},
]
content_type = "text/xml"
response_status = 200
def get_args(self, requestline):
(_, request, _) = requestline.split(b" ")
(_, request) = request.split(b"?", 1)
args = six.moves.urllib.parse.parse_qs(request)
return args
def setUp(self):
HTTPretty.enable()
body = self.response
if not isinstance(body, bytes):
body = json.dumps(body).encode('utf-8')
HTTPretty.register_uri(HTTPretty.GET, FULL_URL,
body=body,
content_type=self.content_type,
status=self.response_status)
def tearDown(self):
HTTPretty.disable()
class CloudSearchSearchTest(CloudSearchSearchBaseTest):
response = {
'rank': '-text_relevance',
'match-expr': "Test",
'hits': {
'found': 30,
'start': 0,
'hit': CloudSearchSearchBaseTest.hits
},
'info': {
'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
'time-ms': 2,
'cpu-time-ms': 0
}
}
def test_cloudsearch_qsearch(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test')
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'q'], [b"Test"])
self.assertEqual(args[b'start'], [b"0"])
self.assertEqual(args[b'size'], [b"10"])
def test_cloudsearch_bqsearch(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(bq="'Test'")
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'bq'], [b"'Test'"])
def test_cloudsearch_search_details(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', size=50, start=20)
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'q'], [b"Test"])
self.assertEqual(args[b'size'], [b"50"])
self.assertEqual(args[b'start'], [b"20"])
def test_cloudsearch_facet_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', facet=["Author"])
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet'], [b"Author"])
def test_cloudsearch_facet_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', facet=["author", "cat"])
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet'], [b"author,cat"])
def test_cloudsearch_facet_constraint_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(
q='Test',
facet_constraints={'author': "'John Smith','Mark Smith'"})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet-author-constraints'],
[b"'John Smith','Mark Smith'"])
def test_cloudsearch_facet_constraint_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(
q='Test',
facet_constraints={'author': "'John Smith','Mark Smith'",
'category': "'News','Reviews'"})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet-author-constraints'],
[b"'John Smith','Mark Smith'"])
self.assertEqual(args[b'facet-category-constraints'],
[b"'News','Reviews'"])
def test_cloudsearch_facet_sort_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', facet_sort={'author': 'alpha'})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet-author-sort'], [b'alpha'])
def test_cloudsearch_facet_sort_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', facet_sort={'author': 'alpha',
'cat': 'count'})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet-author-sort'], [b'alpha'])
self.assertEqual(args[b'facet-cat-sort'], [b'count'])
def test_cloudsearch_top_n_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', facet_top_n={'author': 5})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet-author-top-n'], [b'5'])
def test_cloudsearch_top_n_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', facet_top_n={'author': 5, 'cat': 10})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet-author-top-n'], [b'5'])
self.assertEqual(args[b'facet-cat-top-n'], [b'10'])
def test_cloudsearch_rank_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', rank=["date"])
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'rank'], [b'date'])
def test_cloudsearch_rank_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', rank=["date", "score"])
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'rank'], [b'date,score'])
def test_cloudsearch_result_fields_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', return_fields=['author'])
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'return-fields'], [b'author'])
def test_cloudsearch_result_fields_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', return_fields=['author', 'title'])
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'return-fields'], [b'author,title'])
def test_cloudsearch_t_field_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', t={'year': '2001..2007'})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b't-year'], [b'2001..2007'])
def test_cloudsearch_t_field_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', t={'year': '2001..2007', 'score': '10..50'})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b't-year'], [b'2001..2007'])
self.assertEqual(args[b't-score'], [b'10..50'])
def test_cloudsearch_results_meta(self):
"""Check returned metadata is parsed correctly"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
# These rely on the default response which is fed into HTTPretty
self.assertEqual(results.rank, "-text_relevance")
self.assertEqual(results.match_expression, "Test")
def test_cloudsearch_results_info(self):
"""Check num_pages_needed is calculated correctly"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
# This relies on the default response which is fed into HTTPretty
self.assertEqual(results.num_pages_needed, 3.0)
def test_cloudsearch_results_matched(self):
"""
Check that information objects are passed back through the API
correctly.
"""
search = SearchConnection(endpoint=HOSTNAME)
query = search.build_query(q='Test')
results = search(query)
self.assertEqual(results.search_service, search)
self.assertEqual(results.query, query)
def test_cloudsearch_results_hits(self):
"""Check that documents are parsed properly from AWS"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
hits = list(map(lambda x: x['id'], results.docs))
# This relies on the default response which is fed into HTTPretty
self.assertEqual(
hits, ["12341", "12342", "12343", "12344",
"12345", "12346", "12347"])
def test_cloudsearch_results_iterator(self):
"""Check the results iterator"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
results_correct = iter(["12341", "12342", "12343", "12344",
"12345", "12346", "12347"])
for x in results:
self.assertEqual(x['id'], next(results_correct))
    def test_cloudsearch_results_internal_consistency(self):
        """Check that the result length matches the number of documents"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
self.assertEqual(len(results), len(results.docs))
def test_cloudsearch_search_nextpage(self):
"""Check next page query is correct"""
search = SearchConnection(endpoint=HOSTNAME)
query1 = search.build_query(q='Test')
query2 = search.build_query(q='Test')
results = search(query2)
self.assertEqual(results.next_page().query.start,
query1.start + query1.size)
self.assertEqual(query1.q, query2.q)
class CloudSearchSearchFacetTest(CloudSearchSearchBaseTest):
response = {
'rank': '-text_relevance',
'match-expr': "Test",
'hits': {
'found': 30,
'start': 0,
'hit': CloudSearchSearchBaseTest.hits
},
'info': {
'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
'time-ms': 2,
'cpu-time-ms': 0
},
'facets': {
'tags': {},
'animals': {'constraints': [{'count': '2', 'value': 'fish'}, {'count': '1', 'value': 'lions'}]},
}
}
def test_cloudsearch_search_facets(self):
#self.response['facets'] = {'tags': {}}
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test', facet=['tags'])
self.assertTrue('tags' not in results.facets)
self.assertEqual(results.facets['animals'], {u'lions': u'1', u'fish': u'2'})
class CloudSearchNonJsonTest(CloudSearchSearchBaseTest):
response = b'<html><body><h1>500 Internal Server Error</h1></body></html>'
response_status = 500
content_type = 'text/xml'
def test_response(self):
search = SearchConnection(endpoint=HOSTNAME)
with self.assertRaises(SearchServiceException):
search.search(q='Test')
class CloudSearchUnauthorizedTest(CloudSearchSearchBaseTest):
response = b'<html><body><h1>403 Forbidden</h1>foo bar baz</body></html>'
response_status = 403
content_type = 'text/html'
def test_response(self):
search = SearchConnection(endpoint=HOSTNAME)
with self.assertRaisesRegexp(SearchServiceException, 'foo bar baz'):
search.search(q='Test')
class FakeResponse(object):
status_code = 405
content = b''
class CloudSearchConnectionTest(unittest.TestCase):
cloudsearch = True
def setUp(self):
super(CloudSearchConnectionTest, self).setUp()
self.conn = SearchConnection(
endpoint='test-domain.cloudsearch.amazonaws.com'
)
def test_expose_additional_error_info(self):
mpo = mock.patch.object
fake = FakeResponse()
fake.content = b'Nopenopenope'
# First, in the case of a non-JSON, non-403 error.
with mpo(requests, 'get', return_value=fake) as mock_request:
with self.assertRaises(SearchServiceException) as cm:
self.conn.search(q='not_gonna_happen')
self.assertTrue('non-json response' in str(cm.exception))
self.assertTrue('Nopenopenope' in str(cm.exception))
# Then with JSON & an 'error' key within.
fake.content = json.dumps({
'error': "Something went wrong. Oops."
}).encode('utf-8')
with mpo(requests, 'get', return_value=fake) as mock_request:
with self.assertRaises(SearchServiceException) as cm:
self.conn.search(q='no_luck_here')
self.assertTrue('Unknown error' in str(cm.exception))
self.assertTrue('went wrong. Oops' in str(cm.exception))
| mit | -2,314,041,319,793,198,000 | 31.070093 | 108 | 0.602069 | false |
hub-cap/lady-rainicorn | rainicorn/openstack/common/exception.py | 1 | 3454 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exceptions common to OpenStack projects
"""
import logging
from rainicorn.openstack.common.gettextutils import _
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class Error(Exception):
def __init__(self, message=None):
super(Error, self).__init__(message)
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s' % (code, message))
class NotFound(Error):
pass
class UnknownScheme(Error):
msg = "Unknown scheme '%s' found in URI"
def __init__(self, scheme):
msg = self.__class__.msg % scheme
super(UnknownScheme, self).__init__(msg)
class BadStoreUri(Error):
msg = "The Store URI %s was malformed. Reason: %s"
def __init__(self, uri, reason):
msg = self.__class__.msg % (uri, reason)
super(BadStoreUri, self).__init__(msg)
class Duplicate(Error):
pass
class NotAuthorized(Error):
pass
class NotEmpty(Error):
pass
class Invalid(Error):
pass
class BadInputError(Exception):
"""Error resulting from a client sending bad input to a server"""
pass
class MissingArgumentError(Error):
pass
class DatabaseMigrationError(Error):
pass
class ClientConnectionError(Exception):
"""Error resulting from a client connecting to a server"""
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:
return f(*args, **kw)
except Exception as e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception(_('Uncaught exception'))
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
_wrap.func_name = f.func_name
return _wrap
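# Illustrative sketch (not part of the original module): wrap_exception turns
# any uncaught, non-Error exception raised by the wrapped callable into Error.
#
#   @wrap_exception
#   def risky(path):
#       return open(path).read()
#
#   # risky('/no/such/file') logs the traceback and raises Error(...) instead
#   # of the original IOError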
class OpenstackException(Exception):
"""
Base Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = "An unknown exception occurred"
def __init__(self, **kwargs):
try:
self._error_string = self.message % kwargs
except Exception as e:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise e
else:
# at least get the core message out if something happened
self._error_string = self.message
def __str__(self):
return self._error_string
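# Illustrative sketch (not part of the original module): subclasses supply a
# 'message' template which is interpolated with the constructor's keyword
# arguments, as the classes below do.
#
#   class VolumeNotFound(OpenstackException):
#       message = "Volume %(volume_id)s could not be found"
#
#   str(VolumeNotFound(volume_id='vol-1234'))
#   # -> 'Volume vol-1234 could not be found'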
class MalformedRequestBody(OpenstackException):
message = "Malformed message body: %(reason)s"
class InvalidContentType(OpenstackException):
message = "Invalid content type %(content_type)s"
| apache-2.0 | 8,148,286,690,390,137,000 | 23.323944 | 78 | 0.64447 | false |
mizzao/ggplot | ggplot/stats/stat_function.py | 12 | 4439 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from ggplot.utils import make_iterable_ntimes
from ggplot.utils.exceptions import GgplotError
from .stat import stat
class stat_function(stat):
"""
    Superimpose a function onto a plot
Parameters
----------
x : list, 1darray
x values of data
fun : function
Function to draw.
n : int
Number of points to interpolate over. Must be greater than zero.
Defaults to 101.
color : str
Color to draw function with.
args : list, dict, object
List or dict of additional arguments to pass to function. If neither
        list nor dict, the object is passed as the second argument.
Examples
--------
Sin vs cos.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
gg = ggplot(pd.DataFrame({'x':np.arange(10)}),aes(x='x'))
gg = gg + stat_function(fun=np.sin,color="red")
gg = gg + stat_function(fun=np.cos,color="blue")
print(gg)
Compare random sample density to normal distribution.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.normal(size=100)
# normal distribution function
def dnorm(n):
return (1.0 / np.sqrt(2 * np.pi)) * (np.e ** (-0.5 * (n ** 2)))
data = pd.DataFrame({'x':x})
gg = ggplot(aes(x='x'),data=data) + geom_density()
gg = gg + stat_function(fun=dnorm,n=150)
print(gg)
Passing additional arguments to function as list.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.randn(100)
to_the_power_of = lambda n, p: n ** p
y = x ** 3
y += np.random.randn(100) # add noise
data = pd.DataFrame({'x':x,'y':y})
gg = ggplot(aes(x='x',y='y'),data=data) + geom_point()
gg = gg + stat_function(fun=to_the_power_of,args=[3])
print(gg)
Passing additional arguments to function as dict.
.. plot::
:include-source:
import scipy
import numpy as np
import pandas as pd
from ggplot import *
def dnorm(x, mean, var):
return scipy.stats.norm(mean,var).pdf(x)
data = pd.DataFrame({'x':np.arange(-5,6)})
gg = ggplot(aes(x='x'),data=data)
gg = gg + stat_function(fun=dnorm,color="blue",args={'mean':0.0,'var':0.2})
gg = gg + stat_function(fun=dnorm,color="red",args={'mean':0.0,'var':1.0})
gg = gg + stat_function(fun=dnorm,color="yellow",args={'mean':0.0,'var':5.0})
gg = gg + stat_function(fun=dnorm,color="green",args={'mean':-2.0,'var':0.5})
print(gg)
"""
# TODO: Should not have a required aesthetic, use the scale information
    # maybe that is where the "scale training" helps
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'geom': 'path', 'position': 'identity', 'fun': None,
'n': 101, 'args': None}
_aes_renames = {'size': 'linewidth', 'linetype': 'linestyle'}
CREATES = {'y'}
def _calculate(self, data):
x = data.pop('x')
fun = self.params['fun']
n = self.params['n']
args = self.params['args']
if not hasattr(fun, '__call__'):
raise GgplotError("stat_function requires parameter 'fun' to be " +
"a function or any other callable object")
old_fun = fun
if isinstance(args,list):
fun = lambda x: old_fun(x, *args)
elif isinstance(args,dict):
fun = lambda x: old_fun(x, **args)
elif args is not None:
fun = lambda x: old_fun(x, args)
else:
fun = lambda x: old_fun(x)
x = np.linspace(x.min(), x.max(),n)
y = list(map(fun, x))
new_data = pd.DataFrame({'x': x, 'y': y})
# Copy the other aesthetics into the new dataframe
        # Don't copy any previous 'y' assignments
try:
del data['y']
except KeyError:
pass
n = len(x)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
| bsd-2-clause | -3,369,415,084,848,312,000 | 28.791946 | 85 | 0.548772 | false |
BhavySinghal/qassam-le2 | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
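# Example (illustrative, not part of the original script): with kallsyms
# entries [(0x1000, 'foo'), (0x2000, 'bar')], get_sym(0x2010) returns
# ('bar', 0x10) -- the enclosing symbol and the offset into it.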
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 | -4,712,390,689,633,006,000 | 22.173333 | 90 | 0.640391 | false |
CiscoSystems/jujucharm-n1k | charms/precise/swift-storage/hooks/misc_utils.py | 1 | 2174 | from charmhelpers.contrib.storage.linux.utils import (
is_block_device,
zap_disk,
)
from charmhelpers.contrib.storage.linux.loopback import (
ensure_loopback_device,
)
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
)
from charmhelpers.core.host import (
mounts,
umount,
)
from charmhelpers.core.hookenv import (
log,
INFO,
ERROR,
)
DEFAULT_LOOPBACK_SIZE = '5G'
def ensure_block_device(block_device):
'''
Confirm block_device, create as loopback if necessary.
:param block_device: str: Full path of block device to ensure.
:returns: str: Full path of ensured block device.
'''
_none = ['None', 'none', None]
if (block_device in _none):
log('prepare_storage(): Missing required input: '
'block_device=%s.' % block_device, level=ERROR)
raise
if block_device.startswith('/dev/'):
bdev = block_device
elif block_device.startswith('/'):
_bd = block_device.split('|')
if len(_bd) == 2:
bdev, size = _bd
else:
bdev = block_device
size = DEFAULT_LOOPBACK_SIZE
bdev = ensure_loopback_device(bdev, size)
else:
bdev = '/dev/%s' % block_device
if not is_block_device(bdev):
log('Failed to locate valid block device at %s' % bdev, level=ERROR)
raise
return bdev
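# Illustrative sketch (not part of the original charm code); the device
# paths below are hypothetical placeholders.
#
#   ensure_block_device('/dev/vdb')            # -> '/dev/vdb'
#   ensure_block_device('/srv/swift.img|5G')   # -> loopback device backed by /srv/swift.img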
def clean_storage(block_device):
'''
Ensures a block device is clean. That is:
- unmounted
- any lvm volume groups are deactivated
- any lvm physical device signatures removed
- partition table wiped
:param block_device: str: Full path to block device to clean.
'''
for mp, d in mounts():
if d == block_device:
log('clean_storage(): Found %s mounted @ %s, unmounting.' %
(d, mp), level=INFO)
umount(mp, persist=True)
if is_lvm_physical_volume(block_device):
deactivate_lvm_volume_group(block_device)
remove_lvm_physical_volume(block_device)
else:
zap_disk(block_device)
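# Illustrative sketch (not part of the original charm code): typical call
# order when preparing a storage device; the path is a hypothetical
# placeholder.
#
#   bdev = ensure_block_device('/dev/vdb')
#   clean_storage(bdev)   # unmount, strip LVM signatures, zap the partition table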
| apache-2.0 | 633,189,320,005,733,400 | 24.880952 | 76 | 0.613615 | false |
tammoippen/nest-simulator | pynest/nest/tests/test_split_simulation.py | 13 | 2259 | # -*- coding: utf-8 -*-
#
# test_split_simulation.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import nest
class TestSplit(unittest.TestCase):
steps = 100
time = 100
def __init__(self, *args, **kwargs):
super(TestSplit, self).__init__(*args, **kwargs)
self.spike = None
def setup(self):
nest.ResetKernel()
nest.SetDefaults('spike_detector', {'withtime': True})
n1 = nest.Create("iaf_psc_alpha")
nest.SetStatus(n1, {"I_e": 376.0})
self.spike = spike = nest.Create('spike_detector')
nest.Connect(n1, spike)
def runner(self, time, f):
spike = self.spike
nest.SetStatus(spike, [{'n_events': 0}])
f(time)
spikes = nest.GetStatus(spike, 'events')[0]
senders, times = spikes['senders'], spikes['times']
return zip(senders, times)
def runs(self):
self.setup()
steps, time = self.steps, self.time
with nest.RunManager():
return [
(s, t)
for _ in range(steps)
for s, t in self.runner(time, nest.Run)
]
def simulate(self):
self.setup()
steps, time = self.steps, self.time
return [
(s, t)
for s, t in self.runner(time * steps, nest.Simulate)
]
def test_split_match(self):
r0 = self.runs()
r1 = self.simulate()
self.assertEqual(r0, r1)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestSplit)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-2.0 | 6,234,758,310,035,019,000 | 26.888889 | 70 | 0.609562 | false |
Kryz/sentry | src/sentry/migrations/0129_auto__chg_field_release_id__chg_field_pendingteammember_id__chg_field_.py | 36 | 26026 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
"""
This migration skips all bigint upgrades as they're not generally useful
for organizations, and they're incredibly expensive to apply
"""
def backwards(self, orm):
pass
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| bsd-3-clause | 4,321,307,139,268,779,500 | 82.684887 | 223 | 0.554638 | false |
JohnOrlando/gnuradio-bitshark | gr-radar-mono/src/python/usrp_radar_mono.py | 11 | 3905 | #!/usr/bin/env python
#
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio.radar_mono import radar
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys, time
n2s = eng_notation.num_to_str
logfile = None
def process_echo(echo):
global logfile
if logfile is not None:
logfile.write(echo)
def main():
global logfile
parser = OptionParser(option_class=eng_option)
parser.add_option("-T", "--tx-subdev-spec", type="subdev", default=None,
help="use transmitter board side A or B (default is first found)")
parser.add_option("-R", "--rx-subdev-spec", type="subdev", default=None,
help="use receiver board side A or B (default is first found)")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option("-f", "--frequency", type="eng_float", default=0.0,
help="set transmitter center frequency to FREQ in Hz, default is %default", metavar="FREQ")
parser.add_option("-w", "--chirp-width", type="eng_float", default=32e6,
help="set LFM chirp bandwidth in Hz, default is %default", metavar="FREQ")
parser.add_option("-a", "--amplitude", type="eng_float", default=15,
help="set waveform amplitude in % full scale, default is %default,")
parser.add_option("", "--ton", type="eng_float", default=5e-6,
help="set pulse on period in seconds, default is %default,")
parser.add_option("", "--tsw", type="eng_float", default=0.0,
help="set transmitter switching period in seconds, default is %default,")
parser.add_option("", "--tlook", type="eng_float", default=5e-6,
help="set receiver look time in seconds, default is %default,")
parser.add_option("", "--prf", type="eng_float", default=100,
help="set pulse repetition frequency in Hz, default is %default,")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="enable verbose output, default is disabled")
parser.add_option("-D", "--debug", action="store_true", default=False,
help="enable debugging output, default is disabled")
parser.add_option("-F", "--filename", default=None,
help="log received echos to file")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit(1)
if options.filename is not None:
if options.verbose:
print "Logging echo records to file: ", options.filename
logfile = open(options.filename, 'wb')
r = radar(options, process_echo)
r.set_ton(options.ton)
r.set_tsw(options.tsw)
r.set_tlook(options.tlook)
r.set_prf(options.prf)
r.set_amplitude(options.amplitude)
r.set_freq(options.frequency, options.chirp_width)
r.start()
raw_input("Press ENTER to stop.")
r.stop()
if logfile is not None:
logfile.close()
if __name__ == "__main__":
main()
| gpl-3.0 | 327,595,003,981,853,600 | 39.677083 | 113 | 0.648143 | false |
charanpald/APGL | apgl/graph/test/GeneralVertexListTest.py | 1 | 1641 |
from apgl.graph.GeneralVertexList import GeneralVertexList
from apgl.graph.test.AbstractVertexListTest import AbstractVertexListTest
from apgl.util.PathDefaults import PathDefaults
import unittest
import logging
class GeneralVertexListTest(unittest.TestCase, AbstractVertexListTest):
def setUp(self):
self.VListType = GeneralVertexList
self.numVertices = 10
self.vList = GeneralVertexList(self.numVertices)
self.emptyVertex = None
self.initialise()
def testConstructor(self):
self.assertEquals(self.vList.getNumVertices(), self.numVertices)
def testSaveLoad(self):
try:
vList = GeneralVertexList(self.numVertices)
vList.setVertex(0, "abc")
vList.setVertex(1, 12)
vList.setVertex(2, "num")
tempDir = PathDefaults.getTempDir()
fileName = tempDir + "vList"
vList.save(fileName)
vList2 = GeneralVertexList.load(fileName)
for i in range(self.numVertices):
self.assertEquals(vList.getVertex(i), vList2.getVertex(i))
except IOError as e:
logging.warn(e)
pass
def testAddVertices(self):
vList = GeneralVertexList(10)
vList.setVertex(1, 2)
self.assertEquals(vList.getNumVertices(), 10)
self.assertEquals(vList[1], 2)
vList.addVertices(5)
self.assertEquals(vList.getNumVertices(), 15)
vList.setVertex(11, 2)
self.assertEquals(vList[1], 2)
self.assertEquals(vList[1], 2)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -719,073,170,517,018,800 | 30.557692 | 74 | 0.634369 | false |
cdjones32/vertx-web | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py | 168 | 26964 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
__all__ = [
'get_config_h_filename',
'get_config_var',
'get_config_vars',
'get_makefile_filename',
'get_path',
'get_path_names',
'get_paths',
'get_platform',
'get_python_version',
'get_scheme_names',
'parse_config_h',
]
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
_cfg_read = False
def _ensure_cfg_read():
global _cfg_read
if not _cfg_read:
from distlib.resources import finder
backport_package = __name__.rsplit('.', 1)[0]
_finder = finder(backport_package)
_cfgfile = _finder.find('sysconfig.cfg')
assert _cfgfile, 'sysconfig.cfg exists'
with _cfgfile.as_stream() as s:
_SCHEMES.readfp(s)
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_SCHEMES.set(scheme, 'include', '{srcdir}/Include')
_SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
_cfg_read = True
_SCHEMES = configparser.RawConfigParser()
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
_ensure_cfg_read()
if config.has_section('globals'):
globals = config.items('globals')
else:
globals = tuple()
sections = config.sections()
for section in sections:
if section == 'globals':
continue
for option, value in globals:
if config.has_option(section, option):
continue
config.set(section, option, value)
config.remove_section('globals')
# now expanding local variables defined in the cfg file
#
for section in config.sections():
variables = dict(config.items(section))
def _replacer(matchobj):
name = matchobj.group(1)
if name in variables:
return variables[name]
return matchobj.group(0)
for option, value in config.items(section):
config.set(section, option, _VAR_REPL.sub(_replacer, value))
#_expand_globals(_SCHEMES)
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
"""In the string `path`, replace tokens like {some.thing} with the
corresponding value from the map `local_vars`.
If there is no corresponding value, leave the token unchanged.
"""
def _replacer(matchobj):
name = matchobj.group(1)
if name in local_vars:
return local_vars[name]
elif name in os.environ:
return os.environ[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, path)
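# For illustration, with hypothetical inputs:
#   _subst_vars('{userbase}/lib/python', {'userbase': '/home/alice/.local'})
# returns '/home/alice/.local/lib/python'; a name found in neither local_vars
# nor os.environ is left in place as the literal '{name}' token.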
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _SCHEMES.items(scheme):
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def format_value(value, vars):
def _replacer(matchobj):
name = matchobj.group(1)
if name in vars:
return vars[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, value)
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
return env_base
else:
return joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
if env_base:
return env_base
else:
return joinuser("~", "Library", framework, "%d.%d" %
sys.version_info[:2])
if env_base:
return env_base
else:
return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if (name.startswith('PY_') and
name[3:] in renamed_variables):
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
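# A small hypothetical example of what _parse_makefile() produces: a Makefile
# containing
#     prefix=/usr
#     LIBDIR=$(prefix)/lib
# yields a dictionary that includes {'prefix': '/usr', 'LIBDIR': '/usr/lib'};
# $(...) and ${...} references are expanded from earlier definitions, falling
# back to os.environ when a name is not defined in the file itself.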
def get_makefile_filename():
"""Return the path of the Makefile."""
if _PYTHON_BUILD:
return os.path.join(_PROJECT_BASE, "Makefile")
if hasattr(sys, 'abiflags'):
config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
else:
config_dir_name = 'config'
return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# load the installed Makefile:
makefile = get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
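# For illustration, a hypothetical pyconfig.h fragment such as
#     #define HAVE_UNISTD_H 1
#     /* #undef HAVE_LIBREADLINE */
# parses to {'HAVE_UNISTD_H': 1, 'HAVE_LIBREADLINE': 0}: defined values are
# converted to int when possible, and #undef'd names are recorded as 0.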
def get_config_h_filename():
"""Return the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Return a tuple containing the schemes names."""
return tuple(sorted(_SCHEMES.sections()))
def get_path_names():
"""Return a tuple containing the paths names."""
# xxx see if we want a static list
return _SCHEMES.options('posix_prefix')
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
_ensure_cfg_read()
if expand:
return _expand_vars(scheme, vars)
else:
return dict(_SCHEMES.items(scheme))
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# distutils2 module.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
try:
_CONFIG_VARS['abiflags'] = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
_CONFIG_VARS['abiflags'] = ''
if os.name in ('nt', 'os2'):
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
if sys.version >= '2.6':
_CONFIG_VARS['userbase'] = _getuserbase()
if 'srcdir' not in _CONFIG_VARS:
_CONFIG_VARS['srcdir'] = _PROJECT_BASE
else:
_CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if _PYTHON_BUILD and os.name == "posix":
base = _PROJECT_BASE
try:
cwd = os.getcwd()
except OSError:
cwd = None
if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
base != cwd):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_CONFIG_VARS[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_CONFIG_VARS[key] = flags
# If we're on OSX 10.5 or later and the user tries to
# compiles an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
m = re.search('-isysroot\s+(\S+)', CFLAGS)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
_CONFIG_VARS[key] = flags
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
return get_config_vars().get(name)
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile(r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if True:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if not macver:
macver = macrelease
if macver:
release = macver
osname = "macosx"
if ((macrelease + '.') >= '10.4.' and
'-arch' in get_config_vars().get('CFLAGS', '').strip()):
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall('-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
"""Display all information sysconfig detains."""
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars())
if __name__ == '__main__':
_main()
| apache-2.0 | -8,657,197,615,827,725,000 | 33.218274 | 80 | 0.540795 | false |
lungetech/luigi | luigi/contrib/sge.py | 11 | 11055 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SGE batch system Tasks.
Adapted by Jake Feala (@jfeala) from
`LSF extension <https://github.com/dattalab/luigi/blob/lsf/luigi/lsf.py>`_
by Alex Wiltschko (@alexbw)
Maintained by Jake Feala (@jfeala)
Sun Grid Engine is a job scheduler used to allocate compute resources on a
shared cluster. Jobs are submitted using the ``qsub`` command and monitored
using ``qstat``. To get started, install luigi on all nodes.
To run luigi workflows on an SGE cluster, subclass
:class:`luigi.contrib.sge.SGEJobTask` as you would any :class:`luigi.Task`,
but override the ``work()`` method, instead of ``run()``, to define the job
code. Then, run your Luigi workflow from the master node, assigning > 1
``workers`` in order to distribute the tasks in parallel across the cluster.
The following is an example usage (and can also be found in ``sge_tests.py``)
.. code-block:: python
import logging
import luigi
from luigi.contrib.sge import SGEJobTask
logger = logging.getLogger('luigi-interface')
class TestJobTask(SGEJobTask):
i = luigi.Parameter()
def work(self):
logger.info('Running test job...')
with open(self.output().path, 'w') as f:
f.write('this is a test')
def output(self):
return luigi.LocalTarget(os.path.join('/home', 'testfile_' + str(self.i)))
if __name__ == '__main__':
tasks = [TestJobTask(i=str(i), n_cpu=i+1) for i in range(3)]
luigi.build(tasks, local_scheduler=True, workers=3)
The ``n-cpu`` parameter allows you to define different compute resource
requirements (or slots, in SGE terms) for each task. In this example, the
third Task asks for 3 CPU slots. If your cluster only contains nodes with
2 CPUs, this task will hang indefinitely in the queue. See the docs for
:class:`luigi.contrib.sge.SGEJobTask` for other SGE parameters. As for any
task, you can also set these in your luigi configuration file as shown below.
The default values below were matched to the values used by MIT StarCluster,
an open-source SGE cluster manager for use with Amazon EC2::
[SGEJobTask]
shared-tmp-dir = /home
parallel-env = orte
n-cpu = 2
"""
# This extension is modeled after the hadoop.py approach.
#
# Implementation notes
# The procedure:
# - Pickle the class
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner function hits the work button on it
import os
import subprocess
import time
import sys
import logging
import random
import shutil
try:
import cPickle as pickle
except ImportError:
import pickle
import luigi
import luigi.hadoop
from luigi.contrib import sge_runner
logger = logging.getLogger('luigi-interface')
logger.propagate = 0
POLL_TIME = 5 # decided to hard-code rather than configure here
def _parse_qstat_state(qstat_out, job_id):
"""Parse "state" column from `qstat` output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`qstat` output is empty or job_id is not found.
"""
if qstat_out.strip() == '':
return 'u'
lines = qstat_out.split('\n')
# skip past header
while not lines.pop(0).startswith('---'):
pass
for line in lines:
if line:
job, prior, name, user, state = line.strip().split()[0:5]
if int(job) == int(job_id):
return state
return 'u'
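# Sketch of the qstat layout this parser assumes (the row values below are
# hypothetical): a header line, a '---' separator, then one line per job:
#   job-ID  prior    name         user   state  submit/start at      queue
#   ------------------------------------------------------------------------
#   12345   0.55500  TestJobTask  alice  r      01/01/2016 12:00:00  all.q@node1
# _parse_qstat_state(out, 12345) would return 'r' here; empty output or an
# unlisted job id yields 'u'.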
def _parse_qsub_job_id(qsub_out):
"""Parse job id from qsub output string.
Assume format:
"Your job <job_id> ("<job_name>") has been submitted"
"""
return int(qsub_out.split()[2])
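# For example (hypothetical job id):
#   _parse_qsub_job_id('Your job 12345 ("TestJobTask") has been submitted')
# returns 12345.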
def _build_qsub_command(cmd, job_name, outfile, errfile, pe, n_cpu):
"""Submit shell command to SGE queue via `qsub`"""
qsub_template = """echo {cmd} | qsub -o ":{outfile}" -e ":{errfile}" -V -r y -pe {pe} {n_cpu} -N {job_name}"""
return qsub_template.format(
cmd=cmd, job_name=job_name, outfile=outfile, errfile=errfile,
pe=pe, n_cpu=n_cpu)
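# With illustrative arguments,
#   _build_qsub_command('python sge_runner.py "/home/tmp"', 'TestJobTask',
#                       '/home/tmp/job.out', '/home/tmp/job.err', 'orte', 2)
# produces (as a single line; the paths here are examples only):
#   echo python sge_runner.py "/home/tmp" | qsub -o ":/home/tmp/job.out"
#     -e ":/home/tmp/job.err" -V -r y -pe orte 2 -N TestJobTask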
class SGEJobTask(luigi.Task):
"""Base class for executing a job on SunGrid Engine
Override ``work()`` (rather than ``run()``) with your job code.
Parameters:
- n_cpu: Number of CPUs (or "slots") to allocate for the Task. This
value is passed as ``qsub -pe {pe} {n_cpu}``
- parallel_env: SGE parallel environment name. The default is "orte",
the parallel environment installed with MIT StarCluster. If you
are using a different cluster environment, check with your
sysadmin for the right pe to use. This value is passed as {pe}
to the qsub command above.
- shared_tmp_dir: Shared drive accessible from all nodes in the cluster.
Task classes and dependencies are pickled to a temporary folder on
      this drive. The default is ``/home``, the NFS share location set up
by StarCluster
"""
n_cpu = luigi.IntParameter(default=2, significant=False)
shared_tmp_dir = luigi.Parameter(default='/home', significant=False)
parallel_env = luigi.Parameter(default='orte', significant=False)
def _fetch_task_failures(self):
if not os.path.exists(self.errfile):
logger.info('No error file')
return []
with open(self.errfile, "r") as f:
errors = f.readlines()
if errors == []:
return errors
if errors[0].strip() == 'stdin: is not a tty': # SGE complains when we submit through a pipe
errors.pop(0)
return errors
def _init_local(self):
# Set up temp folder in shared directory (trim to max filename length)
base_tmp_dir = self.shared_tmp_dir
random_id = '%016x' % random.getrandbits(64)
folder_name = self.task_id + '-' + random_id
self.tmp_dir = os.path.join(base_tmp_dir, folder_name)
max_filename_length = os.fstatvfs(0).f_namemax
self.tmp_dir = self.tmp_dir[:max_filename_length]
logger.info("Tmp dir: %s", self.tmp_dir)
os.makedirs(self.tmp_dir)
# Dump the code to be run into a pickle file
logging.debug("Dumping pickled class")
self._dump(self.tmp_dir)
# Make sure that all the class's dependencies are tarred and available
logging.debug("Tarballing dependencies")
# Grab luigi and the module containing the code to be run
packages = [luigi] + [__import__(self.__module__, None, None, 'dummy')]
luigi.hadoop.create_packages_archive(packages, os.path.join(self.tmp_dir, "packages.tar"))
def run(self):
self._init_local()
self._run_job()
# The procedure:
# - Pickle the class
# - Tarball the dependencies
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner class untars the dependencies
# - Runner function hits the button on the class's work() method
def work(self):
"""Override this method, rather than ``run()``, for your actual work."""
pass
def _dump(self, out_dir=''):
"""Dump instance to file."""
self.job_file = os.path.join(out_dir, 'job-instance.pickle')
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace('(c__main__', "(c" + module_name)
open(self.job_file, "w").write(d)
else:
pickle.dump(self, open(self.job_file, "w"))
def _run_job(self):
# Build a qsub argument that will run sge_runner.py on the directory we've specified
runner_path = sge_runner.__file__
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
job_str = 'python {0} "{1}"'.format(runner_path, self.tmp_dir) # enclose tmp_dir in quotes to protect from special escape chars
# Build qsub submit command
self.outfile = os.path.join(self.tmp_dir, 'job.out')
self.errfile = os.path.join(self.tmp_dir, 'job.err')
submit_cmd = _build_qsub_command(job_str, self.task_family, self.outfile,
self.errfile, self.parallel_env, self.n_cpu)
logger.debug('qsub command: \n' + submit_cmd)
# Submit the job and grab job ID
output = subprocess.check_output(submit_cmd, shell=True)
self.job_id = _parse_qsub_job_id(output)
logger.debug("Submitted job to qsub with response:\n" + output)
self._track_job()
# Now delete the temporaries, if they're there.
if self.tmp_dir and os.path.exists(self.tmp_dir):
logger.info('Removing temporary directory %s' % self.tmp_dir)
shutil.rmtree(self.tmp_dir)
def _track_job(self):
while True:
# Sleep for a little bit
time.sleep(POLL_TIME)
# See what the job's up to
            # ASSUMPTION: a plain `qstat` call lists this user's jobs, including this one
qstat_out = subprocess.check_output(['qstat'])
sge_status = _parse_qstat_state(qstat_out, self.job_id)
if sge_status == 'r':
logger.info('Job is running...')
elif sge_status == 'qw':
logger.info('Job is pending...')
elif 'E' in sge_status:
logger.error('Job has FAILED:\n' + '\n'.join(self._fetch_task_failures()))
break
elif sge_status == 't' or sge_status == 'u':
# Then the job could either be failed or done.
errors = self._fetch_task_failures()
if not errors:
logger.info('Job is done')
else:
logger.error('Job has FAILED:\n' + '\n'.join(errors))
break
else:
logger.info('Job status is UNKNOWN!')
logger.info('Status is : %s' % sge_status)
raise Exception("job status isn't one of ['r', 'qw', 'E*', 't', 'u']: %s" % sge_status)
class LocalSGEJobTask(SGEJobTask):
"""A local version of SGEJobTask, for easier debugging.
This version skips the ``qsub`` steps and simply runs ``work()``
on the local node, so you don't need to be on an SGE cluster to
use your Task in a test workflow.
"""
def run(self):
self.work()
| apache-2.0 | -4,173,189,063,609,959,000 | 35.127451 | 136 | 0.626142 | false |
gauravbose/digital-menu | digimenu2/tests/initial_sql_regress/tests.py | 13 | 1556 | from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model
from django.db import DEFAULT_DB_ALIAS, connections
from django.test import TestCase, override_settings
from .models import Simple
class InitialSQLTests(TestCase):
"""
The format of the included SQL file for this test suite is important.
It must end with a trailing newline in order to test the fix for #2161.
"""
def test_initial_sql(self):
"""
As pointed out by #14661, test data loaded by custom SQL
can't be relied upon; as a result, the test framework flushes the
data contents before every test. This test validates that this has
occurred.
"""
self.assertEqual(Simple.objects.count(), 0)
def test_custom_sql(self):
"""
Simulate the custom SQL loading by migrate.
"""
connection = connections[DEFAULT_DB_ALIAS]
custom_sql = custom_sql_for_model(Simple, no_style(), connection)
with connection.cursor() as cursor:
for sql in custom_sql:
cursor.execute(sql)
self.assertEqual(Simple.objects.count(), 9)
self.assertEqual(
Simple.objects.get(name__contains='placeholders').name,
'"100%" of % are not placeholders'
)
@override_settings(DEBUG=True)
def test_custom_sql_debug(self):
"""
Same test, ensure that CursorDebugWrapper doesn't alter sql loading
(#3485).
"""
self.test_custom_sql()
| bsd-3-clause | 9,166,358,305,554,776,000 | 33.577778 | 75 | 0.641388 | false |
blaxter/hamster-applet | hamster/storage.py | 1 | 4165 | # - coding: utf-8 -
# Copyright (C) 2007 Patryk Zawadzki <patrys at pld-linux.org>
# Copyright (C) 2007-2009 Toms Baugis <[email protected]>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
import datetime
class Storage(object):
def __init__(self, parent):
self.parent = parent
def run_fixtures(self):
pass
def dispatch(self, event, data):
self.parent.dispatch(event, data)
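    # Note on the self.__xxx calls in the wrappers below: Python name-mangles
    # them to self._Storage__xxx, so the concrete backend is expected to be a
    # subclass that is itself named Storage (or that defines the mangled names
    # directly) and that implements those private methods; this base class
    # only adds the change notifications dispatched to the parent.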
def get_fact(self, id):
return self.__get_fact(id)
def add_fact(self, activity_name, start_time = None, end_time = None):
result = self.__add_fact(activity_name, start_time, end_time)
if result:
self.dispatch('day_updated', result['start_time'])
return result
def touch_fact(self, fact, end_time = None):
end_time = end_time or datetime.datetime.now()
result = self.__touch_fact(fact, end_time)
self.dispatch('day_updated', fact['start_time'])
return result
def get_facts(self, date, end_date = None, category_id = None):
return self.__get_facts(date, end_date, category_id)
def get_popular_categories(self):
return self.__get_popular_categories()
def remove_fact(self, fact_id):
fact = self.get_fact(fact_id)
if fact:
self.__remove_fact(fact_id)
self.dispatch('day_updated', fact['start_time'])
def get_activities(self, category_id = None):
return self.__get_activities(category_id = category_id)
def get_sorted_activities(self):
return self.__get_sorted_activities()
def get_autocomplete_activities(self):
return self.__get_autocomplete_activities()
def get_last_activity(self):
return self.__get_last_activity()
def remove_activity(self, id):
result = self.__remove_activity(id)
self.dispatch('activity_updated', ())
return result
def remove_category(self, id):
self.__remove_category(id)
self.dispatch('activity_updated', ())
def move_activity(self, source_id, target_order, insert_after = True):
self.__move_activity(source_id, target_order, insert_after)
self.dispatch('activity_updated', ())
def change_category(self, id, category_id):
changed = self.__change_category(id, category_id)
if changed:
self.dispatch('activity_updated', ())
return changed
def swap_activities(self, id1, priority1, id2, priority2):
res = self.__swap_activities(id1, priority1, id2, priority2)
self.dispatch('activity_updated', ())
return res
def update_activity(self, id, name, category_id):
self.__update_activity(id, name, category_id)
self.dispatch('activity_updated', ())
def add_activity(self, name, category_id = -1):
new_id = self.__add_activity(name, category_id)
self.dispatch('activity_updated', ())
return new_id
def update_category(self, id, name):
self.__update_category(id, name)
self.dispatch('activity_updated', ())
def add_category(self, name):
res = self.__add_category(name)
self.dispatch('activity_updated', ())
return res
def get_category_list(self):
return self.__get_category_list()
def get_category_by_name(self, category):
return self.__get_category_by_name(category)
def get_activity_by_name(self, activity, category_id):
return self.__get_activity_by_name(activity, category_id)
| gpl-3.0 | 7,181,032,993,825,791,000 | 32.861789 | 75 | 0.642977 | false |
Mazecreator/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops_test.py | 7 | 205973 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
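# For illustration (hypothetical values): labels [[0, 1, 0], [1, 0, 1]] produce
# indices [[0, 0], [1, 0], [1, 1]], values [1, 0, 2] and shape [2, 3]; each
# value is the class id (column) of a 1 entry, packed along the last dimension.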
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_local_variables(test_case, expected):
test_case.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/total_tensor:0',
'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_local_variables(self, ('my_accuracy/count:0',
'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
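      # Only the first two samples carry a non-zero weight, and both of them
      # ([0] vs. [0] and [1] vs. [1]) are predicted correctly, so the
      # weighted accuracy is exactly 1.0.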
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_local_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tp, tp_update_op = metrics.streaming_true_positives(predictions,
labels)
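          # Exactly one cell (row 0, column 2) has both prediction and label
          # equal to 1, so the streaming true-positive count is 1.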
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
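      # The single true positive (row 0, column 2) is weighted by the
      # scalar 37.0, so the streaming count becomes 37.0.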
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0),
(0, 1, 1))
_assert_local_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(predictions,
labels)
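          # Cells (row 0, column 1) and (row 1, column 0) have a label of 1
          # but a prediction of 0, so the false-negative count is 2.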
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
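      # The two false negatives fall in rows 0 and 1, which carry per-row
      # weights of 3.0 and 5.0, so the weighted count is 3.0 + 5.0 = 8.0.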
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0),
(0, 1, 1))
_assert_local_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(predictions,
labels)
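          # Four cells predict 1 against a label of 0: (0, 0), (1, 1), (1, 2)
          # and (1, 3), so the false-positive count is 4.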
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)))
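      # The four false positives pick up per-cell weights 1.0, 11.0, 13.0 and
      # 17.0, so the weighted count is 42.0.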
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0),
(0, 1, 1))
_assert_local_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(predictions,
labels)
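          # Cell (0, 3) plus the entire all-zero third row are predicted 0
          # with a label of 0, so the true-negative count is 5.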
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
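      # The (1, 4) weight row broadcasts across all three rows: the true
      # negatives pick up weight 5.0 at cell (0, 3) plus 0.0 + 2.0 + 3.0 + 5.0
      # for the all-zero third row, so the weighted count is 15.0.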
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
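    # Counting predictions above each threshold, the positives (labels of 1)
    # score 0.2, 0.8 and 0.2, so the true-positive counts are 3, 1 and 0 at
    # thresholds 0.15, 0.5 and 0.85 respectively.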
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives_at_thresholds(
        (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
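    # There are 9 negative cells in total, so the true-negative counts are the
    # complements of the false-positive counts at the same thresholds:
    # 9 - 7 = 2, 9 - 4 = 5 and 9 - 2 = 7.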
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
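      # True positives sit at (0, 2) with weight 3 and (1, 0) with weight 4;
      # predicted positives additionally include (0, 0) with weight 1 and
      # (1, 2) with weight 2.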
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('recall/false_negatives/count:0',
'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
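      # True positives sit at (0, 2) with weight 3 and (1, 3) with weight 1;
      # the remaining actual positives, (0, 1) and (1, 0), carry weights 2
      # and 4.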
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
def testManyValuesROC(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])
class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testPredictionsOutOfRange(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
_, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def np_auc(self, predictions, labels, weights):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
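    # The scores are the labels plus Gaussian noise, clipped to [0, 1], so
    # they are informative but imperfect predictors of the labels.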
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples), np.random.exponential(
scale=1.0, size=num_samples)):
expected_auc = self.np_auc(predictions, labels, weights)
with self.test_session() as sess:
        enqueue_ops = [[] for _ in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since this is only approximate, we can't expect a 6-digit match.
        # With a higher number of samples/thresholds the accuracy should
        # improve.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_local_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_local_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, specificity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, weights=weights, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.675, sess.run(update_op))
self.assertAlmostEqual(0.675, specificity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_local_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates, then verify idempotency.
sess.run([prec_op, rec_op])
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
sess.run([prec_op, rec_op])
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
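    # The epsilon term guards against division by zero when there are no
    # predicted positives (precision) or no actual positives (recall).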
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.streaming_precision_at_thresholds(tf_predictions,
tf_labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(tf_predictions,
tf_labels,
thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6-digit match.
      # With a higher number of samples/thresholds the accuracy should
      # improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_local_variables(self, ('recall_at_1/count:0',
'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.test_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
sp_labels = sparse_tensor.SparseTensorValue(
indices=np.array([[0,], [1,], [2,]], np.int64),
values=np.array([2, 7, 8], np.int64),
dense_shape=np.array([10,], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
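    # The expected average precision at k is the sum of precision@i over the
    # hit positions i <= k, divided by k (each example here has at least k
    # labels, so the divisor is simply k).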
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
    # For both examples combined, we expect both precision and average
    # precision to be the average over the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [(ex1 + ex2) / 2
for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
    # For weighted examples, we expect streaming average precision to be the
    # weighted average over the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(shape=(2, None),
dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = (2, 1)
    # Fails since the rank of predictions_idx is less than two.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected_precision = 0.5
with self.test_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
class StreamingSparseRecallTest(test.TestCase):
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
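    # Weights are per example; the expected weighted recall below is
    # sum(weight * tp) / sum(weight * (tp + fn)), and is NaN when the
    # denominator is 0.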
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=2.0 / 2,
class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=1.0 / 1,
class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=0.0 / 1,
class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4],
[0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0],
[0, 0, 0, 1]]
expected_recall = 0.5
with self.test_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_absolute_error/count:0',
'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels, weights)
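    # Only elements 2 and 4 are weighted: (|4 - 3| + |8 - 3|) / 2 = 3.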
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_relative_error/count:0',
'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(predictions,
labels, normalizer)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
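    # A zero normalizer is expected to contribute zero relative error (rather
    # than inf), so both the update op and the final value should be 0.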
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_squared_error/count:0',
'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
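    # Squared errors are 1, 1 and 16, so the mean is 18 / 3 = 6.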
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.streaming_mean_squared_error(predictions, labels,
weights)
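    # Only elements 2 and 4 are weighted, so the mean is (1 + 25) / 2 = 13.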
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
error, update_op = metrics.streaming_mean_squared_error(predictions,
labels)
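      # Squared errors: batch 1 gives 81 + 25 + 16 = 122 and batch 2 gives
      # 36 + 1 + 49 = 86, so the streaming mean over 6 values is 208 / 6.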
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.test_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
      # Create the queue that populates another set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
mse0, update_op0 = metrics.streaming_mean_squared_error(
predictions0, labels0, name='msd0')
mse1, update_op1 = metrics.streaming_mean_squared_error(
predictions1, labels1, name='msd1')
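      # mse0 repeats the 208 / 6 case above; for mse1 the squared errors are
      # 25 + 16 + 4 = 45 and 16 + 9 + 9 = 34, giving 79 / 6 over 6 values.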
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
mae, ma_update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(predictions,
labels)
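      # Absolute errors sum to 18 + 14 = 32 and squared errors to 122 + 86 =
      # 208, each averaged over the 6 values seen across both batches.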
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('root_mean_squared_error/count:0',
'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
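      # The mean squared error is (1 + 1 + 16) / 3 = 6, so RMSE is sqrt(6).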
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels,
weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_local_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
expected_cov = np.cov([2, 4, 6, 8],
[1, 3, 2, 7],
fweights=[0, 1, 3, 1])[0, 1]
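      # np.cov treats fweights as integer frequency weights, which mirrors the
      # per-sample weights handed to streaming_covariance above.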
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_local_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
cmat = np.cov(predictions, labels, fweights=weights)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
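      # Pearson's r is the weighted covariance normalized by the product of the
      # weighted standard deviations, i.e. cmat[0, 1] / sqrt(cmat[0, 0] * cmat[1, 1]).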
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
cmat = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndSingletonBatches(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
weights = (np.arange(n).reshape(n//stride, stride) % stride == 0)
for row in weights:
np.random.shuffle(row)
# Now, weights is one-hot by row - one item per batch has non-zero weight.
weights = weights.reshape((n,))
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
cmat = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
actual_r = sess.run(update_op, feed_dict=feed_dict)
self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
self.assertEqual(np.isnan(expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(expected_r):
self.assertAlmostEqual(
expected_r, actual_r, 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_local_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
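    # Only the last two rows carry non-zero weight: their cosine distances are
    # 1 - (-1) = 2 and 1 - 0 = 1, so the weighted mean distance is 1.5.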
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_local_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
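      # All four values are below 100, three of the four are below 7, and none
      # are below 1, giving fractions of 1.0, 0.75 and 0.0 respectively.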
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_local_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0, 0.])
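      # With labels as rows and predictions as columns the confusion matrix is
      # [[1, 1, 0], [1, 2, 0], [0, 0, 0]], giving IOUs of 1/3 and 2/4 for the
      # first two classes, while the missing class contributes 0 to the mean.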
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
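      # Class 0: intersection 3, union 3 + 5 - 3 = 5; class 1: intersection 5,
      # union 7 + 5 - 5 = 7; hence the expected mean IOU below.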
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
weights = array_ops.concat(
[
constant_op.constant(
0, shape=[1]), constant_op.constant(
1, shape=[8]), constant_op.constant(
0, shape=[1])
],
0)
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_local_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
with self.test_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
def testStreamingConcat(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.test_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.test_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
if __name__ == '__main__':
test.main()
| apache-2.0 | 2,265,827,565,293,071,600 | 38.277841 | 85 | 0.594156 | false |
KeyWeeUsr/kivy | kivy/tests/test_doc_gallery.py | 17 | 1278 | from doc.gallery import *
def test_parse_docstring_info():
assert 'error' in parse_docstring_info("No Docstring")
assert 'error' in parse_docstring_info("'''No Docstring Title'''")
assert 'error' in parse_docstring_info(
"'''No Sentence\n======\nPeriods'''"
)
assert 'error' in parse_docstring_info(
"'\nSingle Quotes\n===\n\nNo singles.'")
d = parse_docstring_info("""'''
3D Rendering Monkey Head
========================
This example demonstrates using OpenGL to display a
rotating monkey head. This
includes loading a Blender OBJ file, shaders written in OpenGL's Shading
Language (GLSL), and using scheduled callbacks.
    The file monkey.obj is an OBJ file output from the Blender free 3D creation
software. The file is text, listing vertices and faces. It is loaded
into a scene using objloader.py's ObjFile class. The file simple.glsl is
a simple vertex and fragment shader written in GLSL.
'''
blah blah
blah blah
""")
assert 'error' not in d
assert '3D Rendering' in d['docstring'] and \
'This example' in d['docstring']
assert '3D Rendering' in d['title']
assert 'monkey head' in d['first_sentence']
if __name__ == '__main__':
test_parse_docstring_info()
| mit | -8,511,310,327,823,185,000 | 32.631579 | 74 | 0.654147 | false |
jtyr/ansible-modules-core | cloud/openstack/_quantum_floating_ip_associate.py | 6 | 8225 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import time
try:
from novaclient.v1_1 import client as nova_client
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_floating_ip_associate
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_floating_ip instead
short_description: Associate or disassociate a particular floating IP with an instance
description:
- Associates or disassociates a specific floating IP with a particular instance
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- the tenant name of the login user
required: true
default: true
auth_url:
description:
- the keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- name of the region
required: false
default: None
state:
description:
- indicates the desired state of the resource
choices: ['present', 'absent']
default: present
instance_name:
description:
- name of the instance to which the public IP should be assigned
required: true
default: None
ip_address:
description:
- floating ip that should be assigned to the instance
required: true
default: None
requirements:
- "python >= 2.6"
- "python-novaclient"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
- name: Associate a specific floating IP with an Instance
quantum_floating_ip_associate:
state: present
login_username: admin
login_password: admin
login_tenant_name: admin
ip_address: 1.1.1.1
instance_name: vm1
'''
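# A companion sketch, not part of the original EXAMPLES block: the documented
# C(absent) state detaches a previously associated floating IP again. The
# field values below are illustrative placeholders.
#
# - name: Disassociate a specific floating IP from an Instance
#   quantum_floating_ip_associate:
#     state: absent
#     login_username: admin
#     login_password: admin
#     login_tenant_name: admin
#     ip_address: 1.1.1.1
#     instance_name: vm1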
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception as e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception as e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception as e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _get_server_state(module, nova):
server_info = None
server = None
try:
for server in nova.servers.list():
if server:
info = server._info
if info['name'] == module.params['instance_name']:
if info['status'] != 'ACTIVE' and module.params['state'] == 'present':
module.fail_json(msg="The VM is available but not Active. state:" + info['status'])
server_info = info
break
except Exception as e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
return server_info, server
def _get_port_id(neutron, module, instance_id):
kwargs = dict(device_id = instance_id)
try:
ports = neutron.list_ports(**kwargs)
except Exception as e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
return ports['ports'][0]['id']
def _get_floating_ip_id(module, neutron):
kwargs = {
'floating_ip_address': module.params['ip_address']
}
try:
ips = neutron.list_floatingips(**kwargs)
except Exception as e:
module.fail_json(msg = "error in fetching the floatingips's %s" % e.message)
if not ips['floatingips']:
module.fail_json(msg = "Could find the ip specified in parameter, Please check")
ip = ips['floatingips'][0]['id']
if not ips['floatingips'][0]['port_id']:
state = "detached"
else:
state = "attached"
return state, ip
def _update_floating_ip(neutron, module, port_id, floating_ip_id):
kwargs = {
'port_id': port_id
}
try:
result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs})
except Exception as e:
module.fail_json(msg = "There was an error in updating the floating ip address: %s" % e.message)
module.exit_json(changed = True, result = result, public_ip=module.params['ip_address'])
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
instance_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-novaclient, python-keystoneclient, and either python-neutronclient or python-quantumclient are required')
try:
nova = nova_client.Client(module.params['login_username'], module.params['login_password'],
module.params['login_tenant_name'], module.params['auth_url'], service_type='compute')
except Exception as e:
module.fail_json( msg = " Error in authenticating to nova: %s" % e.message)
neutron = _get_neutron_client(module, module.params)
state, floating_ip_id = _get_floating_ip_id(module, neutron)
if module.params['state'] == 'present':
if state == 'attached':
module.exit_json(changed = False, result = 'attached', public_ip=module.params['ip_address'])
server_info, server_obj = _get_server_state(module, nova)
if not server_info:
module.fail_json(msg = " The instance name provided cannot be found")
port_id = _get_port_id(neutron, module, server_info['id'])
if not port_id:
module.fail_json(msg = "Cannot find a port for this instance, maybe fixed ip is not assigned")
_update_floating_ip(neutron, module, port_id, floating_ip_id)
if module.params['state'] == 'absent':
if state == 'detached':
module.exit_json(changed = False, result = 'detached')
if state == 'attached':
_update_floating_ip(neutron, module, None, floating_ip_id)
module.exit_json(changed = True, result = "detached")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | 3,004,950,790,051,463,700 | 34.917031 | 142 | 0.634772 | false |
boada/photometrypipeline | pp_combine.py | 2 | 13483 | #!/usr/bin/env python3
""" PP_COMBINE - combine frames based on wcs
v1.0: 2017-10-03, [email protected]
"""
from __future__ import print_function, division
# Photometry Pipeline
# Copyright (C) 2016-2018 Michael Mommert, [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import numpy
import os
import sys
import shutil
import logging
import subprocess
import argparse
import shlex
import time
from astropy.io import fits
from past.utils import old_div
from astroquery.jplhorizons import Horizons
# pipeline-specific modules
import _pp_conf
import toolbox
# create a portable DEVNULL
# necessary to prevent subprocess.PIPE and STDOUT from clogging if
# Source Extractor runs for too long
try:
from subprocess import DEVNULL # Py3
except ImportError:
import os # Py2
DEVNULL = open(os.devnull, 'wb')
# only import if Python3 is used
if sys.version_info > (3, 0):
from builtins import str
# setup logging
logging.basicConfig(filename=_pp_conf.log_filename,
level=_pp_conf.log_level,
format=_pp_conf.log_formatline,
datefmt=_pp_conf.log_datefmt)
def combine(filenames, obsparam, comoving, targetname,
manual_rates, combine_method, keep_files,
backsub=False, display=True, diagnostics=True):
"""
image combination wrapper
    output: number of frames combined
"""
# start logging
logging.info('starting image combination with parameters: %s' %
(', '.join([('%s: %s' % (var, str(val))) for
var, val in list(locals().items())])))
# check if images have been run through pp_prepare
try:
midtime_jd = fits.open(filenames[0], verify='silentfix',
ignore_missing_end=True)[0].header['MIDTIMJD']
except KeyError:
raise KeyError(('%s image header incomplete, have the data run ' +
'through pp_prepare?') % filenames[0])
return None
# adopt first frame as reference frame
hdulist = fits.open(filenames[0])
header = hdulist[0].header
refdate = float(header['MIDTIMJD'])
# read out ra and dec from header
if obsparam['radec_separator'] == 'XXX':
ref_ra_deg = float(header[obsparam['ra']])
ref_dec_deg = float(header[obsparam['dec']])
if obsparam['telescope_keyword'] == 'UKIRTWFCAM':
ref_ra_deg = ref_ra_deg/24.*360. - 795/3600.
ref_dec_deg -= 795/3600.
else:
ra_string = header[obsparam['ra']].split(
obsparam['radec_separator'])
dec_string = header[obsparam['dec']].split(
obsparam['radec_separator'])
ref_ra_deg = 15.*(float(ra_string[0]) +
old_div(float(ra_string[1]), 60.) +
old_div(float(ra_string[2]), 3600.))
ref_dec_deg = (abs(float(dec_string[0])) +
old_div(float(dec_string[1]), 60.) +
old_div(float(dec_string[2]), 3600.))
if dec_string[0].find('-') > -1:
ref_dec_deg = -1 * ref_dec_deg
if obsparam['telescope_keyword'] == 'UKIRTWFCAM':
ref_ra_deg = ref_ra_deg/24.*360.
if obsparam['telescope_keyword'] == "UKIRTWFCAM":
ref_ra_deg -= float(header['TRAOFF'])/3600
ref_dec_deg -= float(header['TDECOFF'])/3600
hdulist.close()
# modify individual frames if comoving == True
if comoving:
movingfilenames = []
# sort filenames by MIDTIMJD
mjds = []
for filename in filenames:
hdulist = fits.open(filename)
mjds.append(float(hdulist[0].header['MIDTIMJD']))
filenames = [filenames[i] for i in numpy.argsort(mjds)]
for filename in filenames:
movingfilename = filename[:filename.find('.fits')]+'_moving.fits'
print('shifting %s -> %s' % (filename, movingfilename))
logging.info('shifting %s -> %s' % (filename, movingfilename))
# read out date and pointing information
hdulist = fits.open(filename)
header = hdulist[0].header
date = hdulist[0].header['MIDTIMJD']
data = hdulist[0].data
hdulist.close()
# use ephemerides from Horizons if no manual rates are provided
if manual_rates is None:
# call HORIZONS to get target coordinates
obj = Horizons(targetname.replace('_', ' '), epochs=date,
location=str(obsparam['observatory_code']))
try:
eph = obj.ephemerides()
n = len(eph)
except ValueError:
print('Target (%s) not an asteroid' % targetname)
logging.warning('Target (%s) not an asteroid' % targetname)
n = None
time.sleep(0.5)
if n is None or n == 0:
logging.warning('WARNING: No position from Horizons!' +
'Name (%s) correct?' % targetname)
logging.warning('HORIZONS call: %s' % eph.url)
                raise ValueError('no Horizons ephemerides available')
else:
logging.info('ephemerides for %s pulled from Horizons' %
targetname)
logging.info('Horizons call: %s' %
obj.uri)
target_ra, target_dec = eph[0]['RA'], eph[0]['DEC']
# get image pointing from header
if obsparam['radec_separator'] == 'XXX':
ra_deg = float(header[obsparam['ra']])
dec_deg = float(header[obsparam['dec']])
if obsparam['telescope_keyword'] == 'UKIRTWFCAM':
ra_deg = ra_deg/24.*360. - 795/3600.
dec_deg -= 795/3600.
else:
ra_string = header[obsparam['ra']].split(
obsparam['radec_separator'])
dec_string = header[obsparam['dec']].split(
obsparam['radec_separator'])
ra_deg = 15.*(float(ra_string[0]) +
old_div(float(ra_string[1]), 60.) +
old_div(float(ra_string[2]), 3600.))
dec_deg = (abs(float(dec_string[0])) +
old_div(float(dec_string[1]), 60.) +
old_div(float(dec_string[2]), 3600.))
if dec_string[0].find('-') > -1:
dec_deg = -1 * dec_deg
if filename == filenames[0]:
ref_offset_ra = target_ra - ref_ra_deg
ref_offset_dec = target_dec - ref_dec_deg
offset_ra = target_ra - ref_ra_deg - ref_offset_ra
offset_dec = target_dec - ref_dec_deg - ref_offset_dec
else:
# use manual rates (since they are provided)
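                # manual rates are given in arcsec/s: the elapsed time in days
                # is converted to seconds and the resulting arcsec offset to
                # degrees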
offset_ra = ((float(header['MIDTIMJD'])-refdate)*86400 *
float(manual_rates[0]))/3600
offset_dec = ((float(header['MIDTIMJD'])-refdate)*86400 *
float(manual_rates[1]))/3600
logging.info('offsets in RA and Dec: %f, %f arcsec' %
(offset_ra*3600, offset_dec*3600))
crval1 = float(header['CRVAL1'])
crval2 = float(header['CRVAL2'])
# write new CRVALi keywords in different file
new_hdu = fits.PrimaryHDU(data)
new_hdu.header = header
new_hdu.header['CRVAL1'] = (crval1-offset_ra,
'updated in the moving frame of the object')
new_hdu.header['CRVAL2'] = (crval2-offset_dec,
'updated in the moving frame of the object')
movingfilenames.append(movingfilename)
new_hdu.writeto(movingfilename, overwrite=True,
output_verify='silentfix')
if comoving:
outfile_name = 'comove.fits'
fileline = " ".join(movingfilenames)
n_frames = len(movingfilenames)
else:
outfile_name = 'skycoadd.fits'
fileline = " ".join(filenames)
n_frames = len(filenames)
# run swarp on all image catalogs using different catalogs
commandline = (('swarp -combine Y -combine_type %s -delete_tmpfiles ' +
'Y -imageout_name %s -interpolate Y -subtract_back %s ' +
'-weight_type NONE -copy_keywords %s -write_xml N ' +
'-CENTER_TYPE MOST %s') %
({'median': 'MEDIAN', 'average': 'AVERAGE',
'clipped': 'CLIPPED -CLIP_AMPFRAC 0.2 -CLIP_SIGMA 0.1 '}
[combine_method],
outfile_name,
{True: 'Y', False: 'N'}[backsub],
obsparam['copy_keywords'], fileline))
logging.info('call SWARP as: %s' % commandline)
print('running SWARP to combine {:d} frames...'.format(n_frames))
try:
swarp = subprocess.Popen(shlex.split(commandline),
stdout=DEVNULL,
stderr=DEVNULL,
close_fds=True)
# do not direct stdout to subprocess.PIPE:
# for large FITS files, PIPE will clog, stalling
# subprocess.Popen
except Exception as e:
print('SWARP call:', (e))
logging.error('SWARP call:', (e))
return None
swarp.wait()
print('done!')
# remove files that are not needed anymore
if not keep_files:
if comoving:
for filename in movingfilenames:
os.remove(filename)
# update combined image header
total_exptime = 0
for filename in filenames:
hdulist = fits.open(filename)
total_exptime += float(hdulist[0].header[obsparam['exptime']])
hdulist = fits.open(outfile_name, mode='update')
hdulist[0].header[obsparam['exptime']] = (total_exptime, 'PP: cumulative')
hdulist[0].header['COMBO_N'] = (len(filenames), 'PP: N files combo')
hdulist[0].header['COMBO_M'] = (combine_method, 'PP: combo method')
hdulist[0].header['COMOVE'] = (str(comoving), 'PP: comoving?')
hdulist.flush()
return n_frames
if __name__ == '__main__':
# command line arguments
parser = argparse.ArgumentParser(description='image combination')
parser.add_argument("-comoving", action="store_true",
help='combine in moving target frame')
parser.add_argument("-targetname",
help='moving target name')
parser.add_argument("-manual_rates", help='manual rates in arcsec/s',
nargs=2)
parser.add_argument('-method',
help='combination method',
choices=['average', 'median', 'clipped'],
default='clipped')
parser.add_argument("-backsub", action="store_true",
help='subtract background in each frame ')
parser.add_argument("-keep_files", action="store_true",
help='keep intermediate files', default=False)
parser.add_argument('images', help='images to process', nargs='+')
args = parser.parse_args()
comoving = args.comoving
targetname = args.targetname
manual_rates = args.manual_rates
combine_method = args.method
backsub = args.backsub
keep_files = args.keep_files
filenames = args.images
# read telescope and filter information from fits headers
# check that they are the same for all images
instruments = []
for filename in filenames:
hdulist = fits.open(filename, ignore_missing_end=True,
verify='silentfix')
header = hdulist[0].header
for key in _pp_conf.instrument_keys:
if key in header:
instruments.append(header[key])
if len(instruments) == 0:
raise KeyError('cannot identify telescope/instrument; please update'
'_pp_conf.instrument_keys accordingly')
# assign telescope parameters (telescopes.py)
telescope = _pp_conf.instrument_identifiers[instruments[0]]
obsparam = _pp_conf.telescope_parameters[telescope]
if manual_rates is not None:
comoving = True
if comoving and targetname is None:
targetname = header[obsparam['object']]
# run image combination wrapper
combination = combine(filenames, obsparam, comoving, targetname,
manual_rates, combine_method, keep_files,
backsub, display=True, diagnostics=True)
| gpl-3.0 | -2,672,041,404,341,241,300 | 38.772861 | 84 | 0.557072 | false |
aarsan/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/_common_error.py | 13 | 1505 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.common import (
AzureHttpError,
)
_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_ASYNC_OP_FAILURE = 'Asynchronous operation did not succeed.'
_ERROR_ASYNC_OP_TIMEOUT = 'Timed out waiting for async operation to complete.'
def _general_error_handler(http_error):
''' Simple error handler for azure.'''
message = str(http_error)
if http_error.respbody is not None:
message += '\n' + http_error.respbody.decode('utf-8-sig')
raise AzureHttpError(message, http_error.status)
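# Hedged usage sketch, not part of the original module: callers funnel failed
# HTTP responses through the handler, which re-raises them as AzureHttpError
# with the decoded response body appended to the message. The _FakeHttpError
# class below is hypothetical and only shows the attributes the handler needs.
#
#   class _FakeHttpError(Exception):
#       def __init__(self, status, respbody):
#           self.status = status
#           self.respbody = respbody
#
#   try:
#       _general_error_handler(_FakeHttpError(404, b'Not Found'))
#   except AzureHttpError as error:
#       print(error.status_code)  # 404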
def _validate_not_none(param_name, param):
if param is None:
raise ValueError(_ERROR_VALUE_NONE.format(param_name))
| apache-2.0 | 7,633,349,980,039,614,000 | 37.538462 | 78 | 0.646707 | false |
Nitaco/ansible | lib/ansible/modules/network/f5/bigip_device_group_member.py | 8 | 7464 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_device_group_member
short_description: Manages members in a device group
description:
- Manages members in a device group. Members in a device group can only
be added or removed, never updated. This is because the members are
identified by unique name values and changing that name would invalidate
the uniqueness.
version_added: 2.5
options:
name:
description:
- Specifies the name of the device that you want to add to the
device group. Often this will be the hostname of the device.
This member must be trusted by the device already. Trusting
can be done with the C(bigip_device_trust) module and the
C(peer_hostname) option to that module.
required: True
device_group:
description:
- The device group that you want to add the member to.
required: True
state:
description:
      - When C(present), ensures that the device group member exists.
- When C(absent), ensures the device group member is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Add the current device to the "device_trust_group" device group
bigip_device_group_member:
name: "{{ inventory_hostname }}"
device_group: device_trust_group
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Add the hosts in the current scope to "device_trust_group"
bigip_device_group_member:
name: "{{ item }}"
device_group: device_trust_group
password: secret
server: lb.mydomain.com
state: present
user: admin
with_items: "{{ hostvars.keys() }}"
run_once: true
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
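# The nested try/except blocks above prefer the standalone ``library``
# module_utils tree (used when this file is run from the f5-ansible source
# repository) and fall back to the copies bundled with Ansible itself.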
class Parameters(AnsibleF5Parameters):
api_map = {}
api_attributes = []
returnables = []
updatables = []
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class Changes(Parameters):
pass
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = Parameters(params=self.module.params)
self.have = None
self.changes = Changes()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def present(self):
if self.exists():
return False
else:
return self.create()
def exists(self):
parent = self.client.api.tm.cm.device_groups.device_group.load(
name=self.want.device_group
)
exists = parent.devices_s.devices.exists(name=self.want.name)
if exists:
return True
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to remove the member from the device group.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
parent = self.client.api.tm.cm.device_groups.device_group.load(
name=self.want.device_group
)
parent.devices_s.devices.create(name=self.want.name)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
parent = self.client.api.tm.cm.device_groups.device_group.load(
name=self.want.device_group
)
resource = parent.devices_s.devices.load(name=self.want.name)
if resource:
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
device_group=dict(required=True),
state=dict(
default='present',
choices=['absent', 'present']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 | -7,432,941,680,942,992,000 | 28.856 | 91 | 0.634914 | false |
sencha/chromium-spacewalk | tools/telemetry/telemetry/core/platform/profiler/perf_profiler.py | 6 | 8619 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import signal
import subprocess
import sys
import tempfile
from telemetry.core import platform
from telemetry.core import util
from telemetry.core.platform import profiler
from telemetry.core.platform.profiler import android_profiling_helper
from telemetry.util import support_binaries
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
from pylib.perf import perf_control # pylint: disable=F0401
_PERF_OPTIONS = [
# In perf 3.13 --call-graph requires an argument, so use the -g short-hand
# which does not.
'-g',
# Increase sampling frequency for better coverage.
'--freq', '2000',
]
_PERF_OPTIONS_ANDROID = [
# Increase priority to avoid dropping samples. Requires root.
'--realtime', '80',
]
def _NicePath(path):
rel_path = os.path.relpath(path, os.curdir)
return rel_path if len(rel_path) < len(path) else path
def _PrepareHostForPerf():
kptr_file = '/proc/sys/kernel/kptr_restrict'
with open(kptr_file) as f:
if f.read().strip() != '0':
logging.warning('Making kernel symbols unrestricted. You might have to '
'enter your password for "sudo".')
with tempfile.NamedTemporaryFile() as zero:
zero.write('0')
zero.flush()
subprocess.call(['sudo', 'cp', zero.name, kptr_file])
def _InstallPerfHost():
host = platform.GetHostPlatform()
if not host.CanLaunchApplication('perfhost'):
host.InstallApplication('perfhost')
return support_binaries.FindPath('perfhost', host.GetOSName())
class _SingleProcessPerfProfiler(object):
"""An internal class for using perf for a given process.
On android, this profiler uses pre-built binaries from AOSP.
See more details in prebuilt/android/README.txt.
"""
def __init__(self, pid, output_file, browser_backend, platform_backend,
perf_binary, perfhost_binary):
self._pid = pid
self._browser_backend = browser_backend
self._platform_backend = platform_backend
self._output_file = output_file
self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
self._is_android = platform_backend.GetOSName() == 'android'
self._perfhost_binary = perfhost_binary
cmd_prefix = []
perf_args = ['record', '--pid', str(pid)]
if self._is_android:
cmd_prefix = ['adb', '-s', browser_backend.adb.device_serial(), 'shell',
perf_binary]
perf_args += _PERF_OPTIONS_ANDROID
output_file = os.path.join('/sdcard', 'perf_profiles',
os.path.basename(output_file))
self._device_output_file = output_file
browser_backend.adb.RunShellCommand(
'mkdir -p ' + os.path.dirname(self._device_output_file))
browser_backend.adb.RunShellCommand('rm -f ' + self._device_output_file)
else:
cmd_prefix = [perf_binary]
perf_args += ['--output', output_file] + _PERF_OPTIONS
self._proc = subprocess.Popen(cmd_prefix + perf_args,
stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
def CollectProfile(self):
if ('renderer' in self._output_file and
not self._is_android and
not self._platform_backend.GetCommandLine(self._pid)):
logging.warning('Renderer was swapped out during profiling. '
'To collect a full profile rerun with '
'"--extra-browser-args=--single-process"')
if self._is_android:
device = self._browser_backend.adb.device()
device.KillAll('perf', signum=signal.SIGINT, blocking=True)
self._proc.send_signal(signal.SIGINT)
exit_code = self._proc.wait()
try:
if exit_code == 128:
raise Exception(
"""perf failed with exit code 128.
Try rerunning this script under sudo or setting
/proc/sys/kernel/perf_event_paranoid to "-1".\nOutput:\n%s""" %
self._GetStdOut())
elif exit_code not in (0, -2):
raise Exception(
'perf failed with exit code %d. Output:\n%s' % (exit_code,
self._GetStdOut()))
finally:
self._tmp_output_file.close()
cmd = '%s report -n -i %s' % (_NicePath(self._perfhost_binary),
self._output_file)
if self._is_android:
device = self._browser_backend.adb.device()
device.old_interface.Adb().Pull(self._device_output_file,
self._output_file)
required_libs = \
android_profiling_helper.GetRequiredLibrariesForPerfProfile(
self._output_file)
symfs_root = os.path.dirname(self._output_file)
kallsyms = android_profiling_helper.CreateSymFs(device,
symfs_root,
required_libs,
use_symlinks=True)
cmd += ' --symfs %s --kallsyms %s' % (symfs_root, kallsyms)
for lib in required_libs:
lib = os.path.join(symfs_root, lib[1:])
if not os.path.exists(lib):
continue
objdump_path = android_profiling_helper.GetToolchainBinaryPath(
lib, 'objdump')
if objdump_path:
cmd += ' --objdump %s' % _NicePath(objdump_path)
break
print 'To view the profile, run:'
print ' ', cmd
return self._output_file
def _GetStdOut(self):
self._tmp_output_file.flush()
try:
with open(self._tmp_output_file.name) as f:
return f.read()
except IOError:
return ''
class PerfProfiler(profiler.Profiler):
def __init__(self, browser_backend, platform_backend, output_path, state):
super(PerfProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
process_output_file_map = self._GetProcessOutputFileMap()
self._process_profilers = []
self._perf_control = None
perf_binary = perfhost_binary = _InstallPerfHost()
try:
if platform_backend.GetOSName() == 'android':
device = browser_backend.adb.device()
perf_binary = android_profiling_helper.PrepareDeviceForPerf(device)
self._perf_control = perf_control.PerfControl(device)
self._perf_control.SetPerfProfilingMode()
else:
_PrepareHostForPerf()
for pid, output_file in process_output_file_map.iteritems():
if 'zygote' in output_file:
continue
self._process_profilers.append(
_SingleProcessPerfProfiler(
pid, output_file, browser_backend, platform_backend,
perf_binary, perfhost_binary))
except:
if self._perf_control:
self._perf_control.SetDefaultPerfMode()
raise
@classmethod
def name(cls):
return 'perf'
@classmethod
def is_supported(cls, browser_type):
if sys.platform != 'linux2':
return False
if browser_type.startswith('cros'):
return False
return True
@classmethod
def CustomizeBrowserOptions(cls, browser_type, options):
options.AppendExtraBrowserArgs([
'--no-sandbox',
'--allow-sandbox-debugging',
])
def CollectProfile(self):
if self._perf_control:
self._perf_control.SetDefaultPerfMode()
output_files = []
for single_process in self._process_profilers:
output_files.append(single_process.CollectProfile())
return output_files
@classmethod
def GetTopSamples(cls, file_name, number):
"""Parses the perf generated profile in |file_name| and returns a
{function: period} dict of the |number| hottests functions.
"""
assert os.path.exists(file_name)
with open(os.devnull, 'w') as devnull:
_InstallPerfHost()
report = subprocess.Popen(
['perfhost', 'report', '--show-total-period', '-U', '-t', '^', '-i',
file_name],
stdout=subprocess.PIPE, stderr=devnull).communicate()[0]
period_by_function = {}
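    # each report line is split on the '^' field separator (set via -t above);
    # the code below expects the sample period in column 1 and the symbol name
    # in column 4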
for line in report.split('\n'):
if not line or line.startswith('#'):
continue
fields = line.split('^')
if len(fields) != 5:
continue
period = int(fields[1])
function = fields[4].partition(' ')[2]
function = re.sub('<.*>', '', function) # Strip template params.
function = re.sub('[(].*[)]', '', function) # Strip function params.
period_by_function[function] = period
if len(period_by_function) == number:
break
return period_by_function
| bsd-3-clause | -4,435,052,418,082,825,700 | 34.763485 | 79 | 0.623274 | false |
rickerc/nova_audit | nova/api/openstack/compute/views/limits.py | 14 | 3505 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova.openstack.common import timeutils
class ViewBuilder(object):
"""OpenStack API base limits view builder."""
def build(self, rate_limits, absolute_limits):
rate_limits = self._build_rate_limits(rate_limits)
absolute_limits = self._build_absolute_limits(absolute_limits)
output = {
"limits": {
"rate": rate_limits,
"absolute": absolute_limits,
},
}
return output
def _build_absolute_limits(self, absolute_limits):
"""Builder for absolute limits
absolute_limits should be given as a dict of limits.
For example: {"ram": 512, "gigabytes": 1024}.
"""
limit_names = {
"ram": ["maxTotalRAMSize"],
"instances": ["maxTotalInstances"],
"cores": ["maxTotalCores"],
"key_pairs": ["maxTotalKeypairs"],
"floating_ips": ["maxTotalFloatingIps"],
"metadata_items": ["maxServerMeta", "maxImageMeta"],
"injected_files": ["maxPersonality"],
"injected_file_content_bytes": ["maxPersonalitySize"],
"security_groups": ["maxSecurityGroups"],
"security_group_rules": ["maxSecurityGroupRules"],
}
limits = {}
for name, value in absolute_limits.iteritems():
if name in limit_names and value is not None:
for name in limit_names[name]:
limits[name] = value
return limits
def _build_rate_limits(self, rate_limits):
limits = []
for rate_limit in rate_limits:
_rate_limit_key = None
_rate_limit = self._build_rate_limit(rate_limit)
# check for existing key
for limit in limits:
if (limit["uri"] == rate_limit["URI"] and
limit["regex"] == rate_limit["regex"]):
_rate_limit_key = limit
break
# ensure we have a key if we didn't find one
if not _rate_limit_key:
_rate_limit_key = {
"uri": rate_limit["URI"],
"regex": rate_limit["regex"],
"limit": [],
}
limits.append(_rate_limit_key)
_rate_limit_key["limit"].append(_rate_limit)
return limits
def _build_rate_limit(self, rate_limit):
_get_utc = datetime.datetime.utcfromtimestamp
next_avail = _get_utc(rate_limit["resetTime"])
return {
"verb": rate_limit["verb"],
"value": rate_limit["value"],
"remaining": int(rate_limit["remaining"]),
"unit": rate_limit["unit"],
"next-available": timeutils.isotime(at=next_avail),
}
| apache-2.0 | 1,521,693,284,542,638,600 | 34.05 | 78 | 0.564907 | false |
tommyip/zulip | zerver/lib/bugdown/api_code_examples.py | 1 | 10016 | import re
import json
import inspect
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from typing import Any, Dict, Optional, List, Tuple
import markdown
import zerver.openapi.python_examples
from zerver.lib.openapi import get_openapi_fixture, openapi_spec, \
get_openapi_parameters
MACRO_REGEXP = re.compile(r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}')
CODE_EXAMPLE_REGEX = re.compile(r'\# \{code_example\|\s*(.+?)\s*\}')
PYTHON_CLIENT_CONFIG = """
#!/usr/bin/env python3
import zulip
# Pass the path to your zuliprc file here.
client = zulip.Client(config_file="~/zuliprc")
"""
PYTHON_CLIENT_ADMIN_CONFIG = """
#!/usr/bin/env python
import zulip
# The user for this zuliprc file must be an organization administrator
client = zulip.Client(config_file="~/zuliprc-admin")
"""
DEFAULT_API_URL = "localhost:9991/api"
DEFAULT_AUTH_EMAIL = "BOT_EMAIL_ADDRESS"
DEFAULT_AUTH_API_KEY = "BOT_API_KEY"
DEFAULT_EXAMPLE = {
"integer": 1,
"string": "demo",
"boolean": False,
}
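# Illustrative example (not from the original file): a macro option string such
# as "curl, exclude=['client_gravatar']" is parsed by
# parse_language_and_options() into ("curl", {"exclude": ["client_gravatar"]}).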
def parse_language_and_options(input_str: Optional[str]) -> Tuple[str, Dict[str, Any]]:
if not input_str:
return ("", {})
language_and_options = re.match(r"(?P<language>\w+)(,\s*(?P<options>[\"\'\w\d\[\],= ]+))?", input_str)
assert(language_and_options is not None)
kwargs_pattern = re.compile(r"(?P<key>\w+)\s*=\s*(?P<value>[\'\"\w\d]+|\[[\'\",\w\d ]+\])")
language = language_and_options.group("language")
assert(language is not None)
if language_and_options.group("options"):
_options = kwargs_pattern.finditer(language_and_options.group("options"))
options = {}
for m in _options:
options[m.group("key")] = json.loads(m.group("value").replace("'", '"'))
return (language, options)
return (language, {})
def extract_python_code_example(source: List[str], snippet: List[str]) -> List[str]:
start = -1
end = -1
for line in source:
match = CODE_EXAMPLE_REGEX.search(line)
if match:
if match.group(1) == 'start':
start = source.index(line)
elif match.group(1) == 'end':
end = source.index(line)
break
if (start == -1 and end == -1):
return snippet
snippet.extend(source[start + 1: end])
snippet.append(' print(result)')
snippet.append('\n')
source = source[end + 1:]
return extract_python_code_example(source, snippet)
def render_python_code_example(function: str, admin_config: Optional[bool]=False) -> List[str]:
method = zerver.openapi.python_examples.TEST_FUNCTIONS[function]
function_source_lines = inspect.getsourcelines(method)[0]
if admin_config:
config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines()
else:
config = PYTHON_CLIENT_CONFIG.splitlines()
snippet = extract_python_code_example(function_source_lines, [])
code_example = []
code_example.append('```python')
code_example.extend(config)
for line in snippet:
# Remove one level of indentation and strip newlines
code_example.append(line[4:].rstrip())
code_example.append('```')
return code_example
def curl_method_arguments(endpoint: str, method: str,
api_url: str) -> List[str]:
method = method.upper()
url = "{}/v1{}".format(api_url, endpoint)
valid_methods = ["GET", "POST", "DELETE", "PUT", "PATCH", "OPTIONS"]
if method == valid_methods[0]:
# Then we need to make sure that each -d option translates to becoming
# a GET parameter (in the URL) and not a POST parameter (in the body).
# TODO: remove the -X part by updating the linting rule. It's redundant.
return ["-X", "GET", "-G", url]
elif method in valid_methods:
return ["-X", method, url]
else:
msg = "The request method {} is not one of {}".format(method,
valid_methods)
raise ValueError(msg)
def generate_curl_example(endpoint: str, method: str,
auth_email: str=DEFAULT_AUTH_EMAIL,
auth_api_key: str=DEFAULT_AUTH_API_KEY,
api_url: str=DEFAULT_API_URL,
exclude: List[str]=[]) -> List[str]:
lines = ["```curl"]
openapi_entry = openapi_spec.spec()['paths'][endpoint][method.lower()]
curl_first_line_parts = ["curl"] + curl_method_arguments(endpoint, method,
api_url)
lines.append(" ".join(curl_first_line_parts))
authentication_required = openapi_entry.get("security", False)
if authentication_required:
lines.append(" -u %s:%s" % (auth_email, auth_api_key))
openapi_example_params = get_openapi_parameters(endpoint, method)
for packet in openapi_example_params:
param_name = packet["name"]
if param_name in exclude:
continue
param_type = packet["schema"]["type"]
if param_type in ["object", "array"]:
example_value = packet.get("example", None)
if not example_value:
msg = """All array and object type request parameters must have
concrete examples. The OpenAPI documentation for {}/{} is missing an example
value for the {} parameter. Without this we cannot automatically generate a
cURL example.""".format(endpoint, method, param_name)
raise ValueError(msg)
ordered_ex_val_str = json.dumps(example_value, sort_keys=True)
line = " --data-urlencode {}='{}'".format(param_name, ordered_ex_val_str)
else:
example_value = packet.get("example", DEFAULT_EXAMPLE[param_type])
if type(example_value) == bool:
example_value = str(example_value).lower()
line = " -d '{}={}'".format(param_name, example_value)
lines.append(line)
for i in range(1, len(lines)-1):
lines[i] = lines[i] + " \\"
lines.append("```")
return lines
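# For orientation (a hand-worked sketch, not generated output): a GET endpoint
# with no request parameters renders roughly as
#   curl -X GET -G localhost:9991/api/v1/<endpoint> \
#       -u BOT_EMAIL_ADDRESS:BOT_API_KEY
# inside a fenced ``curl`` code block.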
def render_curl_example(function: str, exclude: List[str]=[]) -> List[str]:
""" A simple wrapper around generate_curl_example. """
parts = function.split(":")
endpoint = parts[0]
method = parts[1]
kwargs = dict() # type: Dict[str, Any]
if len(parts) > 2:
kwargs["auth_email"] = parts[2]
if len(parts) > 3:
kwargs["auth_api_key"] = parts[3]
if len(parts) > 4:
kwargs["api_url"] = parts[4]
kwargs["exclude"] = exclude
return generate_curl_example(endpoint, method, **kwargs)
SUPPORTED_LANGUAGES = {
'python': {
'client_config': PYTHON_CLIENT_CONFIG,
'admin_config': PYTHON_CLIENT_ADMIN_CONFIG,
'render': render_python_code_example,
},
'curl': {
'render': render_curl_example
}
} # type: Dict[str, Any]
class APICodeExamplesGenerator(Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
md.preprocessors.add(
'generate_code_example', APICodeExamplesPreprocessor(md, self.getConfigs()), '_begin'
)
class APICodeExamplesPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(APICodeExamplesPreprocessor, self).__init__(md)
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = MACRO_REGEXP.search(line)
if match:
language, options = parse_language_and_options(match.group(2))
function = match.group(3)
key = match.group(4)
argument = match.group(6)
if key == 'fixture':
if argument:
text = self.render_fixture(function, name=argument)
else:
text = self.render_fixture(function)
elif key == 'example':
if argument == 'admin_config=True':
text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True)
else:
text = SUPPORTED_LANGUAGES[language]['render'](function, **options)
                # The line that contains the directive to include the macro
                # may be preceded or followed by text or tags; in that case
                # we need to make sure that any preceding or following text
                # stays the same.
line_split = MACRO_REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]:
fixture = []
# We assume that if the function we're rendering starts with a slash
# it's a path in the endpoint and therefore it uses the new OpenAPI
# format.
if function.startswith('/'):
path, method = function.rsplit(':', 1)
fixture_dict = get_openapi_fixture(path, method, name)
else:
fixture_dict = zerver.openapi.python_examples.FIXTURES[function]
fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True,
separators=(',', ': '))
fixture.append('```')
fixture.extend(fixture_json.splitlines())
fixture.append('```')
return fixture
def makeExtension(*args: Any, **kwargs: str) -> APICodeExamplesGenerator:
return APICodeExamplesGenerator(**kwargs)
| apache-2.0 | -3,526,686,930,778,416,000 | 36.796226 | 116 | 0.577476 | false |
binhex/moviegrabber | lib/site-packages/cherrypy/_cpchecker.py | 87 | 14739 | import os
import warnings
import cherrypy
from cherrypy._cpcompat import iteritems, copykeys, builtins
class Checker(object):
"""A checker for CherryPy sites and their mounted applications.
When this object is called at engine startup, it executes each
of its own methods whose names start with ``check_``. If you wish
to disable selected checks, simply add a line in your global
config which sets the appropriate method to False::
[global]
checker.check_skipped_app_config = False
You may also dynamically add or replace ``check_*`` methods in this way.
"""
on = True
"""If True (the default), run all checks; if False, turn off all checks."""
def __init__(self):
self._populate_known_types()
def __call__(self):
"""Run all check_* methods."""
if self.on:
oldformatwarning = warnings.formatwarning
warnings.formatwarning = self.formatwarning
try:
for name in dir(self):
if name.startswith("check_"):
method = getattr(self, name)
if method and hasattr(method, '__call__'):
method()
finally:
warnings.formatwarning = oldformatwarning
def formatwarning(self, message, category, filename, lineno, line=None):
"""Function to format a warning."""
return "CherryPy Checker:\n%s\n\n" % message
# This value should be set inside _cpconfig.
global_config_contained_paths = False
def check_app_config_entries_dont_start_with_script_name(self):
"""Check for Application config with sections that repeat script_name."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
if sn == '':
continue
sn_atoms = sn.strip("/").split("/")
for key in app.config.keys():
key_atoms = key.strip("/").split("/")
if key_atoms[:len(sn_atoms)] == sn_atoms:
warnings.warn(
"The application mounted at %r has config " \
"entries that start with its script name: %r" % (sn, key))
def check_site_config_entries_in_app_config(self):
"""Check for mounted Applications that have site-scoped config."""
for sn, app in iteritems(cherrypy.tree.apps):
if not isinstance(app, cherrypy.Application):
continue
msg = []
for section, entries in iteritems(app.config):
if section.startswith('/'):
for key, value in iteritems(entries):
for n in ("engine.", "server.", "tree.", "checker."):
if key.startswith(n):
msg.append("[%s] %s = %s" % (section, key, value))
if msg:
msg.insert(0,
"The application mounted at %r contains the following "
"config entries, which are only allowed in site-wide "
"config. Move them to a [global] section and pass them "
"to cherrypy.config.update() instead of tree.mount()." % sn)
warnings.warn(os.linesep.join(msg))
def check_skipped_app_config(self):
"""Check for mounted Applications that have no config."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
msg = "The Application mounted at %r has an empty config." % sn
if self.global_config_contained_paths:
msg += (" It looks like the config you passed to "
"cherrypy.config.update() contains application-"
"specific sections. You must explicitly pass "
"application config via "
"cherrypy.tree.mount(..., config=app_config)")
warnings.warn(msg)
return
def check_app_config_brackets(self):
"""Check for Application config with extraneous brackets in section names."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
for key in app.config.keys():
if key.startswith("[") or key.endswith("]"):
warnings.warn(
"The application mounted at %r has config " \
"section names with extraneous brackets: %r. "
"Config *files* need brackets; config *dicts* "
"(e.g. passed to tree.mount) do not." % (sn, key))
def check_static_paths(self):
"""Check Application config for incorrect static paths."""
# Use the dummy Request object in the main thread.
request = cherrypy.request
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
request.app = app
for section in app.config:
# get_resource will populate request.config
request.get_resource(section + "/dummy.html")
conf = request.config.get
if conf("tools.staticdir.on", False):
msg = ""
root = conf("tools.staticdir.root")
dir = conf("tools.staticdir.dir")
if dir is None:
msg = "tools.staticdir.dir is not set."
else:
fulldir = ""
if os.path.isabs(dir):
fulldir = dir
if root:
msg = ("dir is an absolute path, even "
"though a root is provided.")
testdir = os.path.join(root, dir[1:])
if os.path.exists(testdir):
msg += ("\nIf you meant to serve the "
"filesystem folder at %r, remove "
"the leading slash from dir." % testdir)
else:
if not root:
msg = "dir is a relative path and no root provided."
else:
fulldir = os.path.join(root, dir)
if not os.path.isabs(fulldir):
msg = "%r is not an absolute path." % fulldir
if fulldir and not os.path.exists(fulldir):
if msg:
msg += "\n"
msg += ("%r (root + dir) is not an existing "
"filesystem path." % fulldir)
if msg:
warnings.warn("%s\nsection: [%s]\nroot: %r\ndir: %r"
% (msg, section, root, dir))
# -------------------------- Compatibility -------------------------- #
obsolete = {
'server.default_content_type': 'tools.response_headers.headers',
'log_access_file': 'log.access_file',
'log_config_options': None,
'log_file': 'log.error_file',
'log_file_not_found': None,
'log_request_headers': 'tools.log_headers.on',
'log_to_screen': 'log.screen',
'show_tracebacks': 'request.show_tracebacks',
'throw_errors': 'request.throw_errors',
'profiler.on': ('cherrypy.tree.mount(profiler.make_app('
'cherrypy.Application(Root())))'),
}
deprecated = {}
def _compat(self, config):
"""Process config and warn on each obsolete or deprecated entry."""
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if k in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead.\n"
"section: [%s]" %
(k, self.obsolete[k], section))
elif k in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead.\n"
"section: [%s]" %
(k, self.deprecated[k], section))
else:
if section in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead."
% (section, self.obsolete[section]))
elif section in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead."
% (section, self.deprecated[section]))
def check_compatibility(self):
"""Process config and warn on each obsolete or deprecated entry."""
self._compat(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._compat(app.config)
# ------------------------ Known Namespaces ------------------------ #
extra_config_namespaces = []
def _known_ns(self, app):
ns = ["wsgi"]
ns.extend(copykeys(app.toolboxes))
ns.extend(copykeys(app.namespaces))
ns.extend(copykeys(app.request_class.namespaces))
ns.extend(copykeys(cherrypy.config.namespaces))
ns += self.extra_config_namespaces
for section, conf in app.config.items():
is_path_section = section.startswith("/")
if is_path_section and isinstance(conf, dict):
for k, v in conf.items():
atoms = k.split(".")
if len(atoms) > 1:
if atoms[0] not in ns:
# Spit out a special warning if a known
# namespace is preceded by "cherrypy."
if (atoms[0] == "cherrypy" and atoms[1] in ns):
msg = ("The config entry %r is invalid; "
"try %r instead.\nsection: [%s]"
% (k, ".".join(atoms[1:]), section))
else:
msg = ("The config entry %r is invalid, because "
"the %r config namespace is unknown.\n"
"section: [%s]" % (k, atoms[0], section))
warnings.warn(msg)
elif atoms[0] == "tools":
if atoms[1] not in dir(cherrypy.tools):
msg = ("The config entry %r may be invalid, "
"because the %r tool was not found.\n"
"section: [%s]" % (k, atoms[1], section))
warnings.warn(msg)
def check_config_namespaces(self):
"""Process config and warn on each unknown config namespace."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_ns(app)
# -------------------------- Config Types -------------------------- #
known_config_types = {}
def _populate_known_types(self):
b = [x for x in vars(builtins).values()
if type(x) is type(str)]
def traverse(obj, namespace):
for name in dir(obj):
# Hack for 3.2's warning about body_params
if name == 'body_params':
continue
vtype = type(getattr(obj, name, None))
if vtype in b:
self.known_config_types[namespace + "." + name] = vtype
traverse(cherrypy.request, "request")
traverse(cherrypy.response, "response")
traverse(cherrypy.server, "server")
traverse(cherrypy.engine, "engine")
traverse(cherrypy.log, "log")
def _known_types(self, config):
msg = ("The config entry %r in section %r is of type %r, "
"which does not match the expected type %r.")
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
else:
k, v = section, conf
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
def check_config_types(self):
"""Assert that config values are of the same type as default values."""
self._known_types(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_types(app.config)
# -------------------- Specific config warnings -------------------- #
def check_localhost(self):
"""Warn if any socket_host is 'localhost'. See #711."""
for k, v in cherrypy.config.items():
if k == 'server.socket_host' and v == 'localhost':
warnings.warn("The use of 'localhost' as a socket host can "
"cause problems on newer systems, since 'localhost' can "
"map to either an IPv4 or an IPv6 address. You should "
"use '127.0.0.1' or '[::1]' instead.")
| gpl-3.0 | -1,075,984,245,209,143,600 | 44.073394 | 85 | 0.471063 | false |
nitishaggarwal/wed | wed/users/views.py | 55 | 1681 | # -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from .forms import UserForm
# Import the customized User model
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
form_class = UserForm
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username" | bsd-3-clause | 4,551,063,443,299,605,500 | 28.508772 | 69 | 0.728138 | false |
mcking49/apache-flask | Python/Lib/htmllib.py | 312 | 12869 | """HTML 2.0 parser.
See the HTML 2.0 specification:
http://www.w3.org/hypertext/WWW/MarkUp/html-spec/html-spec_toc.html
"""
from warnings import warnpy3k
warnpy3k("the htmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import sgmllib
from formatter import AS_IS
__all__ = ["HTMLParser", "HTMLParseError"]
class HTMLParseError(sgmllib.SGMLParseError):
"""Error raised when an HTML document can't be parsed."""
class HTMLParser(sgmllib.SGMLParser):
"""This is the basic HTML parser class.
It supports all entity names required by the XHTML 1.0 Recommendation.
It also defines handlers for all HTML 2.0 and many HTML 3.0 and 3.2
elements.
"""
from htmlentitydefs import entitydefs
def __init__(self, formatter, verbose=0):
"""Creates an instance of the HTMLParser class.
The formatter parameter is the formatter instance associated with
the parser.
"""
sgmllib.SGMLParser.__init__(self, verbose)
self.formatter = formatter
def error(self, message):
raise HTMLParseError(message)
def reset(self):
sgmllib.SGMLParser.reset(self)
self.savedata = None
self.isindex = 0
self.title = None
self.base = None
self.anchor = None
self.anchorlist = []
self.nofill = 0
self.list_stack = []
# ------ Methods used internally; some may be overridden
# --- Formatter interface, taking care of 'savedata' mode;
# shouldn't need to be overridden
def handle_data(self, data):
if self.savedata is not None:
self.savedata = self.savedata + data
else:
if self.nofill:
self.formatter.add_literal_data(data)
else:
self.formatter.add_flowing_data(data)
# --- Hooks to save data; shouldn't need to be overridden
def save_bgn(self):
"""Begins saving character data in a buffer instead of sending it
to the formatter object.
Retrieve the stored data via the save_end() method. Use of the
save_bgn() / save_end() pair may not be nested.
"""
self.savedata = ''
def save_end(self):
"""Ends buffering character data and returns all data saved since
the preceding call to the save_bgn() method.
If the nofill flag is false, whitespace is collapsed to single
spaces. A call to this method without a preceding call to the
save_bgn() method will raise a TypeError exception.
"""
data = self.savedata
self.savedata = None
if not self.nofill:
data = ' '.join(data.split())
return data
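    # Typical save_bgn()/save_end() pairing in this class (illustrative): the
    # start_title() handler below calls save_bgn(), character data then
    # accumulates in the buffer, and end_title() stores save_end() in
    # self.title.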
# --- Hooks for anchors; should probably be overridden
def anchor_bgn(self, href, name, type):
"""This method is called at the start of an anchor region.
The arguments correspond to the attributes of the <A> tag with
the same names. The default implementation maintains a list of
hyperlinks (defined by the HREF attribute for <A> tags) within
the document. The list of hyperlinks is available as the data
attribute anchorlist.
"""
self.anchor = href
if self.anchor:
self.anchorlist.append(href)
def anchor_end(self):
"""This method is called at the end of an anchor region.
The default implementation adds a textual footnote marker using an
        index into the list of hyperlinks created by the anchor_bgn() method.
"""
if self.anchor:
self.handle_data("[%d]" % len(self.anchorlist))
self.anchor = None
# --- Hook for images; should probably be overridden
def handle_image(self, src, alt, *args):
"""This method is called to handle images.
The default implementation simply passes the alt value to the
handle_data() method.
"""
self.handle_data(alt)
    # --------- Top level elements
def start_html(self, attrs): pass
def end_html(self): pass
def start_head(self, attrs): pass
def end_head(self): pass
def start_body(self, attrs): pass
def end_body(self): pass
# ------ Head elements
def start_title(self, attrs):
self.save_bgn()
def end_title(self):
self.title = self.save_end()
def do_base(self, attrs):
for a, v in attrs:
if a == 'href':
self.base = v
def do_isindex(self, attrs):
self.isindex = 1
def do_link(self, attrs):
pass
def do_meta(self, attrs):
pass
def do_nextid(self, attrs): # Deprecated
pass
# ------ Body elements
# --- Headings
def start_h1(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h1', 0, 1, 0))
def end_h1(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h2(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h2', 0, 1, 0))
def end_h2(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h3(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h3', 0, 1, 0))
def end_h3(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h4(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h4', 0, 1, 0))
def end_h4(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h5(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h5', 0, 1, 0))
def end_h5(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h6(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h6', 0, 1, 0))
def end_h6(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
# --- Block Structuring Elements
def do_p(self, attrs):
self.formatter.end_paragraph(1)
def start_pre(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
self.nofill = self.nofill + 1
def end_pre(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
self.nofill = max(0, self.nofill - 1)
def start_xmp(self, attrs):
self.start_pre(attrs)
self.setliteral('xmp') # Tell SGML parser
def end_xmp(self):
self.end_pre()
def start_listing(self, attrs):
self.start_pre(attrs)
self.setliteral('listing') # Tell SGML parser
def end_listing(self):
self.end_pre()
def start_address(self, attrs):
self.formatter.end_paragraph(0)
self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
def end_address(self):
self.formatter.end_paragraph(0)
self.formatter.pop_font()
def start_blockquote(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_margin('blockquote')
def end_blockquote(self):
self.formatter.end_paragraph(1)
self.formatter.pop_margin()
# --- List Elements
def start_ul(self, attrs):
self.formatter.end_paragraph(not self.list_stack)
self.formatter.push_margin('ul')
self.list_stack.append(['ul', '*', 0])
def end_ul(self):
if self.list_stack: del self.list_stack[-1]
self.formatter.end_paragraph(not self.list_stack)
self.formatter.pop_margin()
def do_li(self, attrs):
self.formatter.end_paragraph(0)
if self.list_stack:
[dummy, label, counter] = top = self.list_stack[-1]
top[2] = counter = counter+1
else:
label, counter = '*', 0
self.formatter.add_label_data(label, counter)
def start_ol(self, attrs):
self.formatter.end_paragraph(not self.list_stack)
self.formatter.push_margin('ol')
label = '1.'
for a, v in attrs:
if a == 'type':
if len(v) == 1: v = v + '.'
label = v
self.list_stack.append(['ol', label, 0])
def end_ol(self):
if self.list_stack: del self.list_stack[-1]
self.formatter.end_paragraph(not self.list_stack)
self.formatter.pop_margin()
def start_menu(self, attrs):
self.start_ul(attrs)
def end_menu(self):
self.end_ul()
def start_dir(self, attrs):
self.start_ul(attrs)
def end_dir(self):
self.end_ul()
def start_dl(self, attrs):
self.formatter.end_paragraph(1)
self.list_stack.append(['dl', '', 0])
def end_dl(self):
self.ddpop(1)
if self.list_stack: del self.list_stack[-1]
def do_dt(self, attrs):
self.ddpop()
def do_dd(self, attrs):
self.ddpop()
self.formatter.push_margin('dd')
self.list_stack.append(['dd', '', 0])
def ddpop(self, bl=0):
self.formatter.end_paragraph(bl)
if self.list_stack:
if self.list_stack[-1][0] == 'dd':
del self.list_stack[-1]
self.formatter.pop_margin()
# --- Phrase Markup
# Idiomatic Elements
def start_cite(self, attrs): self.start_i(attrs)
def end_cite(self): self.end_i()
def start_code(self, attrs): self.start_tt(attrs)
def end_code(self): self.end_tt()
def start_em(self, attrs): self.start_i(attrs)
def end_em(self): self.end_i()
def start_kbd(self, attrs): self.start_tt(attrs)
def end_kbd(self): self.end_tt()
def start_samp(self, attrs): self.start_tt(attrs)
def end_samp(self): self.end_tt()
def start_strong(self, attrs): self.start_b(attrs)
def end_strong(self): self.end_b()
def start_var(self, attrs): self.start_i(attrs)
def end_var(self): self.end_i()
# Typographic Elements
def start_i(self, attrs):
self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
def end_i(self):
self.formatter.pop_font()
def start_b(self, attrs):
self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS))
def end_b(self):
self.formatter.pop_font()
def start_tt(self, attrs):
self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
def end_tt(self):
self.formatter.pop_font()
def start_a(self, attrs):
href = ''
name = ''
type = ''
for attrname, value in attrs:
value = value.strip()
if attrname == 'href':
href = value
if attrname == 'name':
name = value
if attrname == 'type':
type = value.lower()
self.anchor_bgn(href, name, type)
def end_a(self):
self.anchor_end()
# --- Line Break
def do_br(self, attrs):
self.formatter.add_line_break()
# --- Horizontal Rule
def do_hr(self, attrs):
self.formatter.add_hor_rule()
# --- Image
def do_img(self, attrs):
align = ''
alt = '(image)'
ismap = ''
src = ''
width = 0
height = 0
for attrname, value in attrs:
if attrname == 'align':
align = value
if attrname == 'alt':
alt = value
if attrname == 'ismap':
ismap = value
if attrname == 'src':
src = value
if attrname == 'width':
try: width = int(value)
except ValueError: pass
if attrname == 'height':
try: height = int(value)
except ValueError: pass
self.handle_image(src, alt, ismap, align, width, height)
# --- Really Old Unofficial Deprecated Stuff
def do_plaintext(self, attrs):
self.start_pre(attrs)
self.setnomoretags() # Tell SGML parser
# --- Unhandled tags
def unknown_starttag(self, tag, attrs):
pass
def unknown_endtag(self, tag):
pass
def test(args = None):
import sys, formatter
if not args:
args = sys.argv[1:]
silent = args and args[0] == '-s'
if silent:
del args[0]
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
if silent:
f = formatter.NullFormatter()
else:
f = formatter.AbstractFormatter(formatter.DumbWriter())
p = HTMLParser(f)
p.feed(data)
p.close()
if __name__ == '__main__':
test()
| mit | 3,196,804,299,953,962,500 | 25.209776 | 76 | 0.572383 | false |
rbalda/neural_ocr | env/lib/python2.7/site-packages/scipy/optimize/tests/test_nonlin.py | 67 | 15160 | """ Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_, dec, TestCase, run_module_suite
from scipy._lib.six import xrange
from scipy.optimize import nonlin, root
from numpy import matrix, diag, dot
from numpy.linalg import inv
import numpy as np
from test_minpack import pressure_network
SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden,
'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing,
'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2,
'krylov': nonlin.newton_krylov}
MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
x = np.asmatrix(x).T
d = matrix(diag([3,2,1.5,1,0.5]))
c = 0.01
f = -d*x - c*float(x.T*x)*x
return f
F.xin = [1,1,1,1,1]
F.KNOWN_BAD = {}
def F2(x):
return x
F2.xin = [1,2,3,4,5,6]
F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing}
def F2_lucky(x):
return x
F2_lucky.xin = [0,0,0,0,0,0]
F2_lucky.KNOWN_BAD = {}
def F3(x):
A = np.mat('-2 1 0; 1 -2 1; 0 1 -2')
b = np.mat('1 2 3')
return np.dot(A, x) - b
F3.xin = [1,2,3]
F3.KNOWN_BAD = {}
def F4_powell(x):
A = 1e4
return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing,
'diagbroyden': nonlin.diagbroyden}
def F5(x):
return pressure_network(x, 4, np.array([.5, .5, .5, .5]))
F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
def F6(x):
x1, x2 = x
J0 = np.array([[-4.256, 14.7],
[0.8394989, 0.59964207]])
v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
np.sin(x2 * np.exp(x1) - 1)])
return -np.linalg.solve(J0, v)
F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
"""
Check the Broyden methods for a few test problems.
broyden1, broyden2, and newton_krylov must succeed for
all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
"""
def _check_nonlin_func(self, f, func, f_tol=1e-2):
x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
assert_(np.absolute(f(x)).max() < f_tol)
def _check_root(self, f, method, f_tol=1e-2):
res = root(f, f.xin, method=method,
options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
assert_(np.absolute(res.fun).max() < f_tol)
@dec.knownfailureif(True)
def _check_func_fail(self, *a, **kw):
pass
def test_problem_nonlin(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for func in SOLVERS.values():
if func in f.KNOWN_BAD.values():
if func in MUST_WORK.values():
yield self._check_func_fail, f, func
continue
yield self._check_nonlin_func, f, func
def test_tol_norm_called(self):
# Check that supplying tol_norm keyword to nonlin_solve works
self._tol_norm_used = False
def local_norm_func(x):
self._tol_norm_used = True
return np.absolute(x).max()
nonlin.newton_krylov(F, F.xin, f_tol=1e-2, maxiter=200, verbose=0,
tol_norm=local_norm_func)
assert_(self._tol_norm_used)
def test_problem_root(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for meth in SOLVERS:
if meth in f.KNOWN_BAD:
if meth in MUST_WORK:
yield self._check_func_fail, f, meth
continue
yield self._check_root, f, meth
class TestSecant(TestCase):
"""Check that some Jacobian approximations satisfy the secant condition"""
xs = [np.array([1,2,3,4,5], float),
np.array([2,3,4,5,1], float),
np.array([3,4,5,1,2], float),
np.array([4,5,1,2,3], float),
np.array([9,1,9,1,3], float),
np.array([0,1,9,1,3], float),
np.array([5,5,7,1,1], float),
np.array([1,2,7,5,1], float),]
fs = [x**2 - 1 for x in xs]
def _check_secant(self, jac_cls, npoints=1, **kw):
"""
Check that the given Jacobian approximation satisfies secant
conditions for last `npoints` points.
"""
jac = jac_cls(**kw)
jac.setup(self.xs[0], self.fs[0], None)
for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
jac.update(x, f)
for k in xrange(min(npoints, j+1)):
dx = self.xs[j-k+1] - self.xs[j-k]
df = self.fs[j-k+1] - self.fs[j-k]
assert_(np.allclose(dx, jac.solve(df)))
# Check that the `npoints` secant bound is strict
if j >= npoints:
dx = self.xs[j-npoints+1] - self.xs[j-npoints]
df = self.fs[j-npoints+1] - self.fs[j-npoints]
assert_(not np.allclose(dx, jac.solve(df)))
def test_broyden1(self):
self._check_secant(nonlin.BroydenFirst)
def test_broyden2(self):
self._check_secant(nonlin.BroydenSecond)
def test_broyden1_update(self):
# Check that BroydenFirst update works as for a dense matrix
jac = nonlin.BroydenFirst(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
B = np.identity(5) * (-1/0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
jac.update(x, f)
assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
def test_broyden2_update(self):
# Check that BroydenSecond update works as for a dense matrix
jac = nonlin.BroydenSecond(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
H = np.identity(5) * (-0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
jac.update(x, f)
assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
def test_anderson(self):
# Anderson mixing (with w0=0) satisfies secant conditions
# for the last M iterates, see [Ey]_
#
# .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
class TestLinear(TestCase):
"""Solve a linear equation;
some methods find the exact solution in a finite number of steps"""
def _check(self, jac, N, maxiter, complex=False, **kw):
np.random.seed(123)
A = np.random.randn(N, N)
if complex:
A = A + 1j*np.random.randn(N, N)
b = np.random.randn(N)
if complex:
b = b + 1j*np.random.randn(N)
def func(x):
return dot(A, x) - b
sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
f_tol=1e-6, line_search=None, verbose=0)
assert_(np.allclose(dot(A, sol), b, atol=1e-6))
def test_broyden1(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
def test_broyden2(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
def test_anderson(self):
# Anderson is rather similar to Broyden, if given enough storage space
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
def test_krylov(self):
# Krylov methods solve linear systems exactly in N inner steps
self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
"""Check that solve/dot methods in Jacobian approximations are consistent"""
def _func(self, x):
return x**2 - 1 + np.dot(self.A, x)
def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
np.random.seed(123)
N = 7
def rand(*a):
q = np.random.rand(*a)
if complex:
q = q + 1j*np.random.rand(*a)
return q
def assert_close(a, b, msg):
d = abs(a - b).max()
f = tol + abs(b).max()*tol
if d > f:
raise AssertionError('%s: err %g' % (msg, d))
self.A = rand(N, N)
# initialize
x0 = np.random.rand(N)
jac = jac_cls(**kw)
jac.setup(x0, self._func(x0), self._func)
# check consistency
for k in xrange(2*N):
v = rand(N)
if hasattr(jac, '__array__'):
Jd = np.array(jac)
if hasattr(jac, 'solve'):
Gv = jac.solve(v)
Gv2 = np.linalg.solve(Jd, v)
assert_close(Gv, Gv2, 'solve vs array')
if hasattr(jac, 'rsolve'):
Gv = jac.rsolve(v)
Gv2 = np.linalg.solve(Jd.T.conj(), v)
assert_close(Gv, Gv2, 'rsolve vs array')
if hasattr(jac, 'matvec'):
Jv = jac.matvec(v)
Jv2 = np.dot(Jd, v)
assert_close(Jv, Jv2, 'dot vs array')
if hasattr(jac, 'rmatvec'):
Jv = jac.rmatvec(v)
Jv2 = np.dot(Jd.T.conj(), v)
assert_close(Jv, Jv2, 'rmatvec vs array')
if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
Jv = jac.matvec(v)
Jv2 = jac.solve(jac.matvec(Jv))
assert_close(Jv, Jv2, 'dot vs solve')
if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
Jv = jac.rmatvec(v)
Jv2 = jac.rmatvec(jac.rsolve(Jv))
assert_close(Jv, Jv2, 'rmatvec vs rsolve')
x = rand(N)
jac.update(x, self._func(x))
def test_broyden1(self):
self._check_dot(nonlin.BroydenFirst, complex=False)
self._check_dot(nonlin.BroydenFirst, complex=True)
def test_broyden2(self):
self._check_dot(nonlin.BroydenSecond, complex=False)
self._check_dot(nonlin.BroydenSecond, complex=True)
def test_anderson(self):
self._check_dot(nonlin.Anderson, complex=False)
self._check_dot(nonlin.Anderson, complex=True)
def test_diagbroyden(self):
self._check_dot(nonlin.DiagBroyden, complex=False)
self._check_dot(nonlin.DiagBroyden, complex=True)
def test_linearmixing(self):
self._check_dot(nonlin.LinearMixing, complex=False)
self._check_dot(nonlin.LinearMixing, complex=True)
def test_excitingmixing(self):
self._check_dot(nonlin.ExcitingMixing, complex=False)
self._check_dot(nonlin.ExcitingMixing, complex=True)
def test_krylov(self):
self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-4)
self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-4)
class TestNonlinOldTests(TestCase):
""" Test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def test_broyden1(self):
x = nonlin.broyden1(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_broyden2(self):
x = nonlin.broyden2(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_anderson(self):
x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
assert_(nonlin.norm(x) < 0.33)
def test_linearmixing(self):
x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
assert_(nonlin.norm(x) < 1e-7)
assert_(nonlin.norm(F(x)) < 1e-7)
def test_exciting(self):
x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
assert_(nonlin.norm(x) < 1e-5)
assert_(nonlin.norm(F(x)) < 1e-5)
def test_diagbroyden(self):
x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
assert_(nonlin.norm(x) < 1e-8)
assert_(nonlin.norm(F(x)) < 1e-8)
def test_root_broyden1(self):
res = root(F, F.xin, method='broyden1',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_broyden2(self):
res = root(F, F.xin, method='broyden2',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_anderson(self):
res = root(F, F.xin, method='anderson',
options={'nit': 12,
'jac_options': {'alpha': 0.03, 'M': 5}})
assert_(nonlin.norm(res.x) < 0.33)
def test_root_linearmixing(self):
res = root(F, F.xin, method='linearmixing',
options={'nit': 60,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-7)
assert_(nonlin.norm(res.fun) < 1e-7)
def test_root_excitingmixing(self):
res = root(F, F.xin, method='excitingmixing',
options={'nit': 20,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-5)
assert_(nonlin.norm(res.fun) < 1e-5)
def test_root_diagbroyden(self):
res = root(F, F.xin, method='diagbroyden',
options={'nit': 11,
'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-8)
assert_(nonlin.norm(res.fun) < 1e-8)
if __name__ == "__main__":
run_module_suite()
| mit | -7,945,108,262,660,079,000 | 33.770642 | 88 | 0.538259 | false |
hydrospanner/DForurm | DForurm/env/Lib/site-packages/django/contrib/auth/base_user.py | 59 | 4973 | """
This module allows importing AbstractBaseUser even when django.contrib.auth is
not in INSTALLED_APPS.
"""
from __future__ import unicode_literals
import unicodedata
from django.contrib.auth import password_validation
from django.contrib.auth.hashers import (
check_password, is_password_usable, make_password,
)
from django.db import models
from django.utils.crypto import get_random_string, salted_hmac
from django.utils.deprecation import CallableFalse, CallableTrue
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
class BaseUserManager(models.Manager):
@classmethod
def normalize_email(cls, email):
"""
Normalize the email address by lowercasing the domain part of it.
"""
email = email or ''
try:
email_name, domain_part = email.strip().rsplit('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
return email
def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generate a random password with the given length and given
allowed_chars. The default value of allowed_chars does not have "I" or
"O" or letters and digits that look similar -- just to avoid confusion.
"""
return get_random_string(length, allowed_chars)
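    # Illustrative call (hypothetical usage, not part of the original file):
    #     User.objects.make_random_password(12)
    # returns a 12-character string drawn from the unambiguous alphabet above.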
def get_by_natural_key(self, username):
return self.get(**{self.model.USERNAME_FIELD: username})
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
password = models.CharField(_('password'), max_length=128)
last_login = models.DateTimeField(_('last login'), blank=True, null=True)
is_active = True
REQUIRED_FIELDS = []
class Meta:
abstract = True
def get_username(self):
"Return the identifying username for this User"
return getattr(self, self.USERNAME_FIELD)
def __init__(self, *args, **kwargs):
super(AbstractBaseUser, self).__init__(*args, **kwargs)
# Stores the raw password if set_password() is called so that it can
# be passed to password_changed() after the model is saved.
self._password = None
def __str__(self):
return self.get_username()
def clean(self):
setattr(self, self.USERNAME_FIELD, self.normalize_username(self.get_username()))
def save(self, *args, **kwargs):
super(AbstractBaseUser, self).save(*args, **kwargs)
if self._password is not None:
password_validation.password_changed(self._password, self)
self._password = None
def natural_key(self):
return (self.get_username(),)
@property
def is_anonymous(self):
"""
Always return False. This is a way of comparing User objects to
anonymous users.
"""
return CallableFalse
@property
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return CallableTrue
def set_password(self, raw_password):
self.password = make_password(raw_password)
self._password = raw_password
def check_password(self, raw_password):
"""
Return a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
# Password hash upgrades shouldn't be considered password changes.
self._password = None
self.save(update_fields=["password"])
return check_password(raw_password, self.password, setter)
def set_unusable_password(self):
# Set a value that will never be a valid hash
self.password = make_password(None)
def has_usable_password(self):
return is_password_usable(self.password)
def get_full_name(self):
raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_full_name() method')
def get_short_name(self):
raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_short_name() method.')
def get_session_auth_hash(self):
"""
Return an HMAC of the password field.
"""
key_salt = "django.contrib.auth.models.AbstractBaseUser.get_session_auth_hash"
return salted_hmac(key_salt, self.password).hexdigest()
@classmethod
def get_email_field_name(cls):
try:
return cls.EMAIL_FIELD
except AttributeError:
return 'email'
@classmethod
def normalize_username(cls, username):
return unicodedata.normalize('NFKC', force_text(username))
| mit | -6,938,767,137,227,467,000 | 32.375839 | 107 | 0.638045 | false |
Chandlercjy/OnePy | OnePy/environment.py | 1 | 5171 | import logging
from collections import defaultdict
import arrow
import OnePy as op
from OnePy.event_engine import EventEngine
from OnePy.utils.easy_func import get_day_ratio
class Environment(object):
"""作为全局共享变量为各模块提供支持"""
# general context
sys_date: str = None
sys_frequency: str = None
instrument: str = None
fromdate: str = None
todate: str = None
tickers: list = []
# general setting
execute_on_close_or_next_open: str = 'open'
    is_save_original: bool = False  # switch: whether to save the original signals
is_live_trading: bool = False
    is_show_today_signals: bool = False  # switch: whether to show the latest signals
# backtest modules dict
readers: dict = {}
feeds: dict = {}
cleaners: dict = {}
cleaners_feeds: dict = {}
strategies: dict = {}
brokers: dict = {}
risk_managers: dict = {}
recorders: dict = {}
recorder = None # type: op.RecorderBase
# system memory
    signals_normal: list = []  # all original signals
    signals_pending: list = []  # all original pending-order signals
    signals_trigger: list = []  # all original trigger-order signals
    signals_cancel: list = []  # all original cancel signals
    # Temporary signals, refreshed continuously
signals_normal_cur: list = []
signals_pending_cur: list = []
signals_trigger_cur: list = []
signals_cancel_cur: list = []
    orders_mkt_normal_cur: list = []  # current market orders, refreshed continuously
    orders_child_of_mkt_dict: dict = {}  # pending orders attached to market orders
    orders_mkt_absolute_cur: list = []  # triggered pending orders and their fills, refreshed continuously
    orders_mkt_submitted_cur: list = []  # filled orders, refreshed continuously
    orders_pending: list = []  # pending orders, removed once triggered
    orders_cancel_cur: list = []  # cancel orders, refreshed continuously
    orders_cancel_submitted_cur: list = []  # submitted cancel orders, refreshed continuously
    cur_suspended_tickers: list = []  # tickers currently suspended or missing a data update
    suspended_tickers_record: defaultdict = defaultdict(list)  # record of suspensions
# system modules
logger = logging.getLogger("OnePy")
event_engine = EventEngine()
cache: dict = {}
@classmethod
def initialize_env(cls):
"""刷新environment防止缓存累积"""
cls.signals_normal.clear()
cls.signals_pending.clear()
cls.signals_trigger.clear()
cls.signals_cancel.clear()
cls.signals_normal_cur.clear()
cls.signals_pending_cur.clear()
cls.signals_trigger_cur.clear()
cls.signals_cancel_cur.clear()
cls.orders_mkt_normal_cur.clear()
cls.orders_mkt_absolute_cur.clear()
cls.orders_mkt_submitted_cur.clear()
cls.orders_pending.clear()
cls.orders_child_of_mkt_dict.clear()
cls.orders_cancel_cur.clear()
cls.orders_cancel_submitted_cur.clear()
cls.tickers.clear()
cls.cur_suspended_tickers.clear()
cls.suspended_tickers_record.clear()
cls.cache.clear()
if not cls.is_live_trading:
ratio = get_day_ratio(cls.sys_frequency)
cls.sys_date = arrow.get(cls.fromdate).shift(
days=-ratio).format('YYYY-MM-DD HH:mm:ss')
cls.reset_all_counters()
@classmethod
def clear_modules(cls):
"""刷新environment防止缓存累积"""
cls.sys_date: str = None
cls.sys_frequency: str = None
cls.instrument: str = None
cls.fromdate: str = None
cls.todate: str = None
cls.tickers: list = []
cls.cur_suspended_tickers: list = []
cls.suspended_tickers_record: defaultdict = defaultdict(list)
cls.market_maker = None
cls.readers: dict = {}
cls.feeds: dict = {}
cls.cleaners: dict = {}
cls.cleaners_feeds: dict = {}
cls.strategies: dict = {}
cls.brokers: dict = {}
cls.risk_managers: dict = {}
cls.recorders: dict = {}
cls.recorder = None # type: op.RecorderBase
cls.event_loop = None # type: List[Dict]
cls.cache = {}
cls.execute_on_close_or_next_open: str = 'open'
cls.is_save_original: bool = False
cls.is_live_trading: bool = False
cls.is_show_today_signals: bool = False
@classmethod
def reset_all_counters(cls):
from itertools import count
from OnePy.sys_module.models import signals
from OnePy.sys_module.base_cleaner import CleanerBase
from OnePy.sys_module.models.orders.base_order import OrderBase
from OnePy.sys_module.components.order_generator import OrderGenerator
CleanerBase.counter = count(1)
signals.Signal.counter = count(1)
signals.SignalByTrigger.counter = count(1)
signals.SignalForPending.counter = count(1)
signals.SignalCancelTST.counter = count(1)
signals.SignalCancelPending.counter = count(1)
OrderBase.counter = count(1)
OrderGenerator.counter = count(1)
| mit | 7,527,293,996,103,642,000 | 31.701389 | 78 | 0.621576 | false |
marco-lancini/Showcase | django/utils/dates.py | 488 | 2237 | "Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
WEEKDAYS = {
0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'),
5:_('Saturday'), 6:_('Sunday')
}
WEEKDAYS_ABBR = {
0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'),
5:_('Sat'), 6:_('Sun')
}
WEEKDAYS_REV = {
'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4,
'saturday':5, 'sunday':6
}
MONTHS = {
1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'),
7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'),
12:_('December')
}
MONTHS_3 = {
1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'),
7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec')
}
MONTHS_3_REV = {
'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8,
'sep':9, 'oct':10, 'nov':11, 'dec':12
}
MONTHS_AP = { # month names in Associated Press style
1: pgettext_lazy('abbrev. month', 'Jan.'),
2: pgettext_lazy('abbrev. month', 'Feb.'),
3: pgettext_lazy('abbrev. month', 'March'),
4: pgettext_lazy('abbrev. month', 'April'),
5: pgettext_lazy('abbrev. month', 'May'),
6: pgettext_lazy('abbrev. month', 'June'),
7: pgettext_lazy('abbrev. month', 'July'),
8: pgettext_lazy('abbrev. month', 'Aug.'),
9: pgettext_lazy('abbrev. month', 'Sept.'),
10: pgettext_lazy('abbrev. month', 'Oct.'),
11: pgettext_lazy('abbrev. month', 'Nov.'),
12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
1: pgettext_lazy('alt. month', 'January'),
2: pgettext_lazy('alt. month', 'February'),
3: pgettext_lazy('alt. month', 'March'),
4: pgettext_lazy('alt. month', 'April'),
5: pgettext_lazy('alt. month', 'May'),
6: pgettext_lazy('alt. month', 'June'),
7: pgettext_lazy('alt. month', 'July'),
8: pgettext_lazy('alt. month', 'August'),
9: pgettext_lazy('alt. month', 'September'),
10: pgettext_lazy('alt. month', 'October'),
11: pgettext_lazy('alt. month', 'November'),
12: pgettext_lazy('alt. month', 'December')
}
| mit | 1,775,859,375,883,398,400 | 38.245614 | 89 | 0.549397 | false |
rzhxeo/youtube-dl | youtube_dl/extractor/radiode.py | 22 | 1776 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
class RadioDeIE(InfoExtractor):
IE_NAME = 'radio.de'
_VALID_URL = r'https?://(?P<id>.+?)\.(?:radio\.(?:de|at|fr|pt|es|pl|it)|rad\.io)'
_TEST = {
'url': 'http://ndr2.radio.de/',
'md5': '3b4cdd011bc59174596b6145cda474a4',
'info_dict': {
'id': 'ndr2',
'ext': 'mp3',
'title': 're:^NDR 2 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:591c49c702db1a33751625ebfb67f273',
'thumbnail': 're:^https?://.*\.png',
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
radio_id = self._match_id(url)
webpage = self._download_webpage(url, radio_id)
broadcast = json.loads(self._search_regex(
r'_getBroadcast\s*=\s*function\(\s*\)\s*{\s*return\s+({.+?})\s*;\s*}',
webpage, 'broadcast'))
title = self._live_title(broadcast['name'])
description = broadcast.get('description') or broadcast.get('shortDescription')
thumbnail = broadcast.get('picture4Url') or broadcast.get('picture4TransUrl')
formats = [{
'url': stream['streamUrl'],
'ext': stream['streamContentFormat'].lower(),
'acodec': stream['streamContentFormat'],
'abr': stream['bitRate'],
'asr': stream['sampleRate']
} for stream in broadcast['streamUrls']]
self._sort_formats(formats)
return {
'id': radio_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'is_live': True,
'formats': formats,
}
| unlicense | 4,797,217,983,046,140,000 | 31.290909 | 87 | 0.520833 | false |
andykimpe/chromium-test-npapi | tools/resources/list_resources_removed_by_repack.py | 95 | 3297 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
usage = """%s BUILDTYPE BUILDDIR
BUILDTYPE: either chromium or chrome.
BUILDDIR: The path to the output directory. e.g. relpath/to/out/Release
Prints out (to stdout) the sorted list of resource ids that are marked as
unused during the repacking process in the given build log (via stdin).
Additionally, attempt to print out the name of the resource and the generated
header file that contains the resource.
This script is used to print the list of resources that are not used so that
developers will notice and fix their .grd files.
"""
def GetResourceIdsFromRepackMessage(in_data):
"""Returns sorted set of resource ids that are not used from in_data.
"""
unused_resources = set()
unused_pattern = re.compile(
'RePackFromDataPackStrings Removed Key: (?P<resource_id>[0-9]+)')
for line in in_data:
match = unused_pattern.match(line)
if match:
resource_id = int(match.group('resource_id'))
unused_resources.add(resource_id)
return sorted(unused_resources)
def Main():
if len(sys.argv) != 3:
sys.stderr.write(usage % sys.argv[0])
return 1
build_type = sys.argv[1]
build_dir = sys.argv[2]
if build_type not in ('chromium', 'chrome'):
sys.stderr.write(usage % sys.argv[0])
return 1
generated_output_dir = os.path.join(build_dir, 'gen')
if not os.path.exists(generated_output_dir):
sys.stderr.write('Cannot find gen dir %s' % generated_output_dir)
return 1
if build_type == 'chromium':
excluded_header = 'google_chrome_strings.h'
else:
excluded_header = 'chromium_strings.h'
data_files = []
for root, dirs, files in os.walk(generated_output_dir):
if os.path.basename(root) != 'grit':
continue
header_files = [header for header in files if header.endswith('.h')]
if excluded_header in header_files:
header_files.remove(excluded_header)
data_files.extend([os.path.join(root, header) for header in header_files])
resource_id_to_name_file_map = {}
resource_pattern = re.compile('#define (?P<resource_name>[A-Z0-9_]+).* '
'(?P<resource_id>[0-9]+)$')
for f in data_files:
data = open(f).read()
for line in data.splitlines():
match = resource_pattern.match(line)
if match:
resource_id = int(match.group('resource_id'))
resource_name = match.group('resource_name')
if resource_id in resource_id_to_name_file_map:
print 'Duplicate:', resource_id
print (resource_name, f)
print resource_id_to_name_file_map[resource_id]
raise
resource_id_to_name_file_map[resource_id] = (resource_name, f)
unused_resources = GetResourceIdsFromRepackMessage(sys.stdin)
for resource_id in unused_resources:
if resource_id not in resource_id_to_name_file_map:
print 'WARNING: Unknown resource id', resource_id
continue
(resource_name, filename) = resource_id_to_name_file_map[resource_id]
sys.stdout.write('%d: %s in %s\n' % (resource_id, resource_name, filename))
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause | 4,113,470,212,081,949,000 | 32.30303 | 79 | 0.676372 | false |
gitaarik/monkful | tests/tests/basic_resource/put_identifier_field.py | 2 | 5212 | import unittest
import json
from datetime import datetime
from pymongo import MongoClient
from apps.basic_resource import server
from apps.basic_resource.documents import Article, Comment
class ResourcePutIdentifierField(unittest.TestCase):
"""
    Test that an HTTP PUT which updates a resource containing an embedded
    document with an identifier field (used in the update) returns the
    right response and updates the document correctly.
"""
@classmethod
def setUpClass(cls):
cls.app = server.app.test_client()
cls.mongo_client = MongoClient()
cls.initial_data = {
'title': "Test title",
'text': "Test text",
'publish': True,
'publish_date': datetime(2013, 10, 9, 8, 7, 8),
'comments': [
Comment(text="Test comment "),
Comment(text="Test comment 2"),
Comment(text="Test comment 3"),
],
'top_comment': Comment(text="Top comment"),
'tags': ['test', 'unittest', 'python', 'flask']
}
cls.article = Article(**cls.initial_data).save()
# the `id` field is the identifier field (duh)
cls.comments_update = {
'comments': [
{
'id': unicode(cls.article['comments'][0]['id']),
'text': "Test comment update"
},
{
'id': unicode(cls.article['comments'][1]['id']),
'text': "Test comment update 2"
}
]
}
cls.response = cls.app.put(
'/articles/{}/'.format(unicode(cls.article['id'])),
headers={'content-type': 'application/json'},
data=json.dumps(cls.comments_update)
)
@classmethod
def tearDownClass(cls):
cls.mongo_client.unittest_monkful.article.remove()
def test_status_code(self):
"""
Test if the response status code is 200.
"""
self.assertEqual(self.response.status_code, 200)
def test_content_type(self):
"""
Test if the content-type header is 'application/json'.
"""
self.assertEqual(
self.response.headers['content-type'],
'application/json'
)
def test_json(self):
"""
Test if the response data is valid JSON.
"""
try:
json.loads(self.response.data)
except:
self.fail("Response is not valid JSON.")
def test_content(self):
"""
Test if the deserialized response data evaluates back to our
data we posted to the resource in `setUpClass`.
"""
response_data = json.loads(self.response.data)
# Remap the response data so that it only has the fields our
# orignal data also had.
response_data = {
'title': response_data['title'],
'text': response_data['text'],
'publish': response_data['publish'],
'publish_date': response_data['publish_date'],
'comments': [
{
'id': response_data['comments'][0]['id'],
'text': response_data['comments'][0]['text']
},
{
'id': response_data['comments'][1]['id'],
'text': response_data['comments'][1]['text']
}
],
'top_comment': {
'text': response_data['top_comment']['text']
},
'tags': response_data['tags']
}
self.assertEqual(
response_data,
{
'title': self.initial_data['title'],
'text': self.initial_data['text'],
'publish': self.initial_data['publish'],
'publish_date': self.initial_data['publish_date'].isoformat(),
'comments': self.comments_update['comments'],
'top_comment': {
'text': self.initial_data['top_comment']['text']
},
'tags': self.initial_data['tags']
}
)
def test_documents(self):
"""
Test if the POST-ed data really ended up in the documents.
"""
article = Article.objects[0]
self.assertEqual(article.title, self.initial_data['title'])
self.assertEqual(article.text, self.initial_data['text'])
self.assertEqual(article.publish, self.initial_data['publish'])
self.assertEqual(
article.publish_date,
self.initial_data['publish_date']
)
self.assertEqual(
article.comments[0].text,
self.comments_update['comments'][0]['text']
)
self.assertEqual(
article.comments[1].text,
self.comments_update['comments'][1]['text']
)
# The complete `comments` field should've been overwritten so
# there should be only 2 comments instead of 3.
self.assertEqual(len(article.comments), 2)
self.assertEqual(
article.tags,
self.initial_data['tags']
)
| lgpl-3.0 | 1,759,723,128,228,298,200 | 31.17284 | 78 | 0.517843 | false |
nvie/python-mode | pymode/environment.py | 11 | 6338 | """ Define interfaces. """
from __future__ import print_function
import vim
import json
import time
import os.path
from ._compat import PY2
class VimPymodeEnviroment(object):
""" Vim User interface. """
prefix = '[Pymode]'
def __init__(self):
""" Init VIM environment. """
self.current = vim.current
self.options = dict(encoding=vim.eval('&enc'))
self.options['debug'] = self.var('g:pymode_debug', True)
@property
def curdir(self):
""" Return current working directory. """
return self.var('getcwd()')
@property
def curbuf(self):
""" Return current buffer. """
return self.current.buffer
@property
def cursor(self):
""" Return current window position.
:return tuple: (row, col)
"""
return self.current.window.cursor
@property
def source(self):
""" Return source of current buffer. """
return "\n".join(self.lines)
@property
def lines(self):
""" Iterate by lines in current file.
:return list:
"""
if not PY2:
return self.curbuf
return [l.decode(self.options.get('encoding')) for l in self.curbuf]
@staticmethod
def var(name, to_bool=False, silence=False):
""" Get vim variable.
:return vimobj:
"""
try:
value = vim.eval(name)
except vim.error:
if silence:
return None
raise
if to_bool:
try:
value = bool(int(value))
except ValueError:
value = value
return value
@staticmethod
def message(msg, history=False):
""" Show message to user.
:return: :None
"""
if history:
return vim.command('echom "%s"' % str(msg))
return vim.command('call pymode#wide_message("%s")' % str(msg))
def user_input(self, msg, default=''):
""" Return user input or default.
:return str:
"""
msg = '%s %s ' % (self.prefix, msg)
if default != '':
msg += '[%s] ' % default
try:
vim.command('echohl Debug')
input_str = vim.eval('input("%s> ")' % msg)
vim.command('echohl none')
except KeyboardInterrupt:
input_str = ''
return input_str or default
def user_confirm(self, msg, yes=False):
""" Get user confirmation.
:return bool:
"""
default = 'yes' if yes else 'no'
action = self.user_input(msg, default)
return action and 'yes'.startswith(action)
def user_input_choices(self, msg, *options):
""" Get one of many options.
        :return str: A chosen option
"""
choices = ['%s %s' % (self.prefix, msg)]
choices += [
"%s. %s" % (num, opt) for num, opt in enumerate(options, 1)]
try:
input_str = int(
vim.eval('inputlist(%s)' % self.prepare_value(choices)))
except (KeyboardInterrupt, ValueError):
input_str = 0
if not input_str:
self.message('Cancelled!')
return False
try:
return options[input_str - 1]
except (IndexError, ValueError):
self.error('Invalid option: %s' % input_str)
return self.user_input_choices(msg, *options)
@staticmethod
def error(msg):
""" Show error to user. """
vim.command('call pymode#error("%s")' % str(msg))
def debug(self, msg, *args):
""" Print debug information. """
if self.options.get('debug'):
print("%s %s [%s]" % (
int(time.time()), msg, ', '.join([str(a) for a in args])))
def stop(self, value=None):
""" Break Vim function. """
cmd = 'return'
if value is not None:
cmd += ' ' + self.prepare_value(value)
vim.command(cmd)
def catch_exceptions(self, func):
""" Decorator. Make execution more silence.
:return func:
"""
def _wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except (Exception, vim.error) as e: # noqa
if self.options.get('debug'):
raise
self.error(e)
return None
return _wrapper
def run(self, name, *args):
""" Run vim function. """
vim.command('call %s(%s)' % (name, ", ".join([
self.prepare_value(a) for a in args
])))
def let(self, name, value):
""" Set variable. """
cmd = 'let %s = %s' % (name, self.prepare_value(value))
self.debug(cmd)
vim.command(cmd)
def prepare_value(self, value, dumps=True):
""" Decode bstr to vim encoding.
:return unicode string:
"""
if dumps:
value = json.dumps(value)
if PY2:
value = value.decode('utf-8').encode(self.options.get('encoding'))
return value
def get_offset_params(self, cursor=None, base=""):
""" Calculate current offset.
:return tuple: (source, offset)
"""
row, col = cursor or env.cursor
source = ""
offset = 0
for i, line in enumerate(self.lines, 1):
if i == row:
source += line[:col] + base
offset = len(source)
source += line[col:]
else:
source += line
source += '\n'
env.debug('Get offset', base or None, row, col, offset)
return source, offset
@staticmethod
def goto_line(line):
""" Go to line. """
vim.command('normal %sggzz' % line)
def goto_file(self, path, cmd='e', force=False):
""" Function description. """
if force or os.path.abspath(path) != self.curbuf.name:
self.debug('read', path)
if ' ' in path and os.name == 'posix':
path = path.replace(' ', '\\ ')
vim.command("%s %s" % (cmd, path))
@staticmethod
def goto_buffer(bufnr):
""" Open buffer. """
if str(bufnr) != '-1':
vim.command('buffer %s' % bufnr)
env = VimPymodeEnviroment()
| lgpl-3.0 | 5,744,207,367,387,973,000 | 24.453815 | 78 | 0.506311 | false |
ZLLab-Mooc/edx-platform | cms/djangoapps/contentstore/views/program.py | 15 | 2058 | """Programs views for use with Studio."""
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import Http404, JsonResponse
from django.utils.decorators import method_decorator
from django.views.generic import View
from edxmako.shortcuts import render_to_response
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.lib.token_utils import get_id_token
class ProgramAuthoringView(View):
"""View rendering a template which hosts the Programs authoring app.
The Programs authoring app is a Backbone SPA maintained in a separate repository.
The app handles its own routing and provides a UI which can be used to create and
publish new Programs (e.g, XSeries).
"""
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
"""Populate the template context with values required for the authoring app to run."""
programs_config = ProgramsApiConfig.current()
if programs_config.is_studio_tab_enabled and request.user.is_staff:
return render_to_response('program_authoring.html', {
'show_programs_header': programs_config.is_studio_tab_enabled,
'authoring_app_config': programs_config.authoring_app_config,
'programs_api_url': programs_config.public_api_url,
'programs_token_url': reverse('programs_id_token'),
'studio_home_url': reverse('home'),
})
else:
raise Http404
class ProgramsIdTokenView(View):
"""Provides id tokens to JavaScript clients for use with the Programs API."""
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
"""Generate and return a token, if the integration is enabled."""
if ProgramsApiConfig.current().is_studio_tab_enabled:
id_token = get_id_token(request.user, 'programs')
return JsonResponse({'id_token': id_token})
else:
raise Http404
| agpl-3.0 | 4,210,757,589,727,792,600 | 41.875 | 94 | 0.68999 | false |
pombredanne/acora | acora/__init__.py | 2 | 10484 | """\
Acora - a multi-keyword search engine based on Aho-Corasick trees.
Usage::
>>> from acora import AcoraBuilder
Collect some keywords::
>>> builder = AcoraBuilder('ab', 'bc', 'de')
>>> builder.add('a', 'b')
Generate the Acora search engine::
>>> ac = builder.build()
Search a string for all occurrences::
>>> ac.findall('abc')
[('a', 0), ('ab', 0), ('b', 1), ('bc', 1)]
>>> ac.findall('abde')
[('a', 0), ('ab', 0), ('b', 1), ('de', 2)]
"""
from __future__ import absolute_import
import sys
IS_PY3 = sys.version_info[0] >= 3
if IS_PY3:
unicode = str
FILE_BUFFER_SIZE = 32 * 1024
class PyAcora(object):
"""A simple (and very slow) Python implementation of the Acora
search engine.
"""
transitions = None
def __init__(self, machine, transitions=None):
if transitions is not None:
# old style format
start_state = machine
self.transitions = dict([
((state.id, char), (target_state.id, target_state.matches))
for ((state, char), target_state) in transitions.items()])
else:
# new style Machine format
start_state = machine.start_state
ignore_case = machine.ignore_case
self.transitions = transitions = {}
child_states = machine.child_states
child_targets = {}
state_matches = {}
needs_bytes_conversion = None
for state in child_states:
state_id = state.id
child_targets[state_id], state_matches[state_id] = (
_merge_targets(state, ignore_case))
if needs_bytes_conversion is None and state_matches[state_id]:
if IS_PY3:
needs_bytes_conversion = any(
isinstance(s, bytes) for s in state_matches[state_id])
elif any(isinstance(s, unicode) for s in state_matches[state_id]):
# in Py2, some keywords might be str even though we're processing unicode
needs_bytes_conversion = False
if needs_bytes_conversion is None and not IS_PY3:
needs_bytes_conversion = True
if needs_bytes_conversion:
if IS_PY3:
convert = ord
else:
from codecs import latin_1_encode
def convert(s):
return latin_1_encode(s)[0]
else:
convert = None
get_child_targets = child_targets.get
get_matches = state_matches.get
state_id = start_state.id
for ch, child in _merge_targets(start_state, ignore_case)[0].items():
child_id = child.id
if convert is not None:
ch = convert(ch)
transitions[(state_id, ch)] = (child_id, get_matches(child_id))
for state in child_states:
state_id = state.id
for ch, child in get_child_targets(state_id).items():
child_id = child.id
if convert is not None:
ch = convert(ch)
transitions[(state_id, ch)] = (child_id, get_matches(child_id))
self.start_state = start_state.id
def finditer(self, s):
"""Iterate over all occurrences of any keyword in the string.
Returns (keyword, offset) pairs.
"""
state = self.start_state
start_state = (state, [])
next_state = self.transitions.get
pos = 0
for char in s:
pos += 1
state, matches = next_state((state, char), start_state)
if matches:
for match in matches:
yield (match, pos-len(match))
def findall(self, s):
"""Find all occurrences of any keyword in the string.
Returns a list of (keyword, offset) pairs.
"""
return list(self.finditer(s))
def filefind(self, f):
"""Iterate over all occurrences of any keyword in a file.
Returns (keyword, offset) pairs.
"""
opened = False
if not hasattr(f, 'read'):
f = open(f, 'rb')
opened = True
try:
state = self.start_state
start_state = (state, ())
next_state = self.transitions.get
pos = 0
while 1:
data = f.read(FILE_BUFFER_SIZE)
if not data:
break
for char in data:
pos += 1
state, matches = next_state((state, char), start_state)
if matches:
for match in matches:
yield (match, pos-len(match))
finally:
if opened:
f.close()
def filefindall(self, f):
"""Find all occurrences of any keyword in a file.
Returns a list of (keyword, offset) pairs.
"""
return list(self.filefind(f))
# import from shared Python/Cython module
from acora._acora import (
insert_bytes_keyword, insert_unicode_keyword,
build_trie as _build_trie, build_MachineState as _MachineState, merge_targets as _merge_targets)
# import from Cython module if available
try:
from acora._cacora import (
UnicodeAcora, BytesAcora, insert_bytes_keyword, insert_unicode_keyword)
except ImportError:
# C module not there ...
UnicodeAcora = BytesAcora = PyAcora
class AcoraBuilder(object):
"""The main builder class for an Acora search engine.
Add keywords by calling ``.add(*keywords)`` or by passing them
into the constructor. Then build the search engine by calling
``.build()``.
Builds a case insensitive search engine when passing
``ignore_case=True``, and a case sensitive engine otherwise.
"""
ignore_case = False
def __init__(self, *keywords, **kwargs):
if kwargs:
self.ignore_case = kwargs.pop('ignore_case', False)
if kwargs:
raise TypeError(
"%s() got unexpected keyword argument %s" % (
self.__class__.__name__, next(iter(kwargs))))
if len(keywords) == 1 and isinstance(keywords[0], (list, tuple)):
keywords = keywords[0]
self.for_unicode = None
self.state_counter = 1
self.keywords = set()
self.tree = _MachineState(0)
if keywords:
self.update(keywords)
def __update(self, keywords):
"""Add more keywords to the search engine builder.
Adding keywords does not impact previously built search
engines.
"""
if not keywords:
return
self.tree = None
self.keywords.update(keywords)
if self.for_unicode is None:
for keyword in keywords:
if isinstance(keyword, unicode):
self.for_unicode = True
elif isinstance(keyword, bytes):
self.for_unicode = False
else:
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
break
# validate input string types
marker = object()
if self.for_unicode:
for keyword in keywords:
if not isinstance(keyword, unicode):
break
else:
keyword = marker
else:
for keyword in keywords:
if not isinstance(keyword, bytes):
break
else:
keyword = marker
if keyword is not marker:
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
def add(self, *keywords):
"""Add more keywords to the search engine builder.
Adding keywords does not impact previously built search
engines.
"""
if keywords:
self.update(keywords)
def build(self, ignore_case=None, acora=None):
"""Build a search engine from the aggregated keywords.
Builds a case insensitive search engine when passing
``ignore_case=True``, and a case sensitive engine otherwise.
"""
if acora is None:
if self.for_unicode:
acora = UnicodeAcora
else:
acora = BytesAcora
if self.for_unicode == False and ignore_case:
import sys
if sys.version_info[0] >= 3:
raise ValueError(
"Case insensitive search is not supported for byte strings in Python 3")
if ignore_case is not None and ignore_case != self.ignore_case:
# must rebuild tree
builder = type(self)(ignore_case=ignore_case)
builder.update(self.keywords)
return builder.build(acora=acora)
return acora(_build_trie(self.tree, ignore_case=self.ignore_case))
def update(self, keywords):
for_unicode = self.for_unicode
ignore_case = self.ignore_case
insert_keyword = insert_unicode_keyword if for_unicode else insert_bytes_keyword
for keyword in keywords:
if for_unicode is None:
for_unicode = self.for_unicode = isinstance(keyword, unicode)
insert_keyword = (
insert_unicode_keyword if for_unicode else insert_bytes_keyword)
elif for_unicode != isinstance(keyword, unicode):
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
self.state_counter = insert_keyword(
self.tree, keyword, self.state_counter, ignore_case)
self.keywords.update(keywords)
### convenience functions
def search(s, *keywords):
"""Convenience function to search a string for keywords.
"""
acora = AcoraBuilder(keywords).build()
return acora.findall(s)
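# Illustrative example (not part of the original module): with the keywords
# 'ab' and 'bc', search('abc', 'ab', 'bc') is expected to return
# [('ab', 0), ('bc', 1)].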
def search_ignore_case(s, *keywords):
"""Convenience function to search a string for keywords. Case
insensitive version.
"""
acora = AcoraBuilder(keywords, ignore_case=True).build()
return acora.findall(s)
| bsd-3-clause | -4,617,662,313,878,696,000 | 32.388535 | 100 | 0.542636 | false |
skerit/shotfactory | shotfactory04/gui/windows/flock.py | 1 | 3182 | # browsershots.org - Test your web design in different browsers
# Copyright (C) 2007 Johann C. Rocholl <[email protected]>
#
# Browsershots is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GUI-specific interface functions for Flock on Microsoft Windows.
"""
__revision__ = "$Rev$"
__date__ = "$Date$"
__author__ = "$Author$"
import os
import time
from win32com.shell import shellcon
from win32com.shell import shell
from shotfactory04.gui import windows
class Gui(windows.Gui):
"""
Special functions for Flock on Windows.
"""
def reset_browser(self):
"""
Delete previous session and browser cache.
"""
appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_LOCAL_APPDATA, 0, 0)
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browsers', 'Profiles', '*', 'Cache'))
appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0)
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browser', 'Profiles', '*', 'sessionstore.js'))
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browser', 'Profiles', '*', 'history.dat'))
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browser', 'Profiles', '*', 'cookies.txt'))
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browser', 'Profiles', '*', 'historysearch'))
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browser', 'Profiles', '*', 'lucene'))
def start_browser(self, config, url, options):
"""
Start browser and load website.
"""
if config['major'] == 2:
defcmd = r'c:\progra~1\flock\flock.exe'
else:
defcmd = r'c:\progra~1\flock\flock\flock.exe'
command = config['command'] or defcmd
print 'running', command
try:
import subprocess
except ImportError:
os.spawnl(os.P_DETACH, command, os.path.basename(command), url)
else:
subprocess.Popen([command, url])
print "Sleeping %d seconds while page is loading." % options.wait
time.sleep(options.wait)
def find_scrollable(self):
"""Find scrollable window."""
flock = self.find_window_by_title_suffix(' Flock')
return self.get_child_window(flock)
# Test scrolling from command line
if __name__ == '__main__':
config = {
'width': 1024,
'bpp': 24,
}
class Options:
verbose = 3
gui = Gui(config, Options())
gui.down()
time.sleep(1)
gui.scroll_bottom()
| gpl-3.0 | -5,403,887,888,551,945,000 | 32.851064 | 78 | 0.625393 | false |
uroybd/parsexl | parsexl.py | 1 | 1228 | """This module parses xls/xlsx files and returns data in JSON."""
import xlrd, datetime
from collections import OrderedDict
import simplejson as json
""" This function take 5 arguments:
inp = Input file
outp = Output file
sheet = Worksheet to work with in input file.
start = Starting row
end = Ending row
fields = A list of field-names to be used in json."""
def xlparse(inp, outp, sheet, start, end, fields):
inpt = inp
outpt = outp
wb = xlrd.open_workbook(inpt)
sh = wb.sheet_by_name(sheet)
json_list = []
for rownum in range(start - 1, end):
dicto = OrderedDict()
row_values = sh.row_values(rownum)
counter = 0
for i in fields:
if i.find('date') != -1:
try:
timestr = xlrd.xldate_as_tuple(row_values[counter], wb.datemode)
dicto[i] = str(datetime.datetime(*timestr)).split(' ')[0]
except:
dicto[i] = row_values[counter]
else:
dicto[i] = row_values[counter]
counter = counter + 1
json_list.append(dicto)
out = json.dumps(json_list)
with open(outpt, 'w') as f:
f.write(out)
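# Illustrative usage (hypothetical file names and field list, not part of the
# original module):
#
#     xlparse('input.xlsx', 'output.json', 'Sheet1', 2, 100,
#             ['name', 'date_of_birth', 'amount'])
#
# reads rows 2 through 100 of 'Sheet1' and writes them to 'output.json' as a
# list of JSON objects keyed by the given field names.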
| gpl-3.0 | 6,407,074,520,226,245,000 | 29.7 | 84 | 0.566775 | false |
matthiascy/panda3d | direct/src/directscripts/doxygen_filter.py | 9 | 2333 | """ This script converts a file into a format that
doxygen can understand and process. It can be used
as an INPUT_FILTER in doxygen. """
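# A Doxyfile typically points INPUT_FILTER at this script, for example
# (illustrative): INPUT_FILTER = "python doxygen_filter.py"
# Doxygen then invokes that command with each input file name appended and
# reads the filtered source from stdout.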
import sys, os
# Explicitly include these files. Besides these, all
# files ending in _src will be explicitly included too.
INCLUDE_FILES = ["fltnames.h", "dblnames.h",
"flt2dblnames.h", "dbl2fltnames.h"]
def filter_file(infile):
desc = ""
reading_desc = False
license = True
indent = 0
for line in infile.readlines():
line = line.rstrip()
if line.startswith("////"):
if reading_desc:
                # Probably the end of a description comment block.
line = "*/"
reading_desc = False
else:
line = ""
elif line.startswith("//"):
strline = line.lstrip('/ \t')
if reading_desc:
line = line[min(indent, len(line) - len(strline)):]
else:
# A "Description:" text starts the description.
if strline.startswith("Description"):
strline = strline[11:].lstrip(': \t')
indent = len(line) - len(strline)
reading_desc = True
line = "/** " + strline
else:
license = False
if reading_desc:
line = "*/" + line
reading_desc = False
if line.startswith("#include"):
fname = line.split(' ', 1)[1].strip().strip('"')
if fname.rsplit('.', 1)[0].endswith("_src") or fname in INCLUDE_FILES:
# We handle these in a special way, because
# doxygen won't do this properly for us.
# This breaks line numbering, but beh.
if not os.path.isfile(fname):
fname = os.path.join(os.path.dirname(filename), fname)
if os.path.isfile(fname):
filter_file(open(fname, 'r'))
continue # Skip the #include
if license:
line = ""
print(line)
if __name__ == "__main__":
assert len(sys.argv) == 2, "please specify a filename"
filename = sys.argv[1]
infile = open(filename, 'r')
filter_file(infile)
infile.close()
| bsd-3-clause | 7,423,275,660,277,925,000 | 33.308824 | 86 | 0.499357 | false |
xen0l/ansible | lib/ansible/module_utils/network/f5/common.py | 2 | 18920 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.six import iteritems
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE
from collections import defaultdict
try:
from icontrol.exceptions import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
f5_provider_spec = {
'server': dict(
fallback=(env_fallback, ['F5_SERVER'])
),
'server_port': dict(
type='int',
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
'user': dict(
fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])
),
'password': dict(
no_log=True,
aliases=['pass', 'pwd'],
fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])
),
'ssh_keyfile': dict(
type='path'
),
'validate_certs': dict(
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
'transport': dict(
choices=['cli', 'rest'],
default='rest'
),
'timeout': dict(type='int'),
}
f5_argument_spec = {
'provider': dict(type='dict', options=f5_provider_spec),
}
f5_top_spec = {
'server': dict(
removed_in_version=2.9,
fallback=(env_fallback, ['F5_SERVER'])
),
'user': dict(
removed_in_version=2.9,
fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])
),
'password': dict(
removed_in_version=2.9,
no_log=True,
aliases=['pass', 'pwd'],
fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])
),
'validate_certs': dict(
removed_in_version=2.9,
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
'server_port': dict(
removed_in_version=2.9,
type='int',
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
'transport': dict(
removed_in_version=2.9,
choices=['cli', 'rest']
)
}
f5_argument_spec.update(f5_top_spec)
def get_provider_argspec():
return f5_provider_spec
def load_params(params):
provider = params.get('provider') or dict()
for key, value in iteritems(provider):
if key in f5_argument_spec:
if params.get(key) is None and value is not None:
params[key] = value
# Fully Qualified name (with the partition)
def fqdn_name(partition, value):
"""This method is not used
This was the original name of a method that was used throughout all
the F5 Ansible modules. This is now deprecated, and should be removed
in 2.9. All modules should be changed to use ``fq_name``.
TODO(Remove in Ansible 2.9)
"""
return fq_name(partition, value)
def fq_name(partition, value):
"""Returns a 'Fully Qualified' name
A BIG-IP expects most names of resources to be in a fully-qualified
form. This means that both the simple name, and the partition need
to be combined.
The Ansible modules, however, can accept (as names for several
resources) their name in the FQ format. This becomes an issue when
the FQ name and the partition are both specified as separate values.
Consider the following examples.
# Name not FQ
name: foo
partition: Common
# Name FQ
name: /Common/foo
partition: Common
This method will rectify the above situation and will, in both cases,
return the following for name.
/Common/foo
Args:
partition (string): The partition that you would want attached to
the name if the name has no partition.
value (string): The name that you want to attach a partition to.
This value will be returned unchanged if it has a partition
attached to it already.
Returns:
string: The fully qualified name, given the input parameters.
"""
if value is not None:
try:
int(value)
return '/{0}/{1}'.format(partition, value)
except (ValueError, TypeError):
if not value.startswith('/'):
return '/{0}/{1}'.format(partition, value)
return value
# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
if list_names is None:
return None
return map(lambda x: fqdn_name(partition, x), list_names)
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
raise F5ModuleError(to_text(err, errors='surrogate_then_replace'))
result = to_text(out, errors='surrogate_then_replace')
responses.append(result)
return responses
def flatten_boolean(value):
truthy = list(BOOLEANS_TRUE) + ['enabled']
falsey = list(BOOLEANS_FALSE) + ['disabled']
if value is None:
return None
elif value in truthy:
return 'yes'
elif value in falsey:
return 'no'
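# For example, flatten_boolean('enabled') returns 'yes', flatten_boolean('disabled')
# returns 'no', and any value outside both sets falls through so the function
# implicitly returns None.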
def cleanup_tokens(client):
try:
        # isinstance cannot be used here because importing it creates a
        # circular dependency with the module_utils.network.f5.bigip file.
#
# TODO(consider refactoring cleanup_tokens)
if 'F5RestClient' in type(client).__name__:
token = client._client.headers.get('X-F5-Auth-Token', None)
if not token:
return
uri = "https://{0}:{1}/mgmt/shared/authz/tokens/{2}".format(
client.provider['server'],
client.provider['server_port'],
token
)
resp = client.api.delete(uri)
try:
resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
return True
else:
resource = client.api.shared.authz.tokens_s.token.load(
name=client.api.icrs.token
)
resource.delete()
except Exception as ex:
pass
def is_cli(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
result = 'cli' in (transport, provider_transport)
return result
def is_valid_hostname(host):
"""Reasonable attempt at validating a hostname
Compiled from various paragraphs outlined here
https://tools.ietf.org/html/rfc3696#section-2
https://tools.ietf.org/html/rfc1123
Notably,
* Host software MUST handle host names of up to 63 characters and
SHOULD handle host names of up to 255 characters.
* The "LDH rule", after the characters that it permits. (letters, digits, hyphen)
* If the hyphen is used, it is not permitted to appear at
either the beginning or end of a label
:param host:
:return:
"""
if len(host) > 255:
return False
host = host.rstrip(".")
allowed = re.compile(r'(?!-)[A-Z0-9-]{1,63}(?<!-)$', re.IGNORECASE)
result = all(allowed.match(x) for x in host.split("."))
return result
def is_valid_fqdn(host):
"""Reasonable attempt at validating a hostname
Compiled from various paragraphs outlined here
https://tools.ietf.org/html/rfc3696#section-2
https://tools.ietf.org/html/rfc1123
Notably,
* Host software MUST handle host names of up to 63 characters and
SHOULD handle host names of up to 255 characters.
* The "LDH rule", after the characters that it permits. (letters, digits, hyphen)
* If the hyphen is used, it is not permitted to appear at
either the beginning or end of a label
:param host:
:return:
"""
if len(host) > 255:
return False
host = host.rstrip(".")
allowed = re.compile(r'(?!-)[A-Z0-9-]{1,63}(?<!-)$', re.IGNORECASE)
result = all(allowed.match(x) for x in host.split("."))
if result:
parts = host.split('.')
if len(parts) > 1:
return True
return False
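# For example, is_valid_hostname('bigip-1') accepts a single label, whereas
# is_valid_fqdn('bigip-1') returns False because an FQDN here needs at least
# two dot-separated labels (e.g. 'bigip-1.example.com').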
def transform_name(partition='', name='', sub_path=''):
if name:
name = name.replace('/', '~')
if partition:
partition = '~' + partition
else:
if sub_path:
raise F5ModuleError(
'When giving the subPath component include partition as well.'
)
if sub_path and partition:
sub_path = '~' + sub_path
if name and partition:
name = '~' + name
result = partition + sub_path + name
return result
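# For example, transform_name('Common', 'my-pool') returns '~Common~my-pool',
# and transform_name('Common', 'my-pool', 'my-folder') returns
# '~Common~my-folder~my-pool', the form expected in iControl REST URIs.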
def dict2tuple(items):
"""Convert a dictionary to a list of tuples
This method is used in cases where dictionaries need to be compared. Due
to dictionaries inherently having no order, it is easier to compare list
of tuples because these lists can be converted to sets.
This conversion only supports dicts of simple values. Do not give it dicts
that contain sub-dicts. This will not give you the result you want when using
the returned tuple for comparison.
Args:
items (dict): The dictionary of items that should be converted
Returns:
list: Returns a list of tuples upon success. Otherwise, an empty list.
"""
result = []
for x in items:
tmp = [(str(k), str(v)) for k, v in iteritems(x)]
result += tmp
return result
def compare_dictionary(want, have):
"""Performs a dictionary comparison
Args:
want (dict): Dictionary to compare with second parameter.
have (dict): Dictionary to compare with first parameter.
Returns:
bool:
"""
if want == [] and have is None:
return None
if want is None:
return None
w = dict2tuple(want)
h = dict2tuple(have)
if set(w) == set(h):
return None
else:
return want
def is_ansible_debug(module):
if module._debug and module._verbosity >= 4:
return True
return False
def fail_json(module, ex, client=None):
if is_ansible_debug(module) and client:
module.fail_json(msg=str(ex), __f5debug__=client.api.debug_output)
module.fail_json(msg=str(ex))
def exit_json(module, results, client=None):
if is_ansible_debug(module) and client:
results['__f5debug__'] = client.api.debug_output
module.exit_json(**results)
def is_uuid(uuid=None):
"""Check to see if value is an F5 UUID
UUIDs are used in BIG-IQ and in select areas of BIG-IP (notably ASM). This method
will check to see if the provided value matches a UUID as known by these products.
Args:
uuid (string): The value to check for UUID-ness
Returns:
bool:
"""
if uuid is None:
return False
pattern = r'[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}'
if re.match(pattern, uuid):
return True
return False
def on_bigip():
if os.path.exists('/usr/bin/tmsh'):
return True
return False
class Noop(object):
"""Represent no-operation required
This class is used in the Difference engine to specify when an attribute
has not changed. Difference attributes may return an instance of this
class as a means to indicate when the attribute has not changed.
The Noop object allows attributes to be set to None when sending updates
to the API. `None` is technically a valid value in some cases (it indicates
that the attribute should be removed from the resource).
"""
pass
class F5BaseClient(object):
def __init__(self, *args, **kwargs):
self.params = kwargs
self.module = kwargs.get('module', None)
load_params(self.params)
self._client = None
@property
def api(self):
raise F5ModuleError("Management root must be used from the concrete product classes.")
def reconnect(self):
"""Attempts to reconnect to a device
The existing token from a ManagementRoot can become invalid if you,
for example, upgrade the device (such as is done in the *_software
module.
This method can be used to reconnect to a remote device without
having to re-instantiate the ArgumentSpec and AnsibleF5Client classes
it will use the same values that were initially provided to those
classes
:return:
:raises iControlUnexpectedHTTPError
"""
self._client = None
@staticmethod
def validate_params(key, store):
if key in store and store[key] is not None:
return True
else:
return False
def merge_provider_params(self):
result = dict()
provider = self.params.get('provider', {})
if self.validate_params('server', provider):
result['server'] = provider['server']
elif self.validate_params('server', self.params):
result['server'] = self.params['server']
elif self.validate_params('F5_SERVER', os.environ):
result['server'] = os.environ['F5_SERVER']
else:
raise F5ModuleError('Server parameter cannot be None or missing, please provide a valid value')
if self.validate_params('server_port', provider):
result['server_port'] = provider['server_port']
elif self.validate_params('server_port', self.params):
result['server_port'] = self.params['server_port']
elif self.validate_params('F5_SERVER_PORT', os.environ):
result['server_port'] = os.environ['F5_SERVER_PORT']
else:
result['server_port'] = 443
if self.validate_params('validate_certs', provider):
result['validate_certs'] = provider['validate_certs']
elif self.validate_params('validate_certs', self.params):
result['validate_certs'] = self.params['validate_certs']
elif self.validate_params('F5_VALIDATE_CERTS', os.environ):
result['validate_certs'] = os.environ['F5_VALIDATE_CERTS']
else:
result['validate_certs'] = True
if self.validate_params('auth_provider', provider):
result['auth_provider'] = provider['auth_provider']
elif self.validate_params('auth_provider', self.params):
result['auth_provider'] = self.params['auth_provider']
else:
result['auth_provider'] = None
if self.validate_params('user', provider):
result['user'] = provider['user']
elif self.validate_params('user', self.params):
result['user'] = self.params['user']
elif self.validate_params('F5_USER', os.environ):
result['user'] = os.environ.get('F5_USER')
elif self.validate_params('ANSIBLE_NET_USERNAME', os.environ):
result['user'] = os.environ.get('ANSIBLE_NET_USERNAME')
else:
result['user'] = None
if self.validate_params('password', provider):
result['password'] = provider['password']
elif self.validate_params('password', self.params):
result['password'] = self.params['password']
elif self.validate_params('F5_PASSWORD', os.environ):
result['password'] = os.environ.get('F5_PASSWORD')
elif self.validate_params('ANSIBLE_NET_PASSWORD', os.environ):
result['password'] = os.environ.get('ANSIBLE_NET_PASSWORD')
else:
result['password'] = None
if result['validate_certs'] in BOOLEANS_TRUE:
result['validate_certs'] = True
else:
result['validate_certs'] = False
return result
class AnsibleF5Parameters(object):
def __init__(self, *args, **kwargs):
self._values = defaultdict(lambda: None)
self._values['__warnings'] = []
self.client = kwargs.pop('client', None)
self._module = kwargs.pop('module', None)
self._params = {}
params = kwargs.pop('params', None)
if params:
self.update(params=params)
self._params.update(params)
def update(self, params=None):
if params:
self._params.update(params)
for k, v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have
# an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
def __getattr__(self, item):
# Ensures that properties that weren't defined, and therefore stashed
# in the `_values` dict, will be retrievable.
return self._values[item]
@property
def partition(self):
if self._values['partition'] is None:
return 'Common'
return self._values['partition'].strip('/')
@partition.setter
def partition(self, value):
self._values['partition'] = value
def _filter_params(self, params):
return dict((k, v) for k, v in iteritems(params) if v is not None)
class F5ModuleError(Exception):
pass
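# Illustrative sketch; not part of the original module. The class and attribute
# names below are assumptions used only to show how `api_map`, `api_attributes`
# and the property machinery of AnsibleF5Parameters above fit together.
class ExampleParameters(AnsibleF5Parameters):
    # Map an awkward REST key onto a friendly attribute name.
    api_map = {
        'dns.proxy.__iter__': 'dns_proxy',
    }
    # Keys that api_params() serializes back into the REST payload.
    api_attributes = ['dns.proxy.__iter__']
    @property
    def dns_proxy(self):
        return self._values['dns_proxy']
    @dns_proxy.setter
    def dns_proxy(self, value):
        # Normalize to a list so api_params() emits a predictable structure.
        self._values['dns_proxy'] = list(value) if value else None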
| gpl-3.0 | -6,057,477,038,873,418,000 | 30.798319 | 107 | 0.607822 | false |
yanheven/glance | glance/api/v2/model/metadef_property_type.py | 20 | 2354 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wsme
from wsme import types
from glance.api.v2.model.metadef_property_item_type import ItemType
from glance.common.wsme_utils import WSMEModelTransformer
class PropertyType(types.Base, WSMEModelTransformer):
# When used in collection of PropertyTypes, name is a dictionary key
# and not included as separate field.
name = wsme.wsattr(types.text, mandatory=False)
type = wsme.wsattr(types.text, mandatory=True)
title = wsme.wsattr(types.text, mandatory=True)
description = wsme.wsattr(types.text, mandatory=False)
operators = wsme.wsattr([types.text], mandatory=False)
default = wsme.wsattr(types.bytes, mandatory=False)
readonly = wsme.wsattr(bool, mandatory=False)
# fields for type = string
minimum = wsme.wsattr(int, mandatory=False)
maximum = wsme.wsattr(int, mandatory=False)
enum = wsme.wsattr([types.text], mandatory=False)
pattern = wsme.wsattr(types.text, mandatory=False)
# fields for type = integer, number
minLength = wsme.wsattr(int, mandatory=False)
maxLength = wsme.wsattr(int, mandatory=False)
confidential = wsme.wsattr(bool, mandatory=False)
# fields for type = array
items = wsme.wsattr(ItemType, mandatory=False)
uniqueItems = wsme.wsattr(bool, mandatory=False)
minItems = wsme.wsattr(int, mandatory=False)
maxItems = wsme.wsattr(int, mandatory=False)
additionalItems = wsme.wsattr(bool, mandatory=False)
def __init__(self, **kwargs):
super(PropertyType, self).__init__(**kwargs)
class PropertyTypes(types.Base, WSMEModelTransformer):
properties = wsme.wsattr({types.text: PropertyType}, mandatory=False)
def __init__(self, **kwargs):
super(PropertyTypes, self).__init__(**kwargs)
| apache-2.0 | -786,576,585,974,371,700 | 37.590164 | 73 | 0.727273 | false |
buguelos/odoo | addons/lunch/wizard/lunch_order.py | 440 | 1299 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_order_order(osv.TransientModel):
""" lunch order meal """
_name = 'lunch.order.order'
_description = 'Wizard to order a meal'
def order(self,cr,uid,ids,context=None):
return self.pool.get('lunch.order.line').order(cr, uid, ids, context=context)
| agpl-3.0 | 1,602,878,356,410,357,800 | 43.793103 | 85 | 0.618938 | false |
smileboywtu/python-enhance | static/demo/tic-tac-toe.py | 1 | 5575 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tic-Tac-Toe
# Plays the game of tic-tac-toe against a human opponent
# global constants
X = "X"
O = "O"
EMPTY = " "
TIE = "TIE"
NUM_SQUARES = 9
def display_instruct():
"""Display game instructions."""
print(
"""
Welcome to the greatest intellectual challenge of all time: Tic-Tac-Toe.
This will be a showdown between your human brain and my silicon processor.
You will make your move known by entering a number, 0 - 8. The number
will correspond to the board position as illustrated:
0 | 1 | 2
---------
3 | 4 | 5
---------
6 | 7 | 8
Prepare yourself, human. The ultimate battle is about to begin. \n
"""
)
def ask_yes_no(question):
"""Ask a yes or no question."""
response = None
while response not in ("y", "n"):
response = input(question).lower()
return response
def ask_number(question, low, high):
"""Ask for a number within a range."""
response = None
while response not in range(low, high):
response = int(input(question))
return response
def pieces():
"""Determine if player or computer goes first."""
go_first = ask_yes_no("Do you require the first move? (y/n): ")
if go_first == "y":
print("\nThen take the first move. You will need it.")
human = X
computer = O
else:
print("\nYour bravery will be your undoing... I will go first.")
computer = X
human = O
return computer, human
def new_board():
"""Create new game board."""
board = []
for square in range(NUM_SQUARES):
board.append(EMPTY)
return board
def display_board(board):
"""Display game board on screen."""
print("\n\t", board[0], "|", board[1], "|", board[2])
print("\t", "---------")
print("\t", board[3], "|", board[4], "|", board[5])
print("\t", "---------")
print("\t", board[6], "|", board[7], "|", board[8], "\n")
def legal_moves(board):
"""Create list of legal moves."""
moves = []
for square in range(NUM_SQUARES):
if board[square] == EMPTY:
moves.append(square)
return moves
def winner(board):
"""Determine the game winner."""
WAYS_TO_WIN = ((0, 1, 2),
(3, 4, 5),
(6, 7, 8),
(0, 3, 6),
(1, 4, 7),
(2, 5, 8),
(0, 4, 8),
(2, 4, 6))
for row in WAYS_TO_WIN:
if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY:
winner = board[row[0]]
return winner
if EMPTY not in board:
return TIE
return None
def human_move(board, human):
"""Get human move."""
legal = legal_moves(board)
move = None
while move not in legal:
move = ask_number("Where will you move? (0 - 8):", 0, NUM_SQUARES)
if move not in legal:
print("\nThat square is already occupied, foolish human. Choose another.\n")
print("Fine...")
return move
def computer_move(board, computer, human):
"""Make computer move."""
# make a copy to work with since function will be changing list
board = board[:]
# the best positions to have, in order
BEST_MOVES = (4, 0, 2, 6, 8, 1, 3, 5, 7)
print("I shall take square number", end=" ")
# if computer can win, take that move
for move in legal_moves(board):
board[move] = computer
if winner(board) == computer:
print(move)
return move
# done checking this move, undo it
board[move] = EMPTY
# if human can win, block that move
for move in legal_moves(board):
board[move] = human
if winner(board) == human:
print(move)
return move
        # done checking this move, undo it
board[move] = EMPTY
# since no one can win on next move, pick best open square
for move in BEST_MOVES:
if move in legal_moves(board):
print(move)
return move
def next_turn(turn):
"""Switch turns."""
if turn == X:
return O
else:
return X
def congrat_winner(the_winner, computer, human):
"""Congratulate the winner."""
if the_winner != TIE:
print(the_winner, "won!\n")
else:
print("It's a tie!\n")
if the_winner == computer:
print("As I predicted, human, I am triumphant once more. \n" \
"Proof that computers are superior to humans in all regards.")
elif the_winner == human:
print("No, no! It cannot be! Somehow you tricked me, human. \n" \
"But never again! I, the computer, so swear it!")
elif the_winner == TIE:
print("You were most lucky, human, and somehow managed to tie me. \n" \
"Celebrate today... for this is the best you will ever achieve.")
def main():
display_instruct()
computer, human = pieces()
turn = X
board = new_board()
display_board(board)
while not winner(board):
if turn == human:
move = human_move(board, human)
board[move] = human
else:
move = computer_move(board, computer, human)
board[move] = computer
display_board(board)
turn = next_turn(turn)
the_winner = winner(board)
congrat_winner(the_winner, computer, human)
# start the program
main()
input("\n\nPress the enter key to quit.") | mit | -6,966,203,909,354,963,000 | 25.42654 | 89 | 0.546726 | false |
OCA/vertical-medical | medical_prescription_sale/models/sale_order_line.py | 2 | 4340 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Dave Lasley <[email protected]>
# Copyright: 2015 LasLabs, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import ValidationError
import logging
_logger = logging.getLogger(__name__)
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
@api.model
def _compute_dispense_qty(self, ):
rx_line = self.prescription_order_line_id
if self.product_uom == rx_line.dispense_uom_id:
self.dispense_qty = self.product_uom_qty
else:
self.dispense_qty = self.product_uom._compute_qty_obj(
self.product_uom_qty, rx_line.dispense_uom_id
)
patient_id = fields.Many2one(
string='Patient',
comodel_name='medical.patient',
related='prescription_order_line_id.patient_id',
)
prescription_order_line_id = fields.Many2one(
string='Prescription Line',
comodel_name='medical.prescription.order.line',
)
medication_id = fields.Many2one(
string='Medication',
comodel_name='medical.patient.medication',
related='prescription_order_line_id.medical_medication_id',
)
dispense_qty = fields.Float(
default=0.0,
readonly=True,
compute='_compute_dispense_qty',
)
@api.one
@api.constrains(
'product_id', 'prescription_order_line_id', 'patient_id',
)
def _check_sale_line_prescription(self, ):
'''
Validate whether the line can be dispensed based on Rx, pending
dispensings, etc.
:returns: bool -- If line can be processed
:raises: :class:`openerp.exceptions.ValidationError`
'''
if not self.medication_id.medicament_id.is_medicament:
return True
if not self.medication_id.medicament_id.is_prescription:
return True
rx_line = self.prescription_order_line_id
if self.patient_id != rx_line.patient_id:
raise ValidationError(_(
'Patients must be same on Order and Rx lines. '
'Got %s on order line %d, expected %s from rx line %d' % (
self.patient_id.name, self.id,
rx_line.patient_id.name, rx_line.id,
),
))
if rx_line.product_id != self.product_id:
if not self.is_substitutable:
raise ValidationError(_(
'Products must be same on Order and Rx lines. '
'Got %s on order line %d, expected %s from rx line %d' % (
self.product_id.name, self.id,
rx_line.product_id.name, rx_line.id,
),
))
else:
raise NotImplementedError(_(
'Drug substitution validation has not been implemented.'
))
if not rx_line.can_dispense:
raise ValidationError(_(
'Cannot dispense - currently %f pending and %f exception.' % (
rx_line.pending_dispense_qty,
rx_line.exception_dispense_qty,
)
))
if self.dispense_qty > rx_line.can_dispense_qty:
raise ValidationError(_(
'Cannot dispense - Order line %s goes over Rx qty by %d' % (
self.name, self.dispense_qty - rx_line.can_dispense_qty
)
))
return True
| gpl-3.0 | -670,401,346,052,154,600 | 35.166667 | 78 | 0.562212 | false |
zstackio/zstack-woodpecker | integrationtest/vm/installation/upgrade/test_zs_degd_latest_1.5_on_cos7.py | 2 | 1889 | '''
@author: MengLai
'''
import os
import tempfile
import uuid
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
def test():
test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
image_name = os.environ.get('imageName_i_c7')
vm = test_stub.create_vlan_vm(image_name)
test_obj_dict.add_vm(vm)
if os.environ.get('zstackManagementIp') == None:
vm.check()
else:
time.sleep(60)
vm_inv = vm.get_vm()
vm_ip = vm_inv.vmNics[0].ip
test_util.test_dsc('Install latest zstack')
target_file = '/root/zstack-all-in-one.tgz'
test_stub.prepare_test_env(vm_inv, target_file)
ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
test_stub.copy_id_dsa(vm_inv, ssh_cmd, tmp_file)
test_stub.copy_id_dsa_pub(vm_inv)
test_stub.execute_all_install(ssh_cmd, target_file, tmp_file)
test_stub.check_installation(ssh_cmd, tmp_file, vm_inv)
test_util.test_dsc('Degrade zstack to 1.5')
degrade_target_file = '/root/zstack-degrade-all-in-one.tgz'
install_pkg = os.environ.get('zstackPkg_1.5')
test_stub.prepare_upgrade_test_env(vm_inv, degrade_target_file, install_pkg)
test_stub.upgrade_zstack(ssh_cmd, degrade_target_file, tmp_file)
test_stub.check_installation(ssh_cmd, tmp_file, vm_inv)
os.system('rm -f %s' % tmp_file)
vm.destroy()
test_util.test_pass('ZStack upgrade Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
os.system('rm -f %s' % tmp_file)
test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 | 3,769,812,583,138,547,000 | 32.981481 | 106 | 0.66702 | false |
MoonshineSG/OctoPrint | src/octoprint/util/avr_isp/stk500v2.py | 8 | 4705 | from __future__ import absolute_import, division, print_function
import os, struct, sys, time
from serial import Serial
from serial import SerialException
from serial import SerialTimeoutException
from builtins import range
from . import ispBase, intelHex
class Stk500v2(ispBase.IspBase):
def __init__(self):
self.serial = None
self.seq = 1
self.lastAddr = -1
self.progressCallback = None
def connect(self, port = 'COM22', speed = 115200):
if self.serial != None:
self.close()
try:
self.serial = Serial(str(port), speed, timeout=1, writeTimeout=10000)
except SerialException as e:
raise ispBase.IspError("Failed to open serial port")
except:
raise ispBase.IspError("Unexpected error while connecting to serial port:" + port + ":" + str(sys.exc_info()[0]))
self.seq = 1
#Reset the controller
self.serial.setDTR(1)
time.sleep(0.1)
self.serial.setDTR(0)
time.sleep(0.2)
self.sendMessage([1])
if self.sendMessage([0x10, 0xc8, 0x64, 0x19, 0x20, 0x00, 0x53, 0x03, 0xac, 0x53, 0x00, 0x00]) != [0x10, 0x00]:
self.close()
raise ispBase.IspError("Failed to enter programming mode")
def close(self):
if self.serial != None:
self.serial.close()
self.serial = None
#Leave ISP does not reset the serial port, only resets the device, and returns the serial port after disconnecting it from the programming interface.
# This allows you to use the serial port without opening it again.
def leaveISP(self):
if self.serial != None:
if self.sendMessage([0x11]) != [0x11, 0x00]:
raise ispBase.IspError("Failed to leave programming mode")
ret = self.serial
self.serial = None
return ret
return None
def isConnected(self):
return self.serial != None
def sendISP(self, data):
recv = self.sendMessage([0x1D, 4, 4, 0, data[0], data[1], data[2], data[3]])
return recv[2:6]
def writeFlash(self, flashData):
		#Set load addr to 0, in case we have more than 64k flash we need to enable the address extension
pageSize = self.chip['pageSize'] * 2
flashSize = pageSize * self.chip['pageCount']
if flashSize > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
loadCount = (len(flashData) + pageSize - 1) // pageSize
for i in range(0, loadCount):
recv = self.sendMessage([0x13, pageSize >> 8, pageSize & 0xFF, 0xc1, 0x0a, 0x40, 0x4c, 0x20, 0x00, 0x00] + flashData[(i * pageSize):(i * pageSize + pageSize)])
if self.progressCallback != None:
self.progressCallback(i + 1, loadCount*2)
def verifyFlash(self, flashData):
		#Set load addr to 0, in case we have more than 64k flash we need to enable the address extension
flashSize = self.chip['pageSize'] * 2 * self.chip['pageCount']
if flashSize > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
loadCount = (len(flashData) + 0xFF) // 0x100
for i in range(0, loadCount):
recv = self.sendMessage([0x14, 0x01, 0x00, 0x20])[2:0x102]
if self.progressCallback != None:
self.progressCallback(loadCount + i + 1, loadCount*2)
for j in range(0, 0x100):
if i * 0x100 + j < len(flashData) and flashData[i * 0x100 + j] != recv[j]:
raise ispBase.IspError('Verify error at: 0x%x' % (i * 0x100 + j))
def sendMessage(self, data):
message = struct.pack(">BBHB", 0x1B, self.seq, len(data), 0x0E)
for c in data:
message += struct.pack(">B", c)
checksum = 0
for c in message:
checksum ^= ord(c)
message += struct.pack(">B", checksum)
try:
self.serial.write(message)
self.serial.flush()
except SerialTimeoutException:
raise ispBase.IspError('Serial send timeout')
self.seq = (self.seq + 1) & 0xFF
return self.recvMessage()
def recvMessage(self):
state = 'Start'
checksum = 0
while True:
s = self.serial.read()
if len(s) < 1:
raise ispBase.IspError("Timeout")
b = struct.unpack(">B", s)[0]
checksum ^= b
#print(hex(b))
if state == 'Start':
if b == 0x1B:
state = 'GetSeq'
checksum = 0x1B
elif state == 'GetSeq':
state = 'MsgSize1'
elif state == 'MsgSize1':
msgSize = b << 8
state = 'MsgSize2'
elif state == 'MsgSize2':
msgSize |= b
state = 'Token'
elif state == 'Token':
if b != 0x0E:
state = 'Start'
else:
state = 'Data'
data = []
elif state == 'Data':
data.append(b)
if len(data) == msgSize:
state = 'Checksum'
elif state == 'Checksum':
if checksum != 0:
state = 'Start'
else:
return data
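# Illustrative sketch; not part of the original driver. It shows how sendMessage()
# frames a command: 0x1B start byte, sequence number, big-endian body length,
# 0x0E token, the body bytes, then an XOR checksum over everything before it.
# The default body [0x01] mirrors the sign-on request sent by connect() above;
# byte handling mirrors the Python 2 string semantics used in sendMessage().
def _example_frame(seq=1, body=(0x01,)):
	frame = struct.pack(">BBHB", 0x1B, seq, len(body), 0x0E)
	for value in body:
		frame += struct.pack(">B", value)
	checksum = 0
	for c in frame:
		checksum ^= ord(c)
	return frame + struct.pack(">B", checksum)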
def main():
programmer = Stk500v2()
programmer.connect(port = sys.argv[1])
programmer.programChip(intelHex.readHex(sys.argv[2]))
sys.exit(1)
if __name__ == '__main__':
main()
| agpl-3.0 | -5,445,227,204,360,360,000 | 29.160256 | 162 | 0.655473 | false |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/encodings/raw_unicode_escape.py | 852 | 1208 | """ Python 'raw-unicode-escape' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.raw_unicode_escape_encode
decode = codecs.raw_unicode_escape_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.raw_unicode_escape_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.raw_unicode_escape_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='raw-unicode-escape',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
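# Illustrative usage sketch, not part of the stdlib module: the codec is reached
# through the normal codecs machinery rather than by importing this file, e.g.
#   >>> '\u03b4elta'.encode('raw_unicode_escape')
#   b'\\u03b4elta'
#   >>> b'\\u03b4elta'.decode('raw_unicode_escape')
#   'δelta'
# Characters below U+0100 are passed through as single Latin-1 bytes instead of
# being escaped.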
| apache-2.0 | -8,791,219,121,820,169,000 | 25.844444 | 70 | 0.719371 | false |
ad-uistyleguide/ad-uistyleguide.github.io | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/flock_tool.py | 604 | 1533 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
executor = FlockTool()
executor.Dispatch(args)
class FlockTool(object):
"""This class emulates the 'flock' command."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list)
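# Illustrative sketch; the invocation form below is assumed from Dispatch() above
# rather than taken from gyp's generated Makefiles. The generator calls this tool
# roughly as:
#   gyp-flock-tool flock /path/to/lockfile <command> [args...]
# Dispatch() title-cases "flock" into ExecFlock, which holds the lock while the
# command runs.
def _example_invocation():
  # Hypothetical lockfile and command, for illustration only.
  FlockTool().Dispatch(['flock', '/tmp/example.lock', 'true'])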
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit | 8,156,215,216,584,593,000 | 30.285714 | 74 | 0.679061 | false |
naturali/tensorflow | tensorflow/contrib/distributions/python/ops/mvn.py | 1 | 28414 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusStDev",
"MultivariateNormalCholesky",
"MultivariateNormalFull",
"MultivariateNormalDiagPlusVDVT",
]
class _MultivariateNormalOperatorPD(distribution.Distribution):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and an instance of
`OperatorPDBase`, which provides access to a symmetric positive definite
operator, which defines the covariance.
#### Mathematical details
With `C` the covariance matrix represented by the operator, the PDF of this
distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian.
mu = [1, 2, 3]
chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]]
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1.])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
cov,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCov"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
which determines the covariance.
Args:
mu: Floating point tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
cov: Instance of `OperatorPDBase` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `cov` are different dtypes.
"""
with ops.name_scope(name) as ns:
with ops.name_scope("init", values=[mu] + cov.inputs):
self._mu = array_ops.identity(mu, name="mu")
self._cov = cov
self._validate_args = validate_args # Needed by _assert_valid_mu.
self._mu = self._assert_valid_mu(self._mu)
super(_MultivariateNormalOperatorPD, self).__init__(
dtype=self._mu.dtype,
parameters={"mu": self._mu, "cov": self._cov},
is_reparameterized=True,
is_continuous=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
def _assert_valid_mu(self, mu):
"""Return `mu` after validity checks and possibly with assertations."""
cov = self._cov
if mu.dtype != cov.dtype:
raise TypeError(
"mu and cov must have the same dtype. Found mu.dtype = %s, "
"cov.dtype = %s" % (mu.dtype, cov.dtype))
# Try to validate with static checks.
mu_shape = mu.get_shape()
cov_shape = cov.get_shape()
if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():
if mu_shape != cov_shape[:-1]:
raise ValueError(
"mu.shape and cov.shape[:-1] should match. Found: mu.shape=%s, "
"cov.shape=%s" % (mu_shape, cov_shape))
else:
return mu
# Static checks could not be run, so possibly do dynamic checks.
if not self.validate_args:
return mu
else:
assert_same_rank = check_ops.assert_equal(
array_ops.rank(mu) + 1,
cov.rank(),
data=["mu should have rank 1 less than cov. Found: rank(mu) = ",
array_ops.rank(mu), " rank(cov) = ", cov.rank()],
)
with ops.control_dependencies([assert_same_rank]):
assert_same_shape = check_ops.assert_equal(
array_ops.shape(mu),
cov.vector_shape(),
data=["mu.shape and cov.shape[:-1] should match. "
"Found: shape(mu) = "
, array_ops.shape(mu), " shape(cov) = ", cov.shape()],
)
return control_flow_ops.with_dependencies([assert_same_shape], mu)
@property
def mu(self):
return self._mu
@property
def sigma(self):
"""Dense (batch) covariance matrix, if available."""
with ops.name_scope(self.name):
return self._cov.to_dense()
def log_sigma_det(self, name="log_sigma_det"):
"""Log of determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return self._cov.log_det()
def sigma_det(self, name="sigma_det"):
"""Determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return math_ops.exp(self._cov.log_det())
def _batch_shape(self):
return self._cov.batch_shape()
def _get_batch_shape(self):
return self._cov.get_batch_shape()
def _event_shape(self):
return array_ops.pack([self._cov.vector_space_dimension()])
def _get_event_shape(self):
return self._cov.get_shape()[-1:]
def _sample_n(self, n, seed=None):
# Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
white_samples = random_ops.random_normal(shape=shape,
mean=0,
stddev=1,
dtype=self.dtype,
seed=seed)
correlated_samples = self._cov.sqrt_matmul(white_samples)
# Move the last dimension to the front
perm = array_ops.concat(0, (
array_ops.pack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)))
# TODO(ebrevdo): Once we get a proper tensor contraction op,
# perform the inner product using that instead of batch_matmul
# and this slow transpose can go away!
correlated_samples = array_ops.transpose(correlated_samples, perm)
samples = correlated_samples + self.mu
return samples
def _log_prob(self, x):
# Q: Why are shape requirements as stated above?
# A: The compatible shapes are precisely the ones that will broadcast to
# a shape compatible with self._cov.
# See Operator base class for notes about shapes compatible with self._cov.
x = ops.convert_to_tensor(x)
contrib_tensor_util.assert_same_float_dtype((self._mu, x))
# _assert_valid_mu asserts that self.mu has same batch shape as self.cov.
# so batch shape of self.mu = that of self._cov and self, and the
# batch shape of x_centered is a broadcast version of these. If this
# broadcast results in a shape like
# [M1,...,Mm] + self.batch_shape + self.event_shape
# OR
# self.batch_shape + self.event_shape
# then subsequent operator calls are guaranteed to work.
x_centered = x - self.mu
# Compute the term x^{-1} sigma^{-1} x which appears in the exponent of
# the pdf.
x_whitened_norm = self._cov.inv_quadratic_form_on_vectors(x_centered)
k = math_ops.cast(self._cov.vector_space_dimension(), self.dtype)
log_prob_value = -0.5 * (self.log_sigma_det() +
k * math.log(2. * math.pi) +
x_whitened_norm)
output_static_shape = x_centered.get_shape()[:-1]
log_prob_value.set_shape(output_static_shape)
return log_prob_value
def _prob(self, x):
return math_ops.exp(self.log_prob(x))
def _entropy(self):
log_sigma_det = self.log_sigma_det()
one_plus_log_two_pi = constant_op.constant(1 + math.log(2 * math.pi),
dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
k = math_ops.cast(self._cov.vector_space_dimension(), dtype=self.dtype)
entropy_value = (k * one_plus_log_two_pi + log_sigma_det) / 2
entropy_value.set_shape(log_sigma_det.get_shape())
return entropy_value
def _mean(self):
return array_ops.identity(self._mu)
def _variance(self):
return self.sigma
def _mode(self):
return array_ops.identity(self._mu)
_prob_note = """
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
```
self.batch_shape + self.event_shape
OR
[M1,...,Mm] + self.batch_shape + self.event_shape
```
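For example (an illustrative sketch of this rule): with `batch_shape = [2]` and
`event_shape = [3]`, an `x` of shape `[5, 2, 3]` yields a result of shape `[5, 2]`.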
"""
distribution_util.append_class_fun_doc(_MultivariateNormalOperatorPD.log_prob,
doc_str=_prob_note)
distribution_util.append_class_fun_doc(_MultivariateNormalOperatorPD.prob,
doc_str=_prob_note)
class MultivariateNormalDiag(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a 1-D diagonal
`diag_stdev`, representing the standard deviations. This distribution
assumes the random variables, `(X_1,...,X_k)` are independent, thus no
non-diagonal terms of the covariance matrix are needed.
This allows for `O(k)` pdf evaluation, sampling, and storage.
#### Mathematical details
The PDF of this distribution is defined in terms of the diagonal covariance
determined by `diag_stdev`: `C_{ii} = diag_stdev[i]**2`.
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and the square roots of the (independent) random variables.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal standard deviation.
mu = [1, 2, 3.]
diag_stdev = [4, 5, 6.]
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_stdev = ... # shape 2 x 3, positive.
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_stdev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiag"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and standard deviations `diag_stdev`.
Each batch member represents a random vector `(X_1,...,X_k)` of independent
random normals.
The mean of `X_i` is `mu[i]`, and the standard deviation is `diag_stdev[i]`.
Args:
mu: Rank `N + 1` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
diag_stdev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`,
representing the standard deviations. Must be positive.
validate_args: `Boolean`, default `False`. Whether to validate
input with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `diag_stdev` are different dtypes.
"""
cov = operator_pd_diag.OperatorPDSqrtDiag(
diag_stdev, verify_pd=validate_args)
super(MultivariateNormalDiag, self).__init__(
mu, cov, allow_nan_stats=allow_nan_stats, validate_args=validate_args,
name=name)
class MultivariateNormalDiagWithSoftplusStDev(MultivariateNormalDiag):
"""MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`."""
def __init__(self,
mu,
diag_stdev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagWithSoftplusStdDev"):
with ops.name_scope(name, values=[mu, diag_stdev]) as ns:
super(MultivariateNormalDiagWithSoftplusStDev, self).__init__(
mu=mu,
diag_stdev=nn.softplus(diag_stdev),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
class MultivariateNormalDiagPlusVDVT(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
Every batch member of this distribution is defined by a mean and a lightweight
covariance matrix `C`.
#### Mathematical details
The PDF of this distribution in terms of the mean `mu` and covariance `C` is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a lightweight
definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
V = is shape (k x r), typically r << k
D = is diagonal (r x r), optional (defaults to identity).
```
This allows for `O(kr + r^3)` pdf evaluation and determinant, and `O(kr)`
sampling and storage (per batch member).
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and square root of the covariance `S = M + V D V^T`. Extra
leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with covariance square root
# S = M + V D V^T, where V D V^T is a matrix-rank 2 update.
mu = [1, 2, 3.]
diag_large = [1.1, 2.2, 3.3]
v = ... # shape 3 x 2
diag_small = [4., 5.]
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v, diag_small=diag_small)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians. This time, don't provide
# diag_small. This means S = M + V V^T.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_large = ... # shape 2 x 3
v = ... # shape 2 x 3 x 1, a matrix-rank 1 update.
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_large,
v,
diag_small=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagPlusVDVT"):
"""Multivariate Normal distributions on `R^k`.
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a
lightweight definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
V = is shape (k x r), typically r << k
D = is diagonal (r x r), optional (defaults to identity).
```
Args:
mu: Rank `n + 1` floating point tensor with shape `[N1,...,Nn, k]`,
`n >= 0`. The means.
diag_large: Optional rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `M`.
v: Rank `n + 1` floating point tensor, shape `[N1,...,Nn, k, r]`
`n >= 0`. Defines the matrix `V`.
diag_small: Rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `D`. Default
is `None`, which means `D` will be the identity matrix.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
m = operator_pd_diag.OperatorPDDiag(diag_large, verify_pd=validate_args)
cov = operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
m, v, diag=diag_small, verify_pd=validate_args,
verify_shapes=validate_args)
super(MultivariateNormalDiagPlusVDVT, self).__init__(
mu, cov, allow_nan_stats=allow_nan_stats, validate_args=validate_args,
name=name)
class MultivariateNormalCholesky(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`.
Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling,
and requires `O(k^2)` storage.
#### Mathematical details
The Cholesky factor `chol` defines the covariance matrix: `C = chol chol^T`.
The PDF of this distribution is then:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
# Note, this would be more efficient with MultivariateNormalDiag.
mu = [1, 2, 3.]
chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2]]
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
  Trainable (batch) Cholesky matrices can be created with
`tf.contrib.distributions.matrix_diag_transform()`
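  For example (an illustrative sketch; `matrix_diag_transform` is assumed here
  to apply the given transform to the diagonal of each matrix, leaving the rest
  unchanged):
  ```python
  # An unconstrained batch of matrices, e.g. reshaped network output.
  matrix = tf.reshape(net_output, [-1, 3, 3])  # `net_output` is hypothetical.
  # Make the diagonal positive so each lower triangle is a valid Cholesky factor.
  chol = tf.contrib.distributions.matrix_diag_transform(matrix, tf.nn.softplus)
  mu = ...  # shape [batch, 3]
  dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
  ```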
"""
def __init__(self,
mu,
chol,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCholesky"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `chol` which holds the (batch) Cholesky
factors, such that the covariance of each batch member is `chol chol^T`.
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. The upper triangular part is ignored (treated as
though it is zero), and the diagonal must be positive.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `chol` are different dtypes.
"""
cov = operator_pd_cholesky.OperatorPDCholesky(chol, verify_pd=validate_args)
super(MultivariateNormalCholesky, self).__init__(
mu,
cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
class MultivariateNormalFull(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and covariance matrix `sigma`.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations.
#### Mathematical details
With `C = sigma`, the PDF of this distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
mu = [1, 2, 3.]
sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
dist = tf.contrib.distributions.MultivariateNormalFull(mu, chol)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
sigma = ... # shape 2 x 3 x 3, positive definite.
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
sigma,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalFull"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `sigma`, the mean and covariance.
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. Each batch member must be positive definite.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `sigma` are different dtypes.
"""
cov = operator_pd_full.OperatorPDFull(sigma, verify_pd=validate_args)
super(MultivariateNormalFull, self).__init__(
mu,
cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
def _kl_mvn_mvn_brute_force(mvn_a, mvn_b, name=None):
"""Batched KL divergence `KL(mvn_a || mvn_b)` for multivariate normals.
With `X`, `Y` both multivariate normals in `R^k` with means `mu_x`, `mu_y` and
covariance `C_x`, `C_y` respectively,
```
  KL(X || Y) = 0.5 * ( T + Q - k + L ),
T := trace(C_b^{-1} C_a),
Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
L := Log[Det(C_b)] - Log[Det(C_a)]
```
This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient
methods for solving systems with `C_b` may be available, a dense version of
(the square root of) `C_a` is used, so performance is `O(B s k^2)` where `B`
is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`
and `y`.
Args:
mvn_a: Instance of subclass of `_MultivariateNormalOperatorPD`.
mvn_b: Instance of subclass of `_MultivariateNormalOperatorPD`.
name: (optional) name to use for created ops. Default "kl_mvn_mvn".
Returns:
Batchwise `KL(mvn_a || mvn_b)`.
"""
# Access the "private" OperatorPD that each mvn is built from.
cov_a = mvn_a._cov # pylint: disable=protected-access
cov_b = mvn_b._cov # pylint: disable=protected-access
mu_a = mvn_a.mu
mu_b = mvn_b.mu
inputs = [mu_a, mu_b] + cov_a.inputs + cov_b.inputs
with ops.name_scope(name, "kl_mvn_mvn", inputs):
# If Ca = AA', Cb = BB', then
# tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
# = tr[inv(B) A A' inv(B)']
# = tr[(inv(B) A) (inv(B) A)']
# = sum_{ik} (inv(B) A)_{ik}^2
# The second equality follows from the cyclic permutation property.
b_inv_a = cov_b.sqrt_solve(cov_a.sqrt_to_dense())
t = math_ops.reduce_sum(
math_ops.square(b_inv_a),
reduction_indices=[-1, -2])
q = cov_b.inv_quadratic_form_on_vectors(mu_b - mu_a)
k = math_ops.cast(cov_a.vector_space_dimension(), mvn_a.dtype)
one_half_l = cov_b.sqrt_log_det() - cov_a.sqrt_log_det()
return 0.5 * (t + q - k) + one_half_l
# Register KL divergences.
kl_classes = [
MultivariateNormalFull,
MultivariateNormalCholesky,
MultivariateNormalDiag,
MultivariateNormalDiagPlusVDVT,
]
for mvn_aa in kl_classes:
# Register when they are the same here, and do not register when they are the
# same below because that would result in a repeated registration.
kullback_leibler.RegisterKL(mvn_aa, mvn_aa)(_kl_mvn_mvn_brute_force)
for mvn_bb in kl_classes:
if mvn_bb != mvn_aa:
kullback_leibler.RegisterKL(mvn_aa, mvn_bb)(_kl_mvn_mvn_brute_force)
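# Illustrative sketch, not part of the original module: once the registrations
# above are in place, the divergence is normally obtained through the generic
# dispatcher (assumed here to be `kullback_leibler.kl`) rather than by calling
# the helper directly.
def _example_kl_usage():
  mvn_a = MultivariateNormalDiag([0., 0.], [1., 1.])
  mvn_b = MultivariateNormalDiag([1., 1.], [2., 2.])
  return kullback_leibler.kl(mvn_a, mvn_b)  # batchwise KL(mvn_a || mvn_b)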
| apache-2.0 | -4,695,473,008,018,263,000 | 36.239843 | 92 | 0.635461 | false |
pyoceans/pocean-core | pocean/dsg/trajectory/cr.py | 1 | 10376 | #!python
# coding=utf-8
from copy import copy
from collections import OrderedDict
import numpy as np
import pandas as pd
from pocean.utils import (
create_ncvar_from_series,
dict_update,
downcast_dataframe,
generic_masked,
get_default_axes,
get_dtype,
get_mapped_axes_variables,
get_masked_datetime_array,
get_ncdata_from_series,
nativize_times,
normalize_countable_array,
)
from pocean.cf import CFDataset, cf_safe_name
from pocean.dsg.trajectory import trajectory_calculated_metadata
from pocean import logger as L # noqa
class ContiguousRaggedTrajectory(CFDataset):
@classmethod
def is_mine(cls, dsg, strict=False):
try:
rvars = dsg.filter_by_attrs(cf_role='trajectory_id')
assert len(rvars) == 1
assert dsg.featureType.lower() == 'trajectory'
assert len(dsg.t_axes()) >= 1
assert len(dsg.x_axes()) >= 1
assert len(dsg.y_axes()) >= 1
assert len(dsg.z_axes()) >= 1
o_index_vars = dsg.filter_by_attrs(
sample_dimension=lambda x: x is not None
)
assert len(o_index_vars) == 1
assert o_index_vars[0].sample_dimension in dsg.dimensions # Sample dimension
# Allow for string variables
rvar = rvars[0]
# 0 = single
# 1 = array of strings/ints/bytes/etc
# 2 = array of character arrays
assert 0 <= len(rvar.dimensions) <= 2
except BaseException:
if strict is True:
raise
return False
return True
@classmethod
def from_dataframe(cls, df, output, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
daxes = axes
# Should never be a CR file with one trajectory so we ignore the "reduce_dims" attribute
_ = kwargs.pop('reduce_dims', False) # noqa
unlimited = kwargs.pop('unlimited', False)
unique_dims = kwargs.pop('unique_dims', False)
if unique_dims is True:
# Rename the dimension to avoid a dimension and coordinate having the same name
            # which is not supported in xarray
changed_axes = { k: '{}_dim'.format(v) for k, v in axes._asdict().items() }
daxes = get_default_axes(changed_axes)
# Downcast anything from int64 to int32
# Convert any timezone aware datetimes to native UTC times
df = downcast_dataframe(nativize_times(df))
with ContiguousRaggedTrajectory(output, 'w') as nc:
trajectory_groups = df.groupby(axes.trajectory)
unique_trajectories = list(trajectory_groups.groups.keys())
num_trajectories = len(unique_trajectories)
nc.createDimension(daxes.trajectory, num_trajectories)
trajectory = nc.createVariable(axes.trajectory, get_dtype(df[axes.trajectory]), (daxes.trajectory,))
            # Create the sample (observation) dimension: unlimited if requested,
            # otherwise sized to the total number of rows.
if unlimited is True:
nc.createDimension(daxes.sample, None)
else:
nc.createDimension(daxes.sample, len(df))
# Number of observations in each trajectory
row_size = nc.createVariable('rowSize', 'i4', (daxes.trajectory,))
attributes = dict_update(nc.nc_attributes(axes, daxes), kwargs.pop('attributes', {}))
# Variables defined on only the trajectory axis
traj_vars = kwargs.pop('traj_vars', [])
traj_columns = [ p for p in traj_vars if p in df.columns ]
for c in traj_columns:
var_name = cf_safe_name(c)
if var_name not in nc.variables:
create_ncvar_from_series(
nc,
var_name,
(daxes.trajectory,),
df[c],
zlib=True,
complevel=1
)
for i, (trajid, trg) in enumerate(trajectory_groups):
trajectory[i] = trajid
row_size[i] = len(trg)
# Save any trajectory variables using the first value found
# in the column.
for c in traj_columns:
var_name = cf_safe_name(c)
if var_name not in nc.variables:
continue
v = nc.variables[var_name]
vvalues = get_ncdata_from_series(trg[c], v)[0]
try:
v[i] = vvalues
except BaseException:
L.exception('Failed to add {}'.format(c))
continue
# Add all of the columns based on the sample dimension. Take all columns and remove the
# trajectory, rowSize and other trajectory based columns.
sample_columns = [
f for f in df.columns if f not in traj_columns + ['rowSize', axes.trajectory]
]
for c in sample_columns:
var_name = cf_safe_name(c)
if var_name not in nc.variables:
v = create_ncvar_from_series(
nc,
var_name,
(daxes.sample,),
df[c],
zlib=True,
complevel=1
)
else:
v = nc.variables[var_name]
vvalues = get_ncdata_from_series(df[c], v)
try:
if unlimited is True:
v[:] = vvalues
else:
v[:] = vvalues.reshape(v.shape)
except BaseException:
L.exception('Failed to add {}'.format(c))
continue
# Metadata variables
if 'crs' not in nc.variables:
nc.createVariable('crs', 'i4')
# Set attributes
nc.update_attributes(attributes)
return ContiguousRaggedTrajectory(output, **kwargs)
def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
if df is None:
df = self.to_dataframe(clean_cols=clean_cols, clean_rows=clean_rows, axes=axes)
return trajectory_calculated_metadata(df, axes, geometries)
def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
axv = get_mapped_axes_variables(self, axes)
o_index_var = self.filter_by_attrs(sample_dimension=lambda x: x is not None)
if not o_index_var:
raise ValueError(
'Could not find the "sample_dimension" attribute on any variables, '
'is this a valid {}?'.format(self.__class__.__name__)
)
else:
o_index_var = o_index_var[0]
o_dim = self.dimensions[o_index_var.sample_dimension] # Sample dimension
t_dim = o_index_var.dimensions
# Trajectory
row_sizes = o_index_var[:]
traj_data = normalize_countable_array(axv.trajectory)
traj_data = np.repeat(traj_data, row_sizes)
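        # e.g. trajectories ['t1', 't2'] with row_sizes [3, 2] repeat to
        # ['t1', 't1', 't1', 't2', 't2'], one entry per observation.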
# time
time_data = get_masked_datetime_array(axv.t[:], axv.t).flatten()
df_data = OrderedDict([
(axes.t, time_data),
(axes.trajectory, traj_data)
])
building_index_to_drop = np.ones(o_dim.size, dtype=bool)
extract_vars = copy(self.variables)
# Skip the time and row index variables
del extract_vars[o_index_var.name]
del extract_vars[axes.t]
for i, (dnam, dvar) in enumerate(extract_vars.items()):
# Trajectory dimensions
if dvar.dimensions == t_dim:
vdata = np.repeat(generic_masked(dvar[:], attrs=self.vatts(dnam)), row_sizes)
# Sample dimensions
elif dvar.dimensions == (o_dim.name,):
vdata = generic_masked(dvar[:].flatten().astype(dvar.dtype), attrs=self.vatts(dnam))
else:
vdata = generic_masked(dvar[:].flatten().astype(dvar.dtype), attrs=self.vatts(dnam))
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
continue
else:
L.warning("Skipping variable {} since it didn't match any dimension sizes".format(dnam))
continue
            # Mark rows with data so we don't remove them with clean_rows
if vdata.size == building_index_to_drop.size:
building_index_to_drop = (building_index_to_drop == True) & (vdata.mask == True) # noqa
# Handle scalars here at the end
if vdata.size == 1:
vdata = vdata[0]
df_data[dnam] = vdata
df = pd.DataFrame(df_data)
# Drop all data columns with no data
if clean_cols:
df = df.dropna(axis=1, how='all')
# Drop all data rows with no data variable data
if clean_rows:
df = df.iloc[~building_index_to_drop]
return df
def nc_attributes(self, axes, daxes):
atts = super(ContiguousRaggedTrajectory, self).nc_attributes()
return dict_update(atts, {
'global' : {
'featureType': 'trajectory',
'cdm_data_type': 'Trajectory'
},
axes.trajectory: {
'cf_role': 'trajectory_id',
'long_name' : 'trajectory identifier',
'ioos_category': 'identifier'
},
axes.x: {
'axis': 'X'
},
axes.y: {
'axis': 'Y'
},
axes.z: {
'axis': 'Z'
},
axes.t: {
'units': self.default_time_unit,
'standard_name': 'time',
'axis': 'T'
},
'rowSize': {
'sample_dimension': daxes.sample
}
})
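# Illustrative usage sketch; not part of the original module. Column names follow
# the default axes ('trajectory', 't', 'x', 'y', 'z'); the values and output path
# are assumptions for illustration only.
def _example_from_dataframe():
    df = pd.DataFrame({
        'trajectory': ['t1'] * 3 + ['t2'] * 2,
        't': pd.date_range('2020-01-01', periods=5, freq='H'),
        'x': [-71.0, -71.1, -71.2, -70.0, -70.1],
        'y': [42.0, 42.1, 42.2, 41.0, 41.1],
        'z': [0.0] * 5,
        'temperature': [10.1, 10.3, 10.2, 9.8, 9.9],
    })
    return ContiguousRaggedTrajectory.from_dataframe(df, 'example_trajectory.nc')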
| mit | -8,736,001,294,292,709,000 | 35.664311 | 112 | 0.523419 | false |
scenarios/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/gamma_test.py | 7 | 14250 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from scipy import stats
from tensorflow.contrib.distributions.python.ops import gamma as gamma_lib
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class GammaTest(test.TestCase):
def testGammaShape(self):
with self.test_session():
alpha = constant_op.constant([3.0] * 5)
beta = constant_op.constant(11.0)
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
self.assertEqual(gamma.batch_shape().eval(), (5,))
self.assertEqual(gamma.get_batch_shape(), tensor_shape.TensorShape([5]))
self.assertAllEqual(gamma.event_shape().eval(), [])
self.assertEqual(gamma.get_event_shape(), tensor_shape.TensorShape([]))
def testGammaLogPDF(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_pdf(x)
self.assertEqual(log_pdf.get_shape(), (6,))
self.assertAllClose(log_pdf.eval(), expected_log_pdf)
pdf = gamma.pdf(x)
self.assertEqual(pdf.get_shape(), (6,))
self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant([[3.0, 4.0]] * batch_size)
alpha_v = np.array([2.0, 4.0])
beta_v = np.array([3.0, 4.0])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_pdf(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(log_pdf_values, expected_log_pdf)
pdf = gamma.pdf(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensionalBroadcasting(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant(3.0)
alpha_v = np.array([2.0, 4.0])
beta_v = 3.0
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_pdf(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(log_pdf_values, expected_log_pdf)
pdf = gamma.pdf(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaCDF(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
expected_cdf = stats.gamma.cdf(x, alpha_v, scale=1 / beta_v)
cdf = gamma.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
self.assertAllClose(cdf.eval(), expected_cdf)
def testGammaMean(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
expected_means = stats.gamma.mean(alpha_v, scale=1 / beta_v)
self.assertEqual(gamma.mean().get_shape(), (3,))
self.assertAllClose(gamma.mean().eval(), expected_means)
def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
with self.test_session():
alpha_v = np.array([5.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
expected_modes = (alpha_v - 1) / beta_v
self.assertEqual(gamma.mode().get_shape(), (3,))
self.assertAllClose(gamma.mode().eval(), expected_modes)
def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
with self.test_session():
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v, allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
gamma.mode().eval()
def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self):
with self.test_session():
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v, allow_nan_stats=True)
expected_modes = (alpha_v - 1) / beta_v
expected_modes[0] = np.nan
self.assertEqual(gamma.mode().get_shape(), (3,))
self.assertAllClose(gamma.mode().eval(), expected_modes)
def testGammaVariance(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
expected_variances = stats.gamma.var(alpha_v, scale=1 / beta_v)
self.assertEqual(gamma.variance().get_shape(), (3,))
self.assertAllClose(gamma.variance().eval(), expected_variances)
def testGammaStd(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
expected_std = stats.gamma.std(alpha_v, scale=1 / beta_v)
self.assertEqual(gamma.std().get_shape(), (3,))
self.assertAllClose(gamma.std().eval(), expected_std)
def testGammaEntropy(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
expected_entropy = stats.gamma.entropy(alpha_v, scale=1 / beta_v)
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
self.assertEqual(gamma.entropy().get_shape(), (3,))
self.assertAllClose(gamma.entropy().eval(), expected_entropy)
def testGammaSampleSmallAlpha(self):
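    # With alpha < 1 most of the probability mass sits near zero; the sample
    # moments and a Kolmogorov-Smirnov test are still checked against scipy.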
with session.Session():
alpha_v = 0.05
beta_v = 1.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
stats.gamma.mean(
alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testGammaSample(self):
with session.Session():
alpha_v = 4.0
beta_v = 3.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
stats.gamma.mean(
alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testGammaSampleMultiDimensional(self):
with session.Session():
alpha_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100
beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
n = 10000
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
zeros = np.zeros_like(alpha_v + beta_v) # 10 x 100
alpha_bc = alpha_v + zeros
beta_bc = beta_v + zeros
self.assertAllClose(
sample_values.mean(axis=0),
stats.gamma.mean(
alpha_bc, scale=1 / beta_bc),
rtol=.035)
self.assertAllClose(
sample_values.var(axis=0),
stats.gamma.var(alpha_bc, scale=1 / beta_bc),
atol=4.5)
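      # Run a KS test for every (alpha, beta) cell and tolerate only a small
      # fraction of failures across the 10 x 100 batch.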
fails = 0
trials = 0
for ai, a in enumerate(np.reshape(alpha_v, [-1])):
for bi, b in enumerate(np.reshape(beta_v, [-1])):
s = sample_values[:, bi, ai]
trials += 1
fails += 0 if self._kstest(a, b, s) else 1
self.assertLess(fails, trials * 0.03)
def _kstest(self, alpha, beta, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
ks, _ = stats.kstest(samples, stats.gamma(alpha, scale=1 / beta).cdf)
# Return True when the test passes.
return ks < 0.02
def testGammaPdfOfSampleMultiDims(self):
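    # Samples a broadcast 2x2 batch of Gammas, checks the sample moments
    # against scipy, and verifies the pdf integrates to ~1 in each batch cell.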
with session.Session() as sess:
gamma = gamma_lib.Gamma(alpha=[7., 11.], beta=[[5.], [6.]])
num = 50000
samples = gamma.sample(num, seed=137)
pdfs = gamma.pdf(samples)
sample_vals, pdf_vals = sess.run([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self.assertAllClose(
stats.gamma.mean(
[[7., 11.], [7., 11.]], scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.mean(axis=0),
atol=.1)
self.assertAllClose(
stats.gamma.var([[7., 11.], [7., 11.]],
scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.var(axis=0),
atol=.1)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
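    # Trapezoidal-rule estimate of the integral of the empirical pdf over the
    # sampled support; it should be close to one when sampling and pdf agree.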
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testGammaNonPositiveInitializationParamsRaises(self):
with self.test_session():
alpha_v = constant_op.constant(0.0, name="alpha")
beta_v = constant_op.constant(1.0, name="beta")
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v, validate_args=True)
with self.assertRaisesOpError("alpha"):
gamma.mean().eval()
alpha_v = constant_op.constant(1.0, name="alpha")
beta_v = constant_op.constant(0.0, name="beta")
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v, validate_args=True)
with self.assertRaisesOpError("beta"):
gamma.mean().eval()
def testGammaWithSoftplusAlphaBeta(self):
with self.test_session():
alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
beta_v = constant_op.constant([1.0, -3.6], name="beta")
gamma = gamma_lib.GammaWithSoftplusAlphaBeta(alpha=alpha_v, beta=beta_v)
self.assertAllEqual(nn_ops.softplus(alpha_v).eval(), gamma.alpha.eval())
self.assertAllEqual(nn_ops.softplus(beta_v).eval(), gamma.beta.eval())
def testGammaGammaKL(self):
alpha0 = np.array([3.])
beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])
alpha1 = np.array([0.4])
beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])
# Build graph.
with self.test_session() as sess:
g0 = gamma_lib.Gamma(alpha=alpha0, beta=beta0)
g1 = gamma_lib.Gamma(alpha=alpha1, beta=beta1)
x = g0.sample(int(1e4), seed=0)
kl_sample = math_ops.reduce_mean(g0.log_prob(x) - g1.log_prob(x), 0)
kl_actual = kullback_leibler.kl(g0, g1)
# Execute graph.
[kl_sample_, kl_actual_] = sess.run([kl_sample, kl_actual])
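      # Closed-form KL(Gamma(alpha0, beta0) || Gamma(alpha1, beta1)) with beta
      # as the rate parameter, compared against both kullback_leibler.kl and
      # the Monte Carlo estimate computed above.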
kl_expected = ((alpha0 - alpha1) * special.digamma(alpha0)
+ special.gammaln(alpha1)
- special.gammaln(alpha0)
+ alpha1 * np.log(beta0)
- alpha1 * np.log(beta1)
+ alpha0 * (beta1 / beta0 - 1.))
self.assertEqual(beta0.shape, kl_actual.get_shape())
self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-2)
if __name__ == "__main__":
test.main()
| apache-2.0 | 6,282,272,630,792,596,000 | 39.140845 | 80 | 0.613263 | false |