| repo_name (string, 5 to 100 chars) | path (string, 4 to 299 chars) | copies (990 classes) | size (string, 4 to 7 chars) | content (string, 666 to 1.03M chars) | license (15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
joelouismarino/iterative_inference | cfg/cifar10/hierarchical/standard/config.py | 1 | 1559 | # training set-up
train_config = {
    'dataset': 'CIFAR_10',
    'output_distribution': 'gaussian',
    'batch_size': 128,
    'n_samples': 10,
    'n_iterations': 1,
    'encoder_optimizer': 'adam',
    'decoder_optimizer': 'adam',
    'encoder_learning_rate': 0.0001,
    'decoder_learning_rate': 0.0001,
    'average_gradient': True,
    'encoder_decoder_train_multiple': 1,
    'kl_min': 0,
    'kl_warm_up': True,
    'cuda_device': 1,
    'display_iter': 30,
    'eval_iter': 2000,
    'resume_experiment': None
}

# model architecture
arch = {
    'model_form': 'dense',  # 'dense', 'conv'
    'encoder_type': 'inference_model',  # 'em', 'inference_model'
    'inference_model_type': 'feedforward',  # 'feedforward', 'recurrent'
    'encoding_form': ['posterior'],
    'variable_update_form': 'direct',
    'concat_variables': True,
    'posterior_form': 'gaussian',
    'whiten_input': False,
    'constant_prior_variances': False,
    'single_output_variance': False,
    'learn_top_prior': False,
    'top_size': 1,
    'n_latent': [1024, 512],
    'n_det_enc': [0, 0],
    'n_det_dec': [0, 0],
    'n_layers_enc': [3, 3, 0],
    'n_layers_dec': [1, 1, 1],
    'n_units_enc': [2048, 2048, 0],
    'n_units_dec': [2048, 2048, 1],
    'non_linearity_enc': 'elu',
    'non_linearity_dec': 'elu',
    'connection_type_enc': 'highway',
    'connection_type_dec': 'highway',
    'batch_norm_enc': False,
    'batch_norm_dec': False,
    'weight_norm_enc': False,
    'weight_norm_dec': False,
    'dropout_enc': 0.0,
    'dropout_dec': 0.0
}
| mit | -5,584,336,289,122,220,000 | 22.984615 | 72 | 0.572162 | false |
meganbkratz/acq4 | acq4/LogWindow.py | 3 | 39978 | import time
import traceback
import sys, os
if __name__ == "__main__":
#import os.path as osp
d = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path = [os.path.join(d,'lib','util')] + sys.path + [d]
from PyQt4 import QtGui, QtCore
import LogWidgetTemplate
from acq4.pyqtgraph import FeedbackButton
import acq4.util.configfile as configfile
from acq4.util.DataManager import DirHandle
from acq4.util.HelpfulException import HelpfulException
from acq4.util.Mutex import Mutex
import numpy as np
from acq4.pyqtgraph import FileDialog
from acq4.util.debug import printExc
import weakref
import re
#from acq4.Manager import getManager
#WIN = None
Stylesheet = """
body {color: #000; font-family: sans;}
.entry {}
.error .message {color: #900}
.warning .message {color: #740}
.user .message {color: #009}
.status .message {color: #090}
.logExtra {margin-left: 40px;}
.traceback {color: #555; height: 0px;}
.timestamp {color: #000;}
"""
pageTemplate = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<style type="text/css">
%s </style>
<script type="text/javascript">
function showDiv(id) {
div = document.getElementById(id);
div.style.visibility = "visible";
div.style.height = "auto";
}
</script>
</head>
<body>
</body>
</html>
""" % Stylesheet
class LogButton(FeedbackButton):
def __init__(self, *args):
FeedbackButton.__init__(self, *args)
global WIN
self.clicked.connect(WIN.show)
WIN.buttons.append(weakref.ref(self))
#def close(self):
#global WIN
#WIN.buttons.remove(self)
class LogWindow(QtGui.QMainWindow):
"""LogWindow contains a LogWidget inside a window. LogWindow is responsible for collecting messages generated by the program/user, formatting them into a nested dictionary,
and saving them in a log.txt file. The LogWidget takes care of displaying messages.
Messages can be logged by calling logMsg or logExc functions from acq4.Manager. These functions call the LogWindow.logMsg and LogWindow.logExc functions, but other classes
should not call the LogWindow functions directly.
"""
sigLogMessage = QtCore.Signal(object)
def __init__(self, manager):
QtGui.QMainWindow.__init__(self)
self.setWindowTitle("Log")
path = os.path.dirname(__file__)
self.setWindowIcon(QtGui.QIcon(os.path.join(path, 'logIcon.png')))
self.wid = LogWidget(self, manager)
self.wid.ui.input = QtGui.QLineEdit()
self.wid.ui.gridLayout.addWidget(self.wid.ui.input, 2, 0, 1, 3)
self.wid.ui.dirLabel.setText("Current Storage Directory: None")
self.setCentralWidget(self.wid)
self.resize(1000, 500)
self.manager = manager
#global WIN
global WIN
WIN = self
self.msgCount = 0
self.logCount=0
self.logFile = None
configfile.writeConfigFile('', self.fileName()) ## start a new temp log file, destroying anything left over from the last session.
self.buttons = [] ## weak references to all Log Buttons get added to this list, so it's easy to make them all do things, like flash red.
self.lock = Mutex()
self.errorDialog = ErrorDialog()
self.wid.ui.input.returnPressed.connect(self.textEntered)
self.sigLogMessage.connect(self.queuedLogMsg, QtCore.Qt.QueuedConnection)
#self.sigDisplayEntry.connect(self.displayEntry)
def queuedLogMsg(self, args): ## called indirectly when logMsg is called from a non-gui thread
self.logMsg(*args[0], **args[1])
def logMsg(self, msg, importance=5, msgType='status', **kwargs):
"""msg: the text of the log message
msgTypes: user, status, error, warning (status is default)
importance: 0-9 (0 is low importance, 9 is high, 5 is default)
other keywords:
exception: a tuple (type, exception, traceback) as returned by sys.exc_info()
docs: a list of strings where documentation related to the message can be found
reasons: a list of reasons (as strings) for the message
traceback: a list of formatted callstack/trackback objects (formatting a traceback/callstack returns a list of strings), usually looks like [['line 1', 'line 2', 'line3'], ['line1', 'line2']]
Feel free to add your own keyword arguments. These will be saved in the log.txt file, but will not affect the content or way that messages are displayed.
"""
## for thread-safetyness:
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if not isGuiThread:
self.sigLogMessage.emit(((msg, importance, msgType), kwargs))
return
try:
currentDir = self.manager.getCurrentDir()
except:
currentDir = None
if isinstance(currentDir, DirHandle):
kwargs['currentDir'] = currentDir.name()
else:
kwargs['currentDir'] = None
now = str(time.strftime('%Y.%m.%d %H:%M:%S'))
name = 'LogEntry_' + str(self.msgCount)
self.msgCount += 1
entry = {
#'docs': None,
#'reasons': None,
'message': msg,
'timestamp': now,
'importance': importance,
'msgType': msgType,
#'exception': exception,
'id': self.msgCount,
}
for k in kwargs:
entry[k] = kwargs[k]
self.processEntry(entry)
## Allow exception to override values in the entry
if entry.get('exception', None) is not None and 'msgType' in entry['exception']:
entry['msgType'] = entry['exception']['msgType']
self.saveEntry({name:entry})
self.wid.addEntry(entry) ## takes care of displaying the entry if it passes the current filters on the logWidget
#self.wid.displayEntry(entry)
if entry['msgType'] == 'error':
if self.errorDialog.show(entry) is False:
self.flashButtons()
def logExc(self, *args, **kwargs):
"""Calls logMsg, but adds in the current exception and callstack. Must be called within an except block, and should only be called if the exception is not re-raised. Unhandled exceptions, or exceptions that reach the top of the callstack are automatically logged, so logging an exception that will be re-raised can cause the exception to be logged twice. Takes the same arguments as logMsg."""
kwargs['exception'] = sys.exc_info()
kwargs['traceback'] = traceback.format_stack()[:-2] + ["------- exception caught ---------->\n"]
self.logMsg(*args, **kwargs)
def processEntry(self, entry):
## pre-processing common to saveEntry and displayEntry
## convert exc_info to serializable dictionary
if entry.get('exception', None) is not None:
exc_info = entry.pop('exception')
entry['exception'] = self.exceptionToDict(*exc_info, topTraceback=entry.get('traceback', []))
else:
entry['exception'] = None
def textEntered(self):
msg = unicode(self.wid.ui.input.text())
if msg == '!!':
self.makeError1()
elif msg == '##':
self.makeErrorLogExc()
try:
currentDir = self.manager.getCurrentDir()
except:
currentDir = None
self.logMsg(msg, importance=8, msgType='user', currentDir=currentDir)
self.wid.ui.input.clear()
def exceptionToDict(self, exType, exc, tb, topTraceback):
#lines = (traceback.format_stack()[:-skip]
#+ [" ---- exception caught ---->\n"]
#+ traceback.format_tb(sys.exc_info()[2])
#+ traceback.format_exception_only(*sys.exc_info()[:2]))
#print topTraceback
excDict = {}
excDict['message'] = traceback.format_exception(exType, exc, tb)[-1][:-1]
excDict['traceback'] = topTraceback + traceback.format_exception(exType, exc, tb)[:-1]
if hasattr(exc, 'docs'):
if len(exc.docs) > 0:
excDict['docs'] = exc.docs
if hasattr(exc, 'reasons'):
if len(exc.reasons) > 0:
excDict['reasons'] = exc.reasons
if hasattr(exc, 'kwargs'):
for k in exc.kwargs:
excDict[k] = exc.kwargs[k]
if hasattr(exc, 'oldExc'):
excDict['oldExc'] = self.exceptionToDict(*exc.oldExc, topTraceback=[])
return excDict
def flashButtons(self):
for b in self.buttons:
if b() is not None:
b().failure(tip='An error occurred. Please see the log.', limitedTime = False)
def resetButtons(self):
for b in self.buttons:
if b() is not None:
b().reset()
#try:
#b.reset()
#except RuntimeError:
#self.buttons.remove(b)
#print "Removed a logButton from logWindow's list. button:", b
def makeError1(self):
try:
self.makeError2()
#print x
except:
t, exc, tb = sys.exc_info()
#logExc(message="This button doesn't work", reasons='reason a, reason b', docs='documentation')
#if isinstance(exc, HelpfulException):
#exc.prependErr("Button doesn't work", (t,exc,tb), reasons = ["It's supposed to raise an error for testing purposes", "You're doing it wrong."])
#raise
#else:
raise HelpfulException(message='This button does not work.', exc=(t, exc, tb), reasons=["It's supposed to raise an error for testing purposes", "You're doing it wrong."])
def makeErrorLogExc(self):
try:
print y
except:
self.logExc('This is the message sent to logExc', msgType='error')
def makeError2(self):
try:
print y
except:
t, exc, tb = sys.exc_info()
raise HelpfulException(message='msg from makeError', exc=(t, exc, tb), reasons=["reason one", "reason 2"], docs=['what, you expect documentation?'])
def show(self):
QtGui.QMainWindow.show(self)
self.activateWindow()
self.raise_()
self.resetButtons()
def fileName(self):
## return the log file currently used
if self.logFile is None:
return "tempLog.txt"
else:
return self.logFile.name()
def setLogDir(self, dh):
if self.fileName() == dh.name():
return
oldfName = self.fileName()
if self.logFile is not None:
self.logMsg('Moving log storage to %s.' % (self.logFile.name(relativeTo=self.manager.baseDir))) ## make this note before we change the log file, so when a log ends, you know where it went after.
self.logMsg('Moving log storage to %s.' % (dh.name(relativeTo=self.manager.baseDir))) ## make this note before we change the log file, so when a log ends, you know where it went after.
if oldfName == 'tempLog.txt':
with self.lock:
temp = configfile.readConfigFile(oldfName)
else:
temp = {}
if dh.exists('log.txt'):
self.logFile = dh['log.txt']
with self.lock:
self.msgCount = len(configfile.readConfigFile(self.logFile.name()))
newTemp = {}
for v in temp.values():
self.msgCount += 1
newTemp['LogEntry_'+str(self.msgCount)] = v
self.saveEntry(newTemp)
else:
self.logFile = dh.createFile('log.txt')
self.saveEntry(temp)
self.logMsg('Moved log storage from %s to %s.' % (oldfName, self.fileName()))
self.wid.ui.dirLabel.setText("Current Storage Directory: " + self.fileName())
self.manager.sigLogDirChanged.emit(dh)
def getLogDir(self):
if self.logFile is None:
return None
else:
return self.logFile.parent()
def saveEntry(self, entry):
with self.lock:
configfile.appendConfigFile(entry, self.fileName())
def disablePopups(self, disable):
self.errorDialog.disable(disable)
class LogWidget(QtGui.QWidget):
sigDisplayEntry = QtCore.Signal(object) ## for thread-safetyness
sigAddEntry = QtCore.Signal(object) ## for thread-safetyness
sigScrollToAnchor = QtCore.Signal(object) # for internal use.
def __init__(self, parent, manager):
QtGui.QWidget.__init__(self, parent)
self.ui = LogWidgetTemplate.Ui_Form()
self.manager = manager
self.ui.setupUi(self)
#self.ui.input.hide()
self.ui.filterTree.topLevelItem(1).setExpanded(True)
self.entries = [] ## stores all log entries in memory
self.cache = {} ## for storing html strings of entries that have already been processed
self.displayedEntries = []
#self.currentEntries = None ## recordArray that stores currently displayed entries -- so that if filters get more restrictive we can just refilter this list instead of filtering everything
self.typeFilters = []
self.importanceFilter = 0
self.dirFilter = False
self.entryArrayBuffer = np.zeros(1000, dtype=[ ### a record array for quick filtering of entries
('index', 'int32'),
('importance', 'int32'),
('msgType', '|S10'),
('directory', '|S100'),
('entryId', 'int32')
])
self.entryArray = self.entryArrayBuffer[:0]
self.filtersChanged()
self.sigDisplayEntry.connect(self.displayEntry, QtCore.Qt.QueuedConnection)
self.sigAddEntry.connect(self.addEntry, QtCore.Qt.QueuedConnection)
self.ui.exportHtmlBtn.clicked.connect(self.exportHtml)
self.ui.filterTree.itemChanged.connect(self.setCheckStates)
self.ui.importanceSlider.valueChanged.connect(self.filtersChanged)
#self.ui.logView.linkClicked.connect(self.linkClicked)
self.ui.output.anchorClicked.connect(self.linkClicked)
self.sigScrollToAnchor.connect(self.scrollToAnchor, QtCore.Qt.QueuedConnection)
#page = self.ui.logView.page()
#page.setLinkDelegationPolicy(page.DelegateAllLinks)
def loadFile(self, f):
"""Load the file, f. f must be able to be read by configfile.py"""
log = configfile.readConfigFile(f)
self.entries = []
self.entryArrayBuffer = np.zeros(len(log),dtype=[
('index', 'int32'),
('importance', 'int32'),
('msgType', '|S10'),
('directory', '|S100'),
('entryId', 'int32')
])
self.entryArray = self.entryArrayBuffer[:]
i = 0
for k,v in log.iteritems():
v['id'] = k[9:] ## record unique ID to facilitate HTML generation (javascript needs this ID)
self.entries.append(v)
self.entryArray[i] = np.array([(i, v.get('importance', 5), v.get('msgType', 'status'), v.get('currentDir', ''), v.get('entryId', v['id']))], dtype=[('index', 'int32'), ('importance', 'int32'), ('msgType', '|S10'), ('directory', '|S100'), ('entryId', 'int32')])
i += 1
self.filterEntries() ## puts all entries through current filters and displays the ones that pass
def addEntry(self, entry):
## All incoming messages begin here
## for thread-safetyness:
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if not isGuiThread:
self.sigAddEntry.emit(entry)
return
self.entries.append(entry)
i = len(self.entryArray)
entryDir = entry.get('currentDir', None)
if entryDir is None:
entryDir = ''
arr = np.array([(i, entry['importance'], entry['msgType'], entryDir, entry['id'])], dtype = [('index', 'int32'), ('importance', 'int32'), ('msgType', '|S10'), ('directory', '|S100'), ('entryId', 'int32')])
## make more room if needed
if len(self.entryArrayBuffer) == len(self.entryArray):
newArray = np.empty(len(self.entryArrayBuffer)+1000, self.entryArrayBuffer.dtype)
newArray[:len(self.entryArray)] = self.entryArray
self.entryArrayBuffer = newArray
self.entryArray = self.entryArrayBuffer[:len(self.entryArray)+1]
#self.entryArray[i] = [(i, entry['importance'], entry['msgType'], entry['currentDir'])]
self.entryArray[i] = arr
self.checkDisplay(entry) ## displays the entry if it passes the current filters
#np.append(self.entryArray, np.array(i, [[i, entry['importance'], entry['msgType'], entry['currentDir']]]), dtype = [('index', int), ('importance', int), ('msgType', str), ('directory', str)])
def setCheckStates(self, item, column):
if item == self.ui.filterTree.topLevelItem(1):
if item.checkState(0):
for i in range(item.childCount()):
item.child(i).setCheckState(0, QtCore.Qt.Checked)
elif item.parent() == self.ui.filterTree.topLevelItem(1):
if not item.checkState(0):
self.ui.filterTree.topLevelItem(1).setCheckState(0, QtCore.Qt.Unchecked)
self.filtersChanged()
def filtersChanged(self):
### Update self.typeFilters, self.importanceFilter, and self.dirFilter to reflect changes.
tree = self.ui.filterTree
self.typeFilters = []
for i in range(tree.topLevelItem(1).childCount()):
child = tree.topLevelItem(1).child(i)
if tree.topLevelItem(1).checkState(0) or child.checkState(0):
text = child.text(0)
self.typeFilters.append(unicode(text))
self.importanceFilter = self.ui.importanceSlider.value()
self.updateDirFilter()
#self.dirFilter = self.manager.getDirOfSelectedFile().name()
#else:
#self.dirFilter = False
self.filterEntries()
def updateDirFilter(self, dh=None):
if self.ui.filterTree.topLevelItem(0).checkState(0):
if dh==None:
self.dirFilter = self.manager.getDirOfSelectedFile().name()
else:
self.dirFilter = dh.name()
else:
self.dirFilter = False
def filterEntries(self):
"""Runs each entry in self.entries through the filters and displays if it makes it through."""
### make self.entries a record array, then filtering will be much faster (to OR true/false arrays, + them)
typeMask = self.entryArray['msgType'] == ''
for t in self.typeFilters:
typeMask += self.entryArray['msgType'] == t
mask = (self.entryArray['importance'] > self.importanceFilter) * typeMask
if self.dirFilter != False:
d = np.ascontiguousarray(self.entryArray['directory'])
j = len(self.dirFilter)
i = len(d)
d = d.view(np.byte).reshape(i, 100)[:, :j]
d = d.reshape(i*j).view('|S%d' % j)
mask *= (d == self.dirFilter)
self.ui.output.clear()
global Stylesheet
self.ui.output.document().setDefaultStyleSheet(Stylesheet)
#global pageTemplate
#self.ui.logView.setHtml(pageTemplate)
indices = list(self.entryArray[mask]['index'])
inds = indices
#if self.dirFilter != False:
#j = len(self.dirFilter)
#for i, n in inds:
#if not self.entries[n]['currentDir'][:j] == self.dirFilter:
#indices.pop(i)
self.displayEntry([self.entries[i] for i in indices])
def checkDisplay(self, entry):
### checks whether entry passes the current filters and displays it if it does.
if entry['msgType'] not in self.typeFilters:
return
elif entry['importance'] < self.importanceFilter:
return
elif self.dirFilter is not False:
if entry['currentDir'][:len(self.dirFilter)] != self.dirFilter:
return
else:
self.displayEntry([entry])
def displayEntry(self, entries):
## entries should be a list of log entries
## for thread-safetyness:
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if not isGuiThread:
self.sigDisplayEntry.emit(entries)
return
for entry in entries:
if not self.cache.has_key(id(entry)):
self.cache[id(entry)] = self.generateEntryHtml(entry)
## determine message color:
#if entry['msgType'] == 'status':
#color = 'green'
#elif entry['msgType'] == 'user':
#color = 'blue'
#elif entry['msgType'] == 'error':
#color = 'red'
#elif entry['msgType'] == 'warning':
#color = '#DD4400' ## orange
#else:
#color = 'black'
#if entry.has_key('exception') or entry.has_key('docs') or entry.has_key('reasons'):
##self.displayComplexMessage(entry, color)
#self.displayComplexMessage(entry)
#else:
#self.displayText(entry['message'], entry, color, timeStamp=entry['timestamp'])
#for x in self.cache[id(entry)]:
#self.ui.output.appendHtml(x)
html = self.cache[id(entry)]
#frame = self.ui.logView.page().currentFrame()
#isMax = frame.scrollBarValue(QtCore.Qt.Vertical) == frame.scrollBarMaximum(QtCore.Qt.Vertical)
sb = self.ui.output.verticalScrollBar()
isMax = sb.value() == sb.maximum()
#frame.findFirstElement('body').appendInside(html)
self.ui.output.append(html)
self.displayedEntries.append(entry)
if isMax:
## can't scroll to end until the web frame has processed the html change
#frame.setScrollBarValue(QtCore.Qt.Vertical, frame.scrollBarMaximum(QtCore.Qt.Vertical))
## Calling processEvents anywhere inside an error handler is forbidden
## because this can lead to Qt complaining about paint() recursion.
#QtGui.QApplication.processEvents()
#self.ui.output.scrollToAnchor(str(entry['id']))
self.sigScrollToAnchor.emit(str(entry['id'])) ## queued connection
#self.ui.logView.update()
def scrollToAnchor(self, anchor):
self.ui.output.scrollToAnchor(anchor)
def generateEntryHtml(self, entry):
msg = self.cleanText(entry['message'])
reasons = ""
docs = ""
exc = ""
if entry.has_key('reasons'):
reasons = self.formatReasonStrForHTML(entry['reasons'])
if entry.has_key('docs'):
docs = self.formatDocsStrForHTML(entry['docs'])
if entry.get('exception', None) is not None:
exc = self.formatExceptionForHTML(entry, entryId=entry['id'])
extra = reasons + docs + exc
if extra != "":
#extra = "<div class='logExtra'>" + extra + "</div>"
extra = "<table class='logExtra'><tr><td>" + extra + "</td></tr></table>"
#return """
#<div class='entry'>
#<div class='%s'>
#<span class='timestamp'>%s</span>
#<span class='message'>%s</span>
#%s
#</div>
#</div>
#""" % (entry['msgType'], entry['timestamp'], msg, extra)
return """
<a name="%s"/><table class='entry'><tr><td>
<table class='%s'><tr><td>
<span class='timestamp'>%s</span>
<span class='message'>%s</span>
%s
</td></tr></table>
</td></tr></table>
""" % (str(entry['id']), entry['msgType'], entry['timestamp'], msg, extra)
#if entry.has_key('exception') or entry.has_key('docs') or entry.has_key('reasons'):
##self.displayComplexMessage(entry, color)
#return self.generateComplex(entry)
#else:
#return self.generateSimple(entry['message'], entry, color, timeStamp=entry['timestamp'])
##self.displayText(entry['message'], entry, color, timeStamp=entry['timestamp'])
@staticmethod
def cleanText(text):
text = re.sub(r'&', '&amp;', text)
text = re.sub(r'>', '&gt;', text)
text = re.sub(r'<', '&lt;', text)
text = re.sub(r'\n', '<br/>\n', text)
return text
#def displayText(self, msg, entry, colorStr='black', timeStamp=None, clean=True):
#if clean:
#msg = self.cleanText(msg)
#if msg[-1:] == '\n':
#msg = msg[:-1]
#msg = '<br />'.join(msg.split('\n'))
#if timeStamp is not None:
#strn = '<b style="color:black"> %s </b> <span style="color:%s"> %s </span>' % (timeStamp, colorStr, msg)
#else:
#strn = '<span style="color:%s"> %s </span>' % (colorStr, msg)
##self.ui.output.appendHtml(strn)
#self.cache[id(entry)].append(strn)
#def displayComplexMessage(self, entry, color='black'):
#self.displayText(entry['message'], entry, color, timeStamp = entry['timestamp'], clean=True)
#if entry.has_key('reasons'):
#reasons = self.formatReasonStrForHTML(entry['reasons'])
#self.displayText(reasons, entry, 'black', clean=False)
#if entry.has_key('docs'):
#docs = self.formatDocsStrForHTML(entry['docs'])
#self.displayText(docs, entry, 'black', clean=False)
#if entry.get('exception', None) is not None:
#self.displayException(entry['exception'], entry, 'black', tracebacks=entry.get('traceback', None))
def formatExceptionForHTML(self, entry, exception=None, count=1, entryId=None):
### Here, exception is a dict that holds the message, reasons, docs, traceback and oldExceptions (which are also dicts, with the same entries)
## the count and tracebacks keywords are for calling recursively
if exception is None:
exception = entry['exception']
#if tracebacks is None:
#tracebacks = []
indent = 10
text = self.cleanText(exception['message'])
text = re.sub(r'^HelpfulException: ', '', text)
#if exception.has_key('oldExc'):
#self.displayText(" "*indent + str(count)+'. ' + text, entry, color, clean=False)
#else:
#self.displayText(" "*indent + str(count)+'. Original error: ' + text, entry, color, clean=False)
messages = [text]
#print "\n", messages, "\n"
if exception.has_key('reasons'):
reasons = self.formatReasonsStrForHTML(exception['reasons'])
text += reasons
#self.displayText(reasons, entry, color, clean=False)
if exception.has_key('docs'):
docs = self.formatDocsStrForHTML(exception['docs'])
#self.displayText(docs, entry, color, clean=False)
text += docs
traceback = [self.formatTracebackForHTML(exception['traceback'], count)]
text = [text]
if exception.has_key('oldExc'):
exc, tb, msgs = self.formatExceptionForHTML(entry, exception['oldExc'], count=count+1)
text.extend(exc)
messages.extend(msgs)
traceback.extend(tb)
#else:
#if len(tracebacks)==count+1:
#n=0
#else:
#n=1
#for i, tb in enumerate(tracebacks):
#self.displayTraceback(tb, entry, number=i+n)
if count == 1:
exc = "<div class=\"exception\"><ol>" + "\n".join(["<li>%s</li>" % ex for ex in text]) + "</ol></div>"
tbStr = "\n".join(["<li><b>%s</b><br/><span class='traceback'>%s</span></li>" % (messages[i], tb) for i,tb in enumerate(traceback)])
#traceback = "<div class=\"traceback\" id=\"%s\"><ol>"%str(entryId) + tbStr + "</ol></div>"
entry['tracebackHtml'] = tbStr
#return exc + '<a href="#" onclick="showDiv(\'%s\')">Show traceback</a>'%str(entryId) + traceback
return exc + '<a href="exc:%s">Show traceback %s</a>'%(str(entryId), str(entryId))
else:
return text, traceback, messages
def formatTracebackForHTML(self, tb, number):
try:
tb = [line for line in tb if not line.startswith("Traceback (most recent call last)")]
except:
print "\n"+str(tb)+"\n"
raise
return re.sub(" ", " ", ("").join(map(self.cleanText, tb)))[:-1]
#tb = [self.cleanText(strip(x)) for x in tb]
#lines = []
#prefix = ''
#for l in ''.join(tb).split('\n'):
#if l == '':
#continue
#if l[:9] == "Traceback":
#prefix = ' ' + str(number) + '. '
#continue
#spaceCount = 0
#while l[spaceCount] == ' ':
#spaceCount += 1
#if prefix is not '':
#spaceCount -= 1
#lines.append(" "*(spaceCount*4) + prefix + l)
#prefix = ''
#return '<div class="traceback">' + '<br />'.join(lines) + '</div>'
#self.displayText('<br />'.join(lines), entry, color, clean=False)
def formatReasonsStrForHTML(self, reasons):
#indent = 6
reasonStr = "<table class='reasons'><tr><td>Possible reasons include:\n<ul>\n"
for r in reasons:
r = self.cleanText(r)
reasonStr += "<li>" + r + "</li>\n"
#reasonStr += " "*22 + chr(97+i) + ". " + r + "<br>"
reasonStr += "</ul></td></tr></table>\n"
return reasonStr
def formatDocsStrForHTML(self, docs):
#indent = 6
docStr = "<div class='docRefs'>Relevant documentation:\n<ul>\n"
for d in docs:
d = self.cleanText(d)
docStr += "<li><a href=\"doc:%s\">%s</a></li>\n" % (d, d)
docStr += "</ul></div>\n"
return docStr
def exportHtml(self, fileName=False):
#self.makeError1()
if fileName is False:
self.fileDialog = FileDialog(self, "Save HTML as...", self.manager.getCurrentDir().name())
#self.fileDialog.setFileMode(QtGui.QFileDialog.AnyFile)
self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
self.fileDialog.show()
self.fileDialog.fileSelected.connect(self.exportHtml)
return
if fileName[-5:] != '.html':
fileName += '.html'
#doc = self.ui.output.document().toHtml('utf-8')
#for e in self.displayedEntries:
#if e.has_key('tracebackHtml'):
#doc = re.sub(r'<a href="exc:%s">(<[^>]+>)*Show traceback %s(<[^>]+>)*</a>'%(str(e['id']), str(e['id'])), e['tracebackHtml'], doc)
global pageTemplate
doc = pageTemplate
for e in self.displayedEntries:
doc += self.cache[id(e)]
for e in self.displayedEntries:
if e.has_key('tracebackHtml'):
doc = re.sub(r'<a href="exc:%s">(<[^>]+>)*Show traceback %s(<[^>]+>)*</a>'%(str(e['id']), str(e['id'])), e['tracebackHtml'], doc)
#doc = self.ui.logView.page().currentFrame().toHtml()
f = open(fileName, 'w')
f.write(doc.encode('utf-8'))
f.close()
def makeError1(self):
### just for testing error logging
try:
self.makeError2()
#print x
except:
t, exc, tb = sys.exc_info()
#logExc(message="This button doesn't work", reasons='reason a, reason b', docs='documentation')
#if isinstance(exc, HelpfulException):
#exc.prependErr("Button doesn't work", (t,exc,tb), reasons = ["It's supposed to raise an error for testing purposes", "You're doing it wrong."])
#raise
#else:
printExc("This is the message sent to printExc.")
#raise HelpfulException(message='This button does not work.', exc=(t, exc, tb), reasons=["It's supposed to raise an error for testing purposes", "You're doing it wrong."])
def makeError2(self):
### just for testing error logging
try:
print y
except:
t, exc, tb = sys.exc_info()
raise HelpfulException(message='msg from makeError', exc=(t, exc, tb), reasons=["reason one", "reason 2"], docs=['what, you expect documentation?'])
def linkClicked(self, url):
url = url.toString()
if url[:4] == 'doc:':
self.manager.showDocumentation(url[4:])
elif url[:4] == 'exc:':
cursor = self.ui.output.document().find('Show traceback %s' % url[4:])
try:
tb = self.entries[int(url[4:])-1]['tracebackHtml']
except IndexError:
try:
tb = self.entries[self.entryArray[self.entryArray['entryId']==(int(url[4:]))]['index']]['tracebackHtml']
except:
print "requested index %d, but only %d entries exist." % (int(url[4:])-1, len(self.entries))
raise
cursor.insertHtml(tb)
def clear(self):
#self.ui.logView.setHtml("")
self.ui.output.clear()
self.displayedEntries = []
class ErrorDialog(QtGui.QDialog):
def __init__(self):
QtGui.QDialog.__init__(self)
#self.setModal(False)
self.setWindowFlags(QtCore.Qt.Window)
#self.setWindowModality(QtCore.Qt.NonModal)
self.setWindowTitle('ACQ4 Error')
self.layout = QtGui.QVBoxLayout()
self.layout.setContentsMargins(3,3,3,3)
self.setLayout(self.layout)
self.messages = []
self.msgLabel = QtGui.QLabel()
#self.msgLabel.setWordWrap(False)
#self.msgLabel.setMaximumWidth(800)
self.msgLabel.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
#self.msgLabel.setFrameStyle(QtGui.QFrame.Box)
#self.msgLabel.setStyleSheet('QLabel { font-weight: bold }')
self.layout.addWidget(self.msgLabel)
self.msgLabel.setMaximumWidth(800)
self.msgLabel.setMinimumWidth(500)
self.msgLabel.setWordWrap(True)
self.layout.addStretch()
self.disableCheck = QtGui.QCheckBox('Disable error message popups')
self.layout.addWidget(self.disableCheck)
self.btnLayout = QtGui.QHBoxLayout()
self.btnLayout.addStretch()
self.okBtn = QtGui.QPushButton('OK')
self.btnLayout.addWidget(self.okBtn)
self.nextBtn = QtGui.QPushButton('Show next error')
self.btnLayout.addWidget(self.nextBtn)
self.nextBtn.hide()
self.logBtn = QtGui.QPushButton('Show Log...')
self.btnLayout.addWidget(self.logBtn)
self.btnLayoutWidget = QtGui.QWidget()
self.layout.addWidget(self.btnLayoutWidget)
self.btnLayoutWidget.setLayout(self.btnLayout)
self.btnLayout.addStretch()
self.okBtn.clicked.connect(self.okClicked)
self.nextBtn.clicked.connect(self.nextMessage)
self.logBtn.clicked.connect(self.logClicked)
def show(self, entry):
## rules are:
## - Try to show friendly error messages
## - If there are any helpfulExceptions, ONLY show those
## otherwise, show everything
self.lastEntry = entry
msgLines = []
if entry['message'] is not None:
msgLines.append(entry['message'])
## extract list of exceptions
key = 'exception'
exc = entry
while key in exc:
exc = exc[key]
if exc is None:
break
## ignore this error if it was generated on the command line.
tb = exc.get('traceback', ['',''])
if len(tb) > 1 and 'File "<stdin>"' in tb[1]:
return False
key = 'oldExc'
if exc['message'].startswith('HelpfulException'):
msgLines.append('<b>' + self.cleanText(re.sub(r'^HelpfulException: ', '', exc['message'])) + '</b>')
elif exc['message'] == 'None':
continue
else:
msgLines.append(self.cleanText(exc['message']))
msg = "<br>".join(msgLines)
if self.disableCheck.isChecked():
return False
if self.isVisible():
self.messages.append(msg)
self.nextBtn.show()
self.nextBtn.setEnabled(True)
self.nextBtn.setText('Show next error (%d more)' % len(self.messages))
else:
w = QtGui.QApplication.activeWindow()
self.nextBtn.hide()
self.msgLabel.setText(msg)
self.open()
if w is not None:
cp = w.geometry().center()
self.setGeometry(cp.x() - self.width()/2., cp.y() - self.height()/2., self.width(), self.height())
#self.activateWindow()
self.raise_()
@staticmethod
def cleanText(text):
text = re.sub(r'&', '&amp;', text)
text = re.sub(r'>', '&gt;', text)
text = re.sub(r'<', '&lt;', text)
text = re.sub(r'\n', '<br/>\n', text)
return text
def closeEvent(self, ev):
QtGui.QDialog.closeEvent(self, ev)
self.messages = []
def okClicked(self):
self.accept()
self.messages = []
def logClicked(self):
global WIN
self.accept()
WIN.show()
self.messages = []
def nextMessage(self):
self.msgLabel.setText(self.messages.pop(0))
self.nextBtn.setText('Show next error (%d more)' % len(self.messages))
if len(self.messages) == 0:
self.nextBtn.setEnabled(False)
def disable(self, disable):
self.disableCheck.setChecked(disable)
if __name__ == "__main__":
#import sys
#import os.path as osp
#d = osp.dirname(osp.dirname(osp.abspath(__file__)))
#sys.path = [osp.join(d, 'util')] + sys.path + [d]
#from acq4.util import acq4.pyqtgraph
app = QtGui.QApplication([])
log = LogWindow(None)
log.show()
original_excepthook = sys.excepthook
def excepthook(*args):
global original_excepthook
log.displayException(*args)
ret = original_excepthook(*args)
sys.last_traceback = None ## the important bit
sys.excepthook = excepthook
app.exec_()
| mit | -3,876,842,853,771,468,000 | 39.464575 | 401 | 0.561234 | false |
OptiPop/external_chromium_org | tools/telemetry/examples/measure_trace.py | 41 | 3169 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import collections
import json
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
                             'perf'))

from telemetry.page import page as page_module
from telemetry.timeline import model
from telemetry.timeline import tracing_timeline_data
from telemetry.results import page_test_results
from telemetry.results import buildbot_output_formatter
from telemetry.web_perf import timeline_interaction_record as tir_module
from telemetry.web_perf.metrics import smoothness

# pylint: disable=F0401
from measurements import smoothness_controller
from measurements import smooth_gesture_util


def _ExtractInteractionsRecordFromThread(thread, timeline_model):
  run_smooth_actions_record = None
  records = []
  for event in thread.async_slices:
    if not tir_module.IsTimelineInteractionRecord(event.name):
      continue
    assert event.start_thread
    assert event.start_thread is event.end_thread
    r = smooth_gesture_util.GetAdjustedInteractionIfContainGesture(
        timeline_model,
        tir_module.TimelineInteractionRecord.FromAsyncEvent(event))
    if r.label == smoothness_controller.RUN_SMOOTH_ACTIONS:
      assert run_smooth_actions_record is None, (
          'There can\'t be more than 1 %s record' %
          smoothness_controller.RUN_SMOOTH_ACTIONS)
      run_smooth_actions_record = r
    else:
      records.append(r)
  if not records:
    # Only include run_smooth_actions_record (label =
    # smoothness_controller.RUN_SMOOTH_ACTIONS) if there is no other records
    records = [run_smooth_actions_record]
  return records


def Main(args):
  if len(args) is not 1:
    print 'Invalid arguments. Usage: measure_trace.py <trace file>'
    return 1
  with open(args[0]) as trace_file:
    trace_data = tracing_timeline_data.TracingTimelineData(
        json.load(trace_file))

  timeline_model = model.TimelineModel(trace_data)
  smoothness_metric = smoothness.SmoothnessMetric()
  formatters = [
      buildbot_output_formatter.BuildbotOutputFormatter(sys.stdout)
  ]
  results = page_test_results.PageTestResults(output_formatters=formatters)
  for thread in timeline_model.GetAllThreads():
    interaction_records = _ExtractInteractionsRecordFromThread(
        thread, timeline_model)
    if not any(interaction_records):
      continue
    records_label_to_records_map = collections.defaultdict(list)
    for r in interaction_records:
      records_label_to_records_map[r.label].append(r)
    for label, records in records_label_to_records_map.iteritems():
      if records[0].is_smooth:
        page = page_module.Page('interaction-record://%s' % label)
        results.WillRunPage(page)
        smoothness_metric.AddResults(
            timeline_model, thread, records, results)
        results.DidRunPage(page)
  results.PrintSummary()
  return 0


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
| bsd-3-clause | -1,768,523,650,092,833,800 | 35.848837 | 77 | 0.723572 | false |
TheCoSMoCompany/biopredyn | Prototype/src/libsbml-5.10.0/src/bindings/swig/swigdoc.py | 1 | 61185 | #!/usr/bin/env python
#
# @file swigdoc.py
# @brief Creates documentation for C#, Java, Python, and Perl.
# @author Ben Bornstein
# @author Christoph Flamm
# @author Akiya Jouraku
# @author Michael Hucka
# @author Frank Bergmann
#
#<!---------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright (C) 2013-2014 jointly by the following organizations:
# 1. California Institute of Technology, Pasadena, CA, USA
# 2. EMBL European Bioinformatics Institute (EMBL-EBI), Hinxton, UK
# 3. University of Heidelberg, Heidelberg, Germany
#
# Copyright (C) 2009-2013 jointly by the following organizations:
# 1. California Institute of Technology, Pasadena, CA, USA
# 2. EMBL European Bioinformatics Institute (EMBL-EBI), Hinxton, UK
#
# Copyright (C) 2006-2008 by the California Institute of Technology,
# Pasadena, CA, USA
#
# Copyright (C) 2002-2005 jointly by the following organizations:
# 1. California Institute of Technology, Pasadena, CA, USA
# 2. Japan Science and Technology Agency, Japan
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
#----------------------------------------------------------------------- -->*/
import sys, string, os.path, re, argparse, libsbmlutils
#
# Globally-scoped variables
#
language = ''
doc_include_path = ''
ignored_hfiles = ['ListWrapper.h']
ignored_ifiles = ['std_string.i', 'javadoc.i', 'spatial-package.i']
libsbmlclasses = []
# In some languages like C#, we have to be careful about the method declaration
# that we put on the swig %{java|cs}methodmodifiers. In particular, in C#, if
# a method overrides a base class' method, we have to add the modifier "new".
#
# FIXME: The following approach of hard-coding the list of cases is
# definitely not ideal. We need to extract the list somehow, but it's not
# easy to do within this script (swigdoc.py) because the syntax of the
# files we read in is C++, not the target language like C#, and in C++,
# it's not obvious if the method you're looking at overrides another. We a
# more sophisticated parser like the compiler itself, or we should write a
# small C# program to gather this info prior to running swigdoc.py.
overriders = \
{
'AlgebraicRule' : [ 'clone', 'hasRequiredAttributes' ],
'AssignmentRule' : [ 'clone', 'hasRequiredAttributes' ],
'Compartment' : [ 'clone', 'getId', 'getName', 'isSetId', 'isSetName', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'setId', 'setName', 'unsetId', 'unsetName' ],
'CompartmentType' : [ 'clone', 'getId', 'getName', 'isSetId', 'isSetName', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'setId', 'setName', 'unsetId', 'unsetName' ],
'CompExtension' : [ 'clone', 'getErrorIdOffset' ],
'Constraint' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredElements' ],
'Delay' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredElements' ],
'Event' : [ 'clone', 'getId', 'getName', 'isSetId', 'isSetName', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'hasRequiredElements', 'setId', 'setName', 'unsetId', 'unsetName', 'connectToChild', 'enablePackageInternal' ],
'EventAssignment' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'hasRequiredElements', 'getId' ],
'FbcExtension' : [ 'clone', 'getErrorIdOffset' ],
'FunctionDefinition' : [ 'clone', 'getId', 'getName', 'isSetId', 'isSetName', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'hasRequiredElements', 'setId', 'setName', 'unsetId', 'unsetName' ],
'InitialAssignment' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'hasRequiredElements', 'getId' ],
'ISBMLExtensionNamespaces' : [ 'getURI', 'getPackageName' ],
'KineticLaw' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'hasRequiredElements', 'connectToChild', 'enablePackageInternal' ],
'LayoutExtension' : [ 'clone', 'getErrorIdOffset' ],
'ListOf' : [ 'clone', 'getTypeCode', 'getElementName', 'connectToChild', 'enablePackageInternal' ],
'ListOfCompartmentTypes' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfCompartments' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfConstraints' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfEventAssignments' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfEvents' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfFunctionDefinitions' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfInitialAssignments' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfParameters' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfLocalParameters' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfReactions' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfRules' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfSpecies' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfSpeciesReferences' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfSpeciesTypes' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfUnitDefinitions' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'ListOfUnits' : [ 'clone', 'getTypeCode', 'getItemTypeCode', 'getElementName', 'get', 'remove' ],
'Parameter' : [ 'clone', 'getId', 'getName', 'isSetId', 'isSetName', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'setId', 'setName', 'unsetId', 'unsetName' ],
'QualExtension' : [ 'clone', 'getErrorIdOffset' ],
'LocalParameter' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'getDerivedUnitDefinition' ],
'Model' : [ 'clone', 'getId', 'getName', 'isSetId', 'isSetName', 'getTypeCode', 'getElementName', 'hasRequiredElements', 'setId', 'setName', 'unsetId', 'unsetName', 'setAnnotation', 'appendAnnotation', 'connectToChild', 'enablePackageInternal' ],
'SimpleSpeciesReference' : [ 'getId', 'getName', 'isSetId', 'isSetName', 'setId', 'setName', 'unsetId', 'unsetName' ],
'ModifierSpeciesReference' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredAttributes' ],
'Priority' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredElements' ],
'RateRule' : [ 'clone', 'hasRequiredAttributes' ],
'Reaction' : [ 'clone', 'getId', 'getName', 'isSetId', 'isSetName', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'setId', 'setName', 'unsetId', 'unsetName', 'connectToChild', 'enablePackageInternal' ],
'Rule' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredElements', 'hasRequiredAttributes', 'getId' ],
'SBMLDocument' : [ 'clone', 'getModel', 'getTypeCode', 'getElementName', 'getNamespaces', 'connectToChild', 'enablePackageInternal' ],
'SBMLDocumentPlugin' : [ 'clone' ],
'SBMLErrorLog' : [ 'getError' ],
'Species' : [ 'clone', 'getId', 'getName', 'isSetId', 'isSetName', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'setId', 'setName', 'unsetId', 'unsetName' ],
'SpeciesReference' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'setAnnotation', 'appendAnnotation' ],
'SpeciesType' : [ 'clone', 'getId', 'getName', 'isSetId', 'isSetName', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'setId', 'setName', 'unsetId', 'unsetName' ],
'StoichiometryMath' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredElements' ],
'Trigger' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredElements', 'hasRequiredAttributes' ],
'Unit' : [ 'clone', 'getTypeCode', 'getElementName', 'hasRequiredAttributes' ],
'UnitDefinition' : [ 'clone', 'getId', 'getName', 'isSetId', 'isSetName', 'getTypeCode', 'getElementName', 'hasRequiredAttributes', 'hasRequiredElements', 'setId', 'setName', 'unsetId', 'unsetName', 'connectToChild', 'enablePackageInternal' ],
'XMLNode' : [ 'clone' ]
}
#
# Global variable for tracking all class docs, so that we can handle
# cross-references like @copydetails that may refer to files other
# than the file being processed at any given time.
#
allclassdocs = {}
#
# Global list of preprocessor symbols defined via the --define option to
# swigdoc.py on the command line. 'SWIG' and '__cplusplus' are always
# defined by default. (C is the only language for which we would not define
# __cplusplus, but for C, we don't use swigdoc.py anyway.)
#
preprocessor_defines = ['SWIG', '__cplusplus']
#
# Classes and methods.
#
class CHeader:
"""CHeader encapsulates the C++ class and C function definitions
found within a C header file.
"""
def __init__(self, stream, language, defines):
self.language = language
self.classes = []
self.functions = []
self.classDocs = []
self.inClass = False
self.inClassDocs = False
self.inDocs = False
self.isInternal = False
self.ignoreThis = False
self.classname = ''
self.docstring = ''
self.lines = ''
if stream is not None:
read_loop(self.header_line_parser, stream.readlines(), defines)
def header_line_parser(self, line):
stripped = line.strip()
# Track things that we flag as internal, so that we can
# remove them from the documentation.
if (stripped.find('@cond doxygenLibsbmlInternal') >= 0): self.isInternal = True
if (stripped.find('@endcond') >= 0): self.isInternal = False
# Watch for class description, usually at top of file.
if (not self.inClassDocs) and stripped.startswith('* @class'):
self.inClassDocs = True
self.classname = stripped[8:].strip()
if self.classname.endswith('.'):
self.classname = self.classname[:-1]
self.docstring = ''
return
if self.inClassDocs:
if stripped.startswith('* @brief'):
self.docstring += ' * ' + stripped[9:].strip() + '\n'
return
elif stripped.startswith('* @sbmlbrief{'):
end = stripped.find('}')
pkg = stripped[13:end]
rest = stripped[end + 1:].strip()
marker = '@htmlinclude pkg-marker-' + pkg + '.html'
# In the case of Java, the output of swigdoc is fed to Javadoc and
# not Doxygen. So, we do our own processing of our special Doxygen
# aliases. If we're not doing this for Java, we leave them in.
if self.language == 'java':
self.docstring += ' * ' + marker + ' ' + rest + '\n'
else:
group = '@sbmlpackage{' + pkg + '}'
self.docstring += ' * \n * ' + group + '\n *\n' + marker + ' ' + rest + '\n'
return
elif not stripped.endswith('*/') and not stripped.startswith('* @class'):
self.docstring += line
return
else:
if not self.classname.startswith("doc_"):
self.docstring = '/**\n' + self.docstring + ' */'
self.docstring = removeHTMLcomments(self.docstring)
doc = CClassDoc(self.docstring, self.classname, self.isInternal)
self.classDocs.append(doc)
# There may be more class docs in the same comment.
if stripped.startswith('* @class'):
self.classname = stripped[8:].strip()
if self.classname.endswith('.'):
self.classname = self.classname[:-1]
else:
self.inClassDocs = False
self.docstring = ''
return
# Watch for class definition, methods and out-of-class functions.
if stripped.startswith('class ') and not stripped.endswith(';'):
self.ignoreThis = False
self.inClass = True
self.classname = line[6:].split(':')[0].strip()
if self.classname[:6] == 'LIBSBM' or self.classname[:6] == 'LIBLAX':
self.classname = self.classname.split(' ')[1].strip()
self.classes.append( CClass(self.classname) )
return
if stripped == '};':
self.inClass = False
return
if stripped == '/**':
self.docstring = ''
self.lines = ''
self.ignoreThis = False
self.inDocs = True
if self.inDocs:
self.docstring += line
self.inDocs = (stripped != '*/')
return
# If we get here, we're no longer inside a comment block.
# Start saving lines, but skip embedded comments.
if stripped.startswith('#') or (stripped.find('typedef') >= 0):
self.ignoreThis = True
return
if not self.ignoreThis:
cppcomment = stripped.find('//')
if cppcomment != -1:
stripped = stripped[:cppcomment]
self.lines += stripped + ' ' # Space avoids jamming code together.
# Keep an eye out for the end of the declaration.
if not stripped.startswith('*') and \
(stripped.endswith(';') or stripped.endswith(')') or stripped.endswith('}')):
# It might be a forward declaration. Skip it.
if self.lines.startswith('class'):
return
# It might be a C++ operator redefinition. Skip it.
if self.lines.find('operator') >= 0:
return
# It might be an enum. Skip it.
# If it's not an enum at this point, parse it.
if stripped.endswith('}'):
self.lines = self.lines[:self.lines.rfind('{')]
if not stripped.startswith('enum'):
# If this segment begins with a comment, we need to skip over it.
searchstart = self.lines.rfind('*/')
if (searchstart < 0):
searchstart = 0
# Find (we hope) the end of the method name.
stop = self.lines[searchstart:].find('(')
# Pull out the method name & signature.
if (stop > 0):
name = self.lines[searchstart : searchstart + stop].split()[-1]
endparen = self.lines.rfind(')')
args = self.lines[searchstart + stop : endparen + 1]
isConst = self.lines[endparen:].rfind('const')
if len(self.docstring) > 0:
# Remove embedded HTML comments before we store the doc string.
self.docstring = removeHTMLcomments(self.docstring)
else:
# We have an empty self.docstring. Put in something so that later
# stages can do whatever postprocessing they need.
self.docstring = '/** */'
# Swig doesn't seem to mind C++ argument lists, even though they
# have "const", "&", etc. So I'm leaving the arg list unmodified.
func = Method(self.isInternal, self.docstring, name, args, (isConst > 0))
# Reset buffer for the next iteration, to skip the part seen.
self.lines = self.lines[endparen + 2:]
self.docstring = ''
if self.inClass:
c = self.classes[-1]
c.methods.append(func)
# Record method variants that take different arguments.
if c.methodVariants.get(name) == None:
c.methodVariants[name] = {}
c.methodVariants[name][args] = func
else:
self.functions.append(func)
# FIXME need do nc variants
class CClass:
"""A CClass encapsulates a C++ class. It has the following public
attributes:
- name
- methods
- methodVariants
"""
def __init__ (self, name):
"""CClass(name) -> CClass
Creates a new CClass with the given name.
"""
self.name = name
self.methods = [ ]
self.methodVariants = {}
class Method:
"""A Method encapsulates a C/C++ function. Currently, it has the
following public attributes:
- isInternal
- docstring
- name
- args
- isConst
"""
def __init__ (self, isInternal, docstring, name, args, isConst):
"""Method(isInternal, docstring name, args, isConst) -> Method
Creates a new Method description with the given docstring, name and args,
for the language, with special consideration if the method
was declared constant and/or internal.
"""
global language
self.name = name
self.isConst = isConst
self.isInternal = isInternal
if isInternal:
if language == 'java':
# We have a special Javadoc doclet that understands a non-standard
# Javadoc tag, @internal. When present in the documentation string
# of a method, it causes it to be excluded from the final
# documentation output. @internal is something doxygen offers.
#
p = re.compile('(\s+?)\*/', re.MULTILINE)
self.docstring = p.sub(r'\1* @internal\1*/', docstring)
elif language == 'csharp':
# We mark internal methods in a different way for C#.
self.docstring = docstring
else:
self.docstring = " @internal\n" + docstring
else:
self.docstring = docstring
# In Java and C#, if a method is const and swig has to translate the type,
# then for some reason swig cannot match up the resulting doc strings
# that we put into %javamethodmodifiers. The result is that the java
# documentation for the methods are empty. I can't figure out why, but
# have figured out that if we omit the argument list in the doc string
# that is put on %javamethodmodifiers for such case, swig does generate
# the comments for those methods. This approach is potentially dangerous
# because swig might attach the doc string to the wrong method if a
# methods has multiple versions with varying argument types, but the
# combination doesn't seem to arise in libSBML currently, and anyway,
# this fixes a real problem in the Java documentation for libSBML.
if language == 'java' or language == 'csharp':
if isConst and (args.find('unsigned int') >= 0):
self.args = ''
elif not args.strip() == '()':
if isConst:
self.args = args + ' const'
else:
self.args = args
else:
if isConst:
self.args = '() const'
else:
self.args = ''
else:
self.args = args
class CClassDoc:
"""Encapsulates documentation for a class. Currently, it has the
following public attributes:
- docstring
- name
"""
def __init__ (self, docstring, name, isInternal):
"""CClassDoc(docstring, name) -> CClassDoc
Creates a new CClassDoc with the given docstring and name.
"""
# Take out excess leading blank lines.
docstring = re.sub('/\*\*(\s+\*)+', r'/** \n *', docstring)
# Add marker for internal classes.
if isInternal:
docstring = re.sub('\*/', r'* @internal\n */', docstring)
self.docstring = docstring
self.name = name
self.isInternal = isInternal
# Example case for working out the algorithm for read_loop().
# Preprocessor symbols are X, Y, Z.
# Start:
#
# X not defined
# Y not defined
# Z defined
#
# skipping = false
# states = []
# symbols = []
#
# #ifdef X
#
# skipping = true
# states[0] = false
# symbols[0] = X
#
# ; should ignore rest of this until outer #else
#
# #ifndef Y
#
# #else
#
# #endif
#
# #else
#
# skipping = false
# states[0] = false
# symbols[0] = X
#
# #endif
#
# skipping = false
# states = []
# symbols = []
#
# #ifdef Z
#
# skipping = false
# states[0] = false
# symbols[0] = Z
#
# #ifndef Y
#
# skipping = false
# states[1] = false
# symbols[1] = Y
#
# #else
#
# skipping = true
# states[1] = false
# symbols[1] = Y
#
# #endif
#
# skipping = false
# states[0] = false
# symbols[0] = Z
#
# #else
#
# skipping = true
# states[0] = false
# symbols[0] = Z
#
# #endif
def read_loop(line_parser_func, lines, defined_symbols):
"""Non-recursive function to call 'line_parser_func'() on each line
of 'lines', paying attention to #if/#ifdef/#ifndef/#else/#endif
conditionals. 'defined_symbols' is a list of the symbols to check when
reading #if/#ifdef/#ifndef conditions."""
# symbol_stack holds the current #if condition symbol
# state_stack holds the skipping state before the current #if symbol was seen
states = [False]
skipping = False
for line in lines:
split = line.split()
if split:
start = split[0]
if start == '#if' or start == '#ifdef':
states.append(skipping)
if skipping:
continue
skipping = True
for s in defined_symbols:
if split[1] == s:
skipping = False
break
elif start == '#ifndef':
states.append(skipping)
if skipping:
continue
for s in defined_symbols:
if split[1] == s:
skipping = True
break
elif start == '#endif':
skipping = states.pop()
elif start == '#else' and not skipping:
skipping = not states[-1]
if not skipping:
line_parser_func(line)
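# Illustrative sketch (not part of the original script): read_loop() feeds each
# surviving line to the supplied parser while honoring #if/#ifdef/#ifndef/
# #else/#endif blocks. Collecting the lines kept under the default symbols
# could look like the following ('kept' and 'hdr_lines' are names invented for
# this example):
#
#   hdr_lines = open('Species.h').readlines()
#   kept = []
#   read_loop(kept.append, hdr_lines, preprocessor_defines)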
def find_inclusions(extension, lines, ignored_list):
includes = []
def inclusions_line_parser(line):
split = line.split()
if split and split[0] == '%include':
filename = re.sub('["<>]', '', split[1]).strip()
if filename.endswith(extension) and filename not in ignored_list:
includes.append(filename)
read_loop(inclusions_line_parser, lines, preprocessor_defines)
return includes
def get_swig_files (swig_file, included_files=[], parent_dirs=[]):
"""
Builds a list of all the files %include'd recursively from the given
SWIG .i file.
"""
# Record directories encountered.
dir = os.path.abspath(os.path.join(swig_file, os.pardir))
if dir not in parent_dirs:
parent_dirs.append(dir)
# Read the current file.
swig_file = os.path.normpath(os.path.abspath(os.path.join(dir, swig_file)))
stream = open(swig_file)
lines = stream.readlines()
stream.close()
# Create list of %include'd .i files found in the file, but filter out
# the ones we ignore.
ifiles = find_inclusions('.i', lines, ignored_ifiles)
# Recursively look for files that are included by the files we found.
# SWIG searches multiple paths for %include'd .i files. We just look in
# the directories of the .i files we encounter.
found_ifiles = []
for ifilename in ifiles:
search_dirs = ['.'] + parent_dirs
for dir in search_dirs:
file = os.path.normpath(os.path.abspath(os.path.join(dir, ifilename)))
if os.path.isfile(file) and file not in included_files:
included_files.append(file)
found_ifiles.extend(get_swig_files(file, included_files, parent_dirs))
break
return [swig_file] + found_ifiles
def get_header_files (swig_files, include_path):
"""
Reads the list of %include directives from the given SWIG (.i) files, and
returns a list of C/C++ headers (.h) found. This uses a recursive algorithm.
"""
hfiles = []
for file in swig_files:
stream = open(file)
hfiles.extend(find_inclusions('.h', stream.readlines(), ignored_hfiles))
stream.close()
# Convert the .h file names to absolute paths. This is slightly tricky
# because the file might be in the current directory, or in the
# include_path we were given, or in the directory of one of the .i files we
# encountered. So, we need to search them all.
search_dirs = [os.path.abspath('.')] + [os.path.abspath(include_path)]
for file in swig_files:
search_dirs.append(os.path.dirname(file))
abs_hfiles = []
for file in hfiles:
for dir in search_dirs:
abs_path = os.path.abspath(os.path.join(dir, file))
if os.path.isfile(abs_path) and abs_path not in abs_hfiles:
abs_hfiles.append(abs_path)
return abs_hfiles
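# Illustrative note (not from the original sources): the two helpers above are
# meant to be used together, e.g.
#
#   swig_files   = get_swig_files('../swig/libsbml.i')      # hypothetical path
#   header_files = get_header_files(swig_files, '../../src')
#
# get_swig_files() follows %include'd .i files recursively, while
# get_header_files() scans each of those .i files once for %include'd .h
# headers and resolves them to absolute paths.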
def rewriteCommonReferences (docstring):
"""rewriteCommonReferences (docstring) -> docstring
Rewrites common C++ doxygen references to match language-specific needs.
"""
if language == 'java':
target = 'libsbmlConstants#'
elif language == 'csharp':
target = 'libsbmlcs.libsbml.'
elif language == 'python':
target = 'libsbml.'
else:
target = ''
if target != '':
docstring = re.sub(r'ASTNodeType_t#', target, docstring)
docstring = re.sub(r'ASTNode_t#', target, docstring)
docstring = re.sub(r'BiolQualifierType_t#', target, docstring)
docstring = re.sub(r'ConversionOptionType_t#', target, docstring)
docstring = re.sub(r'ModelQualifierType_t#', target, docstring)
docstring = re.sub(r'OperationReturnValues_t#', target, docstring)
docstring = re.sub(r'ParseLogType_t#', target, docstring)
docstring = re.sub(r'QualifierType_t#', target, docstring)
docstring = re.sub(r'RuleType_t#', target, docstring)
docstring = re.sub(r'SBMLCompTypeCode_t#', target, docstring)
docstring = re.sub(r'SBMLErrorCategory_t#', target, docstring)
docstring = re.sub(r'SBMLErrorSeverity_t#', target, docstring)
docstring = re.sub(r'SBMLFbcTypeCode_t#', target, docstring)
docstring = re.sub(r'SBMLLayoutTypeCode_t#', target, docstring)
docstring = re.sub(r'SBMLQualTypeCode_t#', target, docstring)
docstring = re.sub(r'SBMLTypeCode_t#', target, docstring)
docstring = re.sub(r'UnitKind_t#', target, docstring)
docstring = re.sub(r'XMLErrorCategory_t#', target, docstring)
docstring = re.sub(r'XMLErrorCode_t#', target, docstring)
docstring = re.sub(r'XMLErrorSeverityOverride_t#', target, docstring)
docstring = re.sub(r'XMLErrorSeverity_t#', target, docstring)
docstring = re.sub(r'CompSBMLErrorCode_t#', target, docstring)
docstring = re.sub(r'QualSBMLErrorCode_t#', target, docstring)
docstring = re.sub(r'FbcSBMLErrorCode_t#', target, docstring)
docstring = re.sub(r'LayoutSBMLErrorCode_t#', target, docstring)
# Put this one last, so it doesn't match the XXXXSBMLErrorCode_t ones.
docstring = re.sub(r'SBMLErrorCode_t#', target, docstring)
return docstring
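# Example of the effect of rewriteCommonReferences() (illustrative input; the
# constant name is only an example).  With language == 'python', a reference
# such as
#
#   '@link OperationReturnValues_t#LIBSBML_OPERATION_SUCCESS@endlink'
#
# becomes
#
#   '@link libsbml.LIBSBML_OPERATION_SUCCESS@endlink'
#
# i.e. only the 'OperationReturnValues_t#' prefix is rewritten to the
# language-specific target.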
def translateVerbatim (match):
text = match.group()
if re.search('@verbatim', text) != None:
tagName = 'verbatim'
else:
tagName = 'code'
text = text.replace('<p>', '')
    text = text.replace('<', '&lt;')
    text = text.replace('>', '&gt;')
regexp = '@' + tagName + '[ \t]*'
text = re.sub(regexp, r"<div class='fragment'><pre class='fragment'>", text)
regexp = '(\s*\*\s*)*@end' + tagName
p = re.compile(regexp, re.MULTILINE)
text = p.sub(r'</pre></div>', text)
return text
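# Sketch of what translateVerbatim() produces (hypothetical input).  A matched
# region such as
#
#   @verbatim
#   <sbml level="3" version="1"> ... </sbml>
#   @endverbatim
#
# has any <p> removed, '<'/'>' escaped as HTML entities, and is wrapped as
#
#   <div class='fragment'><pre class='fragment'> ... </pre></div>
#
# The same treatment is applied to @code ... @endcode regions.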
def translateInclude (match):
global doc_include_path
file = match.group(2)
file = re.sub('["\']', '', file)
content = ''
try:
stream = open(doc_include_path + '/common-text/' + file, 'r')
content = stream.read()
stream.close()
except (Exception,):
e = sys.exc_info()[1]
print('Warning: cannot expand common-text: ' + file)
print(e)
content = removeHTMLcomments(content)
# Quote embedded double quotes.
content = re.sub('\"', '\\\"', content)
return content
def translateCopydetails (match):
name = match.group(1)
if (name in allclassdocs):
text = allclassdocs[name]
else:
# If it's not found, just write out what we read in.
text = '@copydetails ' + name
return text
def translateIfElse (match):
text = match.group()
if match.group(1) == language or \
match.group(1) == 'notcpp' or \
match.group(1) == 'notclike':
text = match.group(2)
elif match.group(4) == '@else':
text = match.group(5)
else:
text = ''
return text
def translateJavaCrossRef (match):
prior = match.group(1)
classname = match.group(2)
method = match.group(3)
return prior + '{@link ' + classname + '#' + method + '}'
def translateCSharpCrossRef (match):
prior = match.group(1)
classname = match.group(2)
method = match.group(3)
return prior + '<see cref="' + classname + '.' + method + '"/>'
def translatePythonCrossRef (match):
prior = match.group(1)
classname = match.group(2)
method = match.group(3)
args = match.group(4)
return prior + classname + "." + method + "()"
def translatePythonSeeRef (match):
prior = match.group(1)
method = match.group(2)
args = match.group(3)
return prior + method + "()"
def rewriteClassRefAddingSpace (match):
return match.group(1) + match.group(2) + match.group(3)
def rewriteClassRef (match):
return match.group(1) + match.group(2)
def translateClassRefJava (match):
leading = match.group(1)
classname = match.group(2)
trailing = match.group(3)
if leading != '%' and leading != '(':
return leading + '{@link ' + classname + '}' + trailing
else:
return leading + classname + trailing
def translateClassRefCSharp (match):
leading = match.group(1)
classname = match.group(2)
trailing = match.group(3)
if leading != '%' and leading != '(':
return leading + '<see cref="' + classname + '"/>' + trailing
else:
return leading + classname + trailing
def rewriteList (match):
lead = match.group(1);
list = match.group(2);
space = match.group(3);
ending = match.group(4);
list = re.sub(r'@li\b', '<li>', list)
    list = re.sub('<p>', '', list)  # Remove embedded <p>'s.
return lead + "<ul>\n" + lead + list + "\n" + lead + "</ul>" + space + ending;
def rewriteDeprecated (match):
lead = match.group(1);
depr = match.group(2);
body = match.group(3);
ending = match.group(5);
return lead + depr + '<div class="deprecated">' + body + '</div>\n' + lead + ending
def sanitizeForHTML (docstring):
"""sanitizeForHTML (docstring) -> docstring
Performs HTML transformations on the C++/Doxygen docstring.
"""
# Remove some things we use as hacks in Doxygen 1.7-1.8.
docstring = docstring.replace(r'@~', '')
p = re.compile('^\s*\*\s+@par(\s)', re.MULTILINE)
docstring = p.sub(r'\1', docstring)
# Remove @ref's, since we currently have no way to deal with them.
docstring = re.sub('@ref\s+\w+', '', docstring)
# First do conditional section inclusion based on the current language.
# Our possible conditional elements and their meanings are:
#
# java: only Java
# python: only Python
# perl: only Perl
# cpp: only C++
# csharp: only C#
# conly: only C
# clike: C, C++
# notcpp: not C++
# notclike: not C or C++
#
# The notcpp/notclike variants are because Doxygen 1.6.x doesn't have
# @ifnot, yet sometimes we want to say "if not C or C++".
cases = 'java|python|perl|cpp|csharp|conly|clike|notcpp|notclike'
p = re.compile('@if\s+(' + cases + ')\s+(.+?)((@else)\s+(.+?))?@endif', re.DOTALL)
docstring = p.sub(translateIfElse, docstring)
# Replace blank lines between paragraphs with <p>. There are two main
# cases: comments blocks whose lines always begin with an asterix (e.g.,
# C/C++), and comment blocks where they don't (e.g., Python). The third
# substitution below does the same thing for blank lines, except for the
# very end of the doc string.
p = re.compile('^(\s+)\*\s*$', re.MULTILINE)
docstring = p.sub(r'\1* <p>', docstring)
p = re.compile('^((?!\s+\Z)\s+)$', re.MULTILINE)
docstring = p.sub(r'\1<p>', docstring)
p = re.compile('^(?!\Z)$', re.MULTILINE)
docstring = p.sub(r'<p>', docstring)
# There's no Javadoc verbatim or @code/@endcode equivalent, so we have to
# convert it to raw HTML and transform the content too. This requires
# helpers. The following treats both @verbatim and @code the same way.
p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)
docstring = p.sub(translateVerbatim, docstring)
p = re.compile('@code.+?@endcode', re.DOTALL)
docstring = p.sub(translateVerbatim, docstring)
# Javadoc doesn't have a @section or @subsection commands, so we translate
# those ourselves.
p = re.compile('@section\s+[^\s]+\s+(.*)$', re.MULTILINE)
docstring = p.sub(r'<h2>\1</h2>', docstring)
p = re.compile('@subsection\s+[^\s]+\s+(.*)$', re.MULTILINE)
docstring = p.sub(r'<h3>\1</h3>', docstring)
p = re.compile('@subsubsection\s+[^\s]+\s+(.*)$', re.MULTILINE)
docstring = p.sub(r'<h4>\1</h4>', docstring)
# Javadoc doesn't have an @image command. We translate @image html
# but ditch @image latex.
p = re.compile('@image\s+html+\s+([^\s]+).*$', re.MULTILINE)
docstring = p.sub(r"<center class='image'><img src='\1'></center>", docstring)
p = re.compile('@image\s+latex+\s+([^\s]+).*$', re.MULTILINE)
docstring = p.sub(r'', docstring)
# Doxygen doesn't understand HTML character codes like ≥, so we've
# been using doxygen's Latex facility to get special mathematical
# characters into the documentation, but as luck would have it, Javadoc
# doesn't understand the Latex markup. All of this is getting old.
docstring = re.sub(r'\\f\$\\geq\\f\$', '≥', docstring)
docstring = re.sub(r'\\f\$\\leq\\f\$', '≤', docstring)
docstring = re.sub(r'\\f\$\\times\\f\$', '×', docstring)
# The following are done in pairs because I couldn't come up with a
# better way to catch the case where @c and @em end up alone at the end
# of a line and the thing to be formatted starts on the next one after
# the comment '*' character on the beginning of the line.
docstring = re.sub('@c +([^ ,;()/*\n\t<]+)', r'<code>\1</code>', docstring)
docstring = re.sub('@c(\n[ \t]*\*[ \t]*)([^ ,;()/*\n\t<]+)', r'\1<code>\2</code>', docstring)
docstring = re.sub('@p +([^ ,.:;()/*\n\t<]+)', r'<code>\1</code>', docstring)
docstring = re.sub('@p(\n[ \t]*\*[ \t]+)([^ ,.:;()/*\n\t<]+)', r'\1<code>\2</code>', docstring)
docstring = re.sub('@em *([^ ,.:;()/*\n\t<]+)', r'<em>\1</em>', docstring)
docstring = re.sub('@em(\n[ \t]*\*[ \t]*)([^ ,.:;()/*\n\t<]+)', r'\1<em>\2</em>', docstring)
# Convert @li into <li>, but also add <ul> ... </ul>. This is a bit
# simple-minded (I suppose like most of this code), but ought to work
# for the cases we use in practice.
p = re.compile('^(\s+\*\s+)(@li\s+.*?)(\s+)(\*/|<p>\s+\*\s+(?!@li\s))', re.MULTILINE|re.DOTALL)
docstring = p.sub(rewriteList, docstring)
# Wrap @deprecated content with a class so that we can style it.
p = re.compile('^(\s+\*\s+)(@deprecated\s)((\S|\s)+)(<p>|\*/)', re.MULTILINE|re.DOTALL)
docstring = p.sub(rewriteDeprecated, docstring)
# Doxygen automatically cross-references class names in text to the class
# definition page, but Javadoc does not. Rather than having to put in a
    # lot of conditional @if/@endif's into the documentation to manually create
# cross-links just for the Java case, let's automate. This needs to be
# done better (e.g., by not hard-wiring the class names).
p = re.compile(r'([^a-zA-Z0-9_.">])(' + '|'.join(libsbml_classes) + r')\b([^:])', re.DOTALL)
if language == 'csharp':
docstring = p.sub(translateClassRefCSharp, docstring)
elif language == 'java':
docstring = p.sub(translateClassRefJava, docstring)
# Massage method cross-references.
p = re.compile('(\s+)(\S+?)::(\w+\s*\([^)]*?\))', re.MULTILINE)
if language == 'csharp':
docstring = p.sub(translateCSharpCrossRef, docstring)
elif language == 'java':
docstring = p.sub(translateJavaCrossRef, docstring)
# Clean-up step needed because some of the procedures above are imperfect.
# The first converts " * * @foo" lines into " * @foo".
# The 2nd converts * <p> * <p> * sequences into one <p>.
p = re.compile('^(\s+)\*\s+\*\s+@', re.MULTILINE)
docstring = p.sub(r'\1* @', docstring)
p = re.compile('^(\s*)\*\s*<p>', re.MULTILINE)
docstring = p.sub(r'\1<p>', docstring)
p = re.compile('^(\s*)\*?\s*<p>((\s+\*)+\s+<p>)+', re.MULTILINE)
docstring = p.sub(r'\1*', docstring)
# Merge separated @see's, or else the first gets lost in the javadoc output.
p = re.compile(r'(@see.+?)<p>.+?@see', re.DOTALL)
docstring = p.sub(r'\1@see', docstring)
# If the doc string ends with <p> followed by */, then javadoc parses it
# incorrectly. Since we typically end class and method docs with a list of
# @see's, the consequence is that it omits the last entry of a list of
# @see's. The behavior is totally baffling, but let's just deal with it.
# The two forms below are because, when we are processing method doc
# strings, they do not yet end with "*/" when we process them here, so we
# match against either the end of the string or a "*/".)
p = re.compile(r'(<p>\s*)+\*/', re.MULTILINE)
docstring = p.sub(r'*/', docstring)
p = re.compile(r'(<p>\s*)+\Z', re.MULTILINE)
docstring = p.sub(r'', docstring)
# Take out any left-over Doxygen-style quotes, because Javadoc doesn't have
# the %foo quoting mechanism.
docstring = re.sub(r'(\s)%(\w)', r'\1\2', docstring)
# Currently, we don't handle @ingroup or our pseudo-tag, @sbmlpackage.
docstring = re.sub(r'@ingroup \w+', '', docstring)
docstring = re.sub(r'@sbmlpackage{\w+}', '', docstring)
return docstring
def removeStar (match):
text = match.group()
text = text.replace('*', '')
return text
def removeHTMLcomments (docstring):
return re.sub(r'<!--.+?\s-->', '', docstring, re.DOTALL|re.MULTILINE)
def rewriteDocstringForJava (docstring):
"""rewriteDocstringForJava (docstring) -> docstring
    Performs some minimal javadoc-specific sanitizations on the
C++/Doxygen docstring.
"""
docstring = rewriteCommonReferences(docstring)
# Preliminary: rewrite some of the data type references to equivalent
# Java types. (Note: this rewriting affects only the documentation
# comments inside classes & methods, not the method signatures.)
docstring = docstring.replace(r'const char *', 'String ')
docstring = docstring.replace(r'const char* ', 'String ')
docstring = docstring.replace(r'an unsigned int', 'a long integer')
docstring = docstring.replace(r'unsigned int', 'long')
docstring = docstring.replace(r'const std::string&', 'String')
docstring = docstring.replace(r'const std::string &', 'String ')
docstring = docstring.replace(r'const std::string ', 'String ')
docstring = docstring.replace(r'std::string', 'String')
docstring = docstring.replace(r'NULL', 'null')
docstring = re.sub(r'\bbool\b', 'boolean', docstring)
# Also use Java syntax instead of "const XMLNode*" etc.
p = re.compile(r'const (%?)(' + '|'.join(libsbml_classes) + r')( ?)(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRefAddingSpace, docstring)
p = re.compile(r'(%?)(' + '|'.join(libsbml_classes) + r')( ?)(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRefAddingSpace, docstring)
# Do the big work.
docstring = sanitizeForHTML(docstring)
# Fix up for a problem introduced by sanitizeForHTML: it puts {@link ...}
# into the arguments of functions mentioned in @see's, if the function has
# more than one argument. The following gets rid of the @link's. This
# should be fixed properly some day.
p = re.compile(r'((@see|@throws)\s+[\w\\ ,.\'"=<>()#]*?){@link\s+([^}]+?)}')
while re.search(p, docstring) != None:
docstring = p.sub(r'\1\3', docstring)
# Inside of @see, change double colons to pound signs.
docstring = re.sub('(@see\s+\w+)::', r'\1#', docstring)
# The syntax for @see is slightly different: method names need to have a
# leading pound sign character. This particular bit of code only handles
# a single @see foo(), which means the docs have to be written that way.
# Maybe someday in the future it should be expanded to handle
# @see foo(), bar(), etc., but I don't have time right now to do it.
docstring = re.sub('(@see\s+)([\w:.]+)\(', r'\1#\2(', docstring)
# Remove the '*' character that Javadoc doesn't want to see in @see's.
# (This doesn't make a difference; javadoc still can't match up the refs.)
# p = re.compile('@see[\s\w.:,()#]+[*][\s\w.:,()*#]')
# docstring = p.sub(removeStar, docstring)
# The syntax for @link is vastly different.
p = re.compile('@link([\s/*]+[\w\s,.:#()*]+[\s/*]*[\w():#]+[\s/*]*)@endlink', re.DOTALL)
docstring = p.sub(r'{@link \1}', docstring)
# Outside of @see and other constructs, dot is used to reference members
# instead of C++'s double colon.
docstring = docstring.replace(r'::', '.')
# Need to escape quotation marks. The reason is that the
# %javamethodmodifiers directives created for use with SWIG will
# themselves be double-quoted strings, and leaving embedded quotes
# will completely screw that up.
docstring = docstring.replace('"', "'")
docstring = docstring.replace(r"'", r"\'")
return docstring
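# Illustration of the overall effect of rewriteDocstringForJava() on a
# hypothetical input fragment:
#
#   /** Returns the value of the 'id' attribute of this SBase object,
#    *  as a std::string, or NULL if unset. */
#
# Here 'std::string' becomes 'String', 'NULL' becomes 'null', the SBase
# mention is wrapped as {@link SBase} (when SBase is a known class name), and
# double quotes are turned into escaped single quotes so the result can be
# embedded inside a %javamethodmodifiers string.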
def rewriteDocstringForCSharp (docstring):
"""rewriteDocstringForCSharp (docstring) -> docstring
    Performs some minimal C#-specific sanitizations on the
C++/Doxygen docstring.
"""
# Remove some things we use as hacks in Doxygen 1.7-1.8.
docstring = docstring.replace(r'@~', '')
p = re.compile('@par(\s)', re.MULTILINE)
docstring = p.sub(r'\1', docstring)
# Rewrite some common things.
docstring = rewriteCommonReferences(docstring)
# Rewrite some of the data type references to equivalent C# types. (Note:
# this rewriting affects only the documentation comments inside classes &
# methods, not the actual method signatures.)
docstring = docstring.replace(r'const char *', 'string ')
docstring = docstring.replace(r'const char* ', 'string ')
docstring = docstring.replace(r'an unsigned int', 'a long integer')
docstring = docstring.replace(r'unsigned int', 'long')
docstring = docstring.replace(r'const std::string&', 'string')
docstring = docstring.replace(r'const std::string &', 'string ')
docstring = docstring.replace(r'const std::string', 'string')
docstring = docstring.replace(r'std::string', 'string')
docstring = docstring.replace(r'const ', '')
docstring = docstring.replace(r'NULL', 'null')
docstring = docstring.replace(r'boolean', 'bool')
# Use C# syntax instead of "const XMLNode*" etc.
p = re.compile(r'const (%?)(' + '|'.join(libsbml_classes) + r')( ?)(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRefAddingSpace, docstring)
p = re.compile(r'(%?)(' + '|'.join(libsbml_classes) + r')( ?)(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRefAddingSpace, docstring)
# Do replacements on some documentation text we sometimes use.
p = re.compile(r'libsbmlConstants([@.])')
docstring = p.sub(r'libsbmlcs.libsbml\1', docstring)
# Fix @link for constants that we forgot conditionalize in the source.
p = re.compile(r'@link +([A-Z_0-9]+?)@endlink', re.DOTALL)
docstring = p.sub(r'@link libsbml.\1@endlink', docstring)
# Can't use math symbols. Kluge around it.
docstring = re.sub(r'\\f\$\\geq\\f\$', '>=', docstring)
docstring = re.sub(r'\\f\$\\leq\\f\$', '<=', docstring)
docstring = re.sub(r'\\f\$\\times\\f\$', '*', docstring)
# Some additional special cases.
docstring = docstring.replace(r'SBML_formulaToString()', 'libsbmlcs.libsbml.formulaToString()')
docstring = docstring.replace(r'SBML_parseFormula()', 'libsbmlcs.libsbml.parseFormula()')
# Need to escape the quotation marks:
docstring = docstring.replace('"', "'")
docstring = docstring.replace(r"'", r"\'")
return docstring
def rewriteDocstringForPython (docstring):
"""rewriteDocstringForPython (docstring) -> docstring
    Performs some minimal Python-specific sanitizations on the
C++/Doxygen docstring.
Note: this is not the only processing performed for the Python
documentation. In docs/src, the doxygen-based code has an additional
filter that processes the output of *this* filter.
"""
# Rewrite some common things.
docstring = rewriteCommonReferences(docstring)
# Take out the C++ comment start and end.
docstring = docstring.replace('/**', '').replace('*/', '')
p = re.compile(r'^\s*\*[ \t]*', re.MULTILINE)
docstring = p.sub(r'', docstring)
# Rewrite some of the data type references to equivalent Python types.
# (Note: this rewriting affects only the documentation comments inside
# classes & methods, not the method signatures.)
docstring = docstring.replace(r'const char *', 'string ')
docstring = docstring.replace(r'const char* ', 'string ')
docstring = docstring.replace(r'an unsigned int', 'a long integer')
docstring = docstring.replace(r'unsigned int', 'long')
docstring = docstring.replace(r'const std::string&', 'string')
docstring = docstring.replace(r'const std::string', 'string')
docstring = docstring.replace(r'std::string', 'string')
docstring = docstring.replace(r'NULL', 'None')
docstring = docstring.replace(r'@c true', '@c True')
docstring = docstring.replace(r'@c false', '@c False')
# Also use Python syntax instead of "const XMLNode*" etc.
p = re.compile(r'const (%?)(' + '|'.join(libsbml_classes) + r') ?(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRef, docstring)
p = re.compile(r'(%?)(' + '|'.join(libsbml_classes) + r') ?(\*|&)', re.DOTALL)
docstring = p.sub(rewriteClassRef, docstring)
# Need to escape the quotation marks:
docstring = docstring.replace('"', "'")
docstring = docstring.replace(r"'", r"\'")
# Python method cross-references won't be made by doxygen unless
# the method reference is written without arguments.
p = re.compile('(\s+)(\S+?)::(\w+\s*)(\([^)]*?\))', re.MULTILINE)
docstring = p.sub(translatePythonCrossRef, docstring)
p = re.compile('(@see\s+)(\w+\s*)(\([^)]*?\))')
docstring = p.sub(translatePythonSeeRef, docstring)
return docstring
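# Illustration of rewriteDocstringForPython() on a hypothetical fragment:
#
#   /** Returns @c true if the 'id' attribute of this SBase* is set. */
#
# The /** ... */ wrapper and leading '*'s are removed, '@c true' becomes
# '@c True', the trailing '*' after SBase is dropped by rewriteClassRef, and
# any double quotes are converted to escaped single quotes.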
def rewriteDocstringForPerl (docstring):
"""rewriteDocstringForPerl (docstring) -> docstring
    Performs some minimal Perl-specific sanitizations on the
C++/Doxygen docstring.
"""
docstring = rewriteCommonReferences(docstring)
# Get rid of the /** ... */ and leading *'s.
docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')
# Get rid of indentation
p = re.compile('^\s+(\S*\s*)', re.MULTILINE)
docstring = p.sub(r'\1', docstring)
# Get rid of paragraph indentation not caught by the code above.
p = re.compile('^[ \t]+(\S)', re.MULTILINE)
docstring = p.sub(r'\1', docstring)
# Get rid of blank lines.
p = re.compile('^[ \t]+$', re.MULTILINE)
docstring = p.sub(r'', docstring)
# Get rid of the %foo quoting.
docstring = re.sub('(\s)%(\w)', r'\1\2', docstring)
# The following are done in pairs because I couldn't come up with a
# better way to catch the case where @c and @em end up alone at the end
# of a line and the thing to be formatted starts on the next one after
# the comment '*' character on the beginning of the line.
docstring = re.sub('@c *([^ ,.:;()/*\n\t<]+)', r'C<\1>', docstring)
docstring = re.sub('@c(\n[ \t]*\*[ \t]*)([^ ,.:;()/*\n\t<]+)', r'\1C<\2>', docstring)
docstring = re.sub('@p +([^ ,.:;()/*\n\t<]+)', r'C<\1>', docstring)
docstring = re.sub('@p(\n[ \t]*\*[ \t]+)([^ ,.:;()/*\n\t<]+)', r'\1C<\2>', docstring)
docstring = re.sub('@em *([^ ,.:;()/*\n\t<]+)', r'I<\1>', docstring)
docstring = re.sub('@em(\n[ \t]*\*[ \t]*)([^ ,.:;()/*\n\t<]+)', r'\1I<\2>', docstring)
    docstring = docstring.replace('<ul>', '\n=over\n')
    docstring = docstring.replace('<li> ', '\n=item\n\n')
    docstring = docstring.replace('</ul>', '\n=back\n')
    docstring = re.sub(r'@returns?', 'Returns', docstring)
docstring = docstring.replace(' < ', ' E<lt> ').replace(' > ', ' E<gt> ')
docstring = re.sub('<code>([^<]*)</code>', r'C<\1>', docstring)
docstring = re.sub('<b>([^<]*)</b>', r'B<\1>', docstring)
return docstring
def processClassMethods(ostream, c):
# In the Python docs, we have to combine the docstring for methods with
# different signatures and write out a single method docstring. In the
# other languages, we write out separate docstrings for every method
# having a different signature.
if language == 'python':
written = {}
for m in c.methods:
if m.name + m.args in written:
continue
if m.name.startswith('~'):
continue
if c.methodVariants[m.name].__len__() > 1:
# This method has more than one variant. It's possible some or all
# of them are marked @internal. Therefore, before we start writing
# a statement that there are multiple variants, we must check that
# we're left with more than one non-internal method to document.
count = 0
for argVariant in list(c.methodVariants[m.name].values()):
if re.search('@internal', argVariant.docstring) == None:
count += 1
if count <= 1:
continue
newdoc = ' This method has multiple variants that differ in the' + \
' arguments\n they accept. Each is described separately' + \
' below.\n'
for argVariant in list(c.methodVariants[m.name].values()):
# Each entry in the methodVariants dictionary is itself a dictionary.
# The dictionary entries are keyed by method arguments (as strings).
# The dictionary values are the 'func' objects we use.
if re.search('@internal', argVariant.docstring) == None:
newdoc += "\n@par\n<hr>\n<span class='variant-sig-heading'>Method variant with the following"\
+ " signature</span>:\n <pre class='signature'>" \
+ argVariant.name \
+ rewriteDocstringForPython(argVariant.args) \
+ "</pre>\n\n"
newdoc += rewriteDocstringForPython(argVariant.docstring)
written[argVariant.name + argVariant.args] = 1
else:
newdoc = rewriteDocstringForPython(m.docstring)
ostream.write(formatMethodDocString(m.name, c.name, newdoc, m.isInternal, m.args))
written[m.name + m.args] = 1
else: # Not python
for m in c.methods:
if m.name.startswith('~'):
continue
if language == 'java':
newdoc = rewriteDocstringForJava(m.docstring)
elif language == 'csharp':
newdoc = rewriteDocstringForCSharp(m.docstring)
elif language == 'perl':
newdoc = rewriteDocstringForPerl(m.docstring)
# print c.name + ": " + m.name + " " + str(m.isInternal)
ostream.write(formatMethodDocString(m.name, c.name, newdoc, m.isInternal, m.args))
ostream.flush()
def formatMethodDocString (methodname, classname, docstring, isInternal, args=None):
if language == 'java':
pre = '%javamethodmodifiers'
post = ' public'
elif language == 'csharp':
pre = '%csmethodmodifiers'
# See the comment for the definition of 'overriders' for more info.
if classname in overriders and methodname in overriders[classname]:
post = ' public new'
else:
post = ' public'
if isInternal:
post = ' /* libsbml-internal */' + post
elif language == 'perl':
pre = '=item'
post = ''
elif language == 'python':
pre = '%feature("docstring")'
if isInternal:
post = '\n\n@internal'
else:
post = ''
output = pre + ' '
if classname:
output += classname + '::'
if language == 'perl':
output += '%s\n\n%s%s\n\n\n' % (methodname, docstring.strip(), post)
elif language == 'python':
output += '%s "\n%s%s\n";\n\n\n' % (methodname, docstring.strip(), post)
else:
output += '%s%s "\n%s%s\n";\n\n\n' % (methodname, args, docstring.strip(), post)
return output
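# Sketch of the output of formatMethodDocString() (illustrative arguments).
# For language == 'python',
#
#   formatMethodDocString('getId', 'SBase', 'Returns the id.', False)
#
# yields roughly
#
#   %feature("docstring") SBase::getId "
#   Returns the id.
#   ";
#
# For 'java'/'csharp' the SWIG argument string is appended after the method
# name and the 'public' / 'public new' modifier goes just before the closing
# quote; for 'perl' the text becomes an '=item' POD entry instead.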
def generateFunctionDocString (methodname, docstring, args, isInternal):
if language == 'java':
doc = rewriteDocstringForJava(docstring)
elif language == 'csharp':
doc = rewriteDocstringForCSharp(docstring)
elif language == 'python':
doc = rewriteDocstringForPython(docstring)
elif language == 'perl':
doc = rewriteDocstringForPerl(docstring)
return formatMethodDocString(methodname, None, doc, isInternal, args)
def generateClassDocString (docstring, classname, isInternal):
pretext = ''
separator = ''
posttext = ''
if language == 'java':
pretext = '%typemap(javaimports) '
separator = ' "\n'
posttext = '\n"\n\n\n'
docstring = rewriteDocstringForJava(docstring).strip()
elif language == 'python':
pretext = '%feature("docstring") '
separator = ' "\n'
posttext = '\n";\n\n\n'
docstring = rewriteDocstringForPython(docstring).strip()
elif language == 'csharp':
pretext = '%typemap(csimports) '
separator = ' "\n using System;\n using System.Runtime.InteropServices;\n\n'
posttext = '\n"\n\n\n'
docstring = rewriteDocstringForCSharp(docstring).strip()
elif language == 'perl':
pretext = '=back\n\n=head2 '
separator = '\n\n'
posttext = '\n\n=over\n\n\n'
docstring = rewriteDocstringForPerl(docstring).strip()
# If this is one of our fake classes used for creating commonly-reused
# documentation strings, we don't write it to the output file; we only
# store the documentation string in a global variable to be used later.
if classname.startswith('doc_'):
allclassdocs[classname] = docstring
return ''
else:
return pretext + classname + separator + docstring + posttext
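# Sketch (illustrative only): for language == 'python' and a class named
# 'SBase', generateClassDocString() returns roughly
#
#   %feature("docstring") SBase "
#   <class documentation after rewriteDocstringForPython>
#   ";
#
# Classes whose names start with 'doc_' produce no output; their text is only
# stored in allclassdocs for later @copydetails expansion.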
def processClasses (ostream, classes):
for c in classes:
processClassMethods(ostream, c)
def processFunctions (ostream, functions):
for f in functions:
ostream.write(generateFunctionDocString(f.name, f.docstring, f.args, f.isInternal))
def processClassDocs (ostream, classDocs):
for c in classDocs:
ostream.write(generateClassDocString(c.docstring, c.name, c.isInternal))
def processFile (filename, ostream, language, preprocessor_defines):
"""processFile (filename, ostream, language, preprocessor_defines)
    Reads the given header file and writes to ostream the necessary SWIG
incantation to annotate each method (or function) with a docstring
appropriate for the given language.
"""
istream = open(filename)
header = CHeader(istream, language, preprocessor_defines)
istream.close()
processClassDocs(ostream, header.classDocs)
processClasses(ostream, header.classes)
processFunctions(ostream, header.functions)
ostream.flush()
def postProcessOutputForPython(contents):
"""Do post-processing on the final output for Python."""
# Friggin' doxygen escapes HTML character codes it doesn't understand, so
# the hack we have to do for Javadoc turns out doesn't work for the Python
# documentation. Kluge around it.
contents = re.sub(r'\\f\$\\geq\\f\$', '>=', contents)
contents = re.sub(r'\\f\$\\leq\\f\$', '<=', contents)
contents = re.sub(r'\\f\$\\times\\f\$', '*', contents)
contents = re.sub(r'"', '\\\"', contents)
# Doxygen doesn't understand <nobr>.
contents = re.sub(r'</?nobr>', '', contents)
return contents
def postProcessOutput(istream, ostream):
"""postProcessOutput(instream, outstream)
Post-processes the output to perform final substitutions."""
contents = istream.read()
p = re.compile('@copydetails\s+(\w+)')
contents = p.sub(translateCopydetails, contents)
# Do additional post-processing on a language-specific basis.
if language == 'python':
contents = postProcessOutputForPython(contents)
elif language == 'java':
# Javadoc doesn't have an @htmlinclude command, so we process the file
# inclusion directly here.
p = re.compile('@htmlinclude\s+(\*\s+)*([-\w."\']+)', re.DOTALL)
contents = p.sub(translateInclude, contents)
ostream.write(contents)
#
# Top-level main function and command-line argument parser.
#
__desc_end = '''This file is part of libSBML. Please visit http://sbml.org for
more information about SBML, and the latest version of libSBML.'''
def parse_cmdline(direct_args = None):
parser = argparse.ArgumentParser(epilog=__desc_end)
parser.add_argument("-d", "--define", action='append',
help="define #ifdef symbol when scanning files for includes")
parser.add_argument("-l", "--language", required=True,
help="language for which to generate SWIG docstrings")
parser.add_argument("-m", "--master", required=True,
help="top-level SWIG interface .i file to read")
parser.add_argument("-o", "--output", required=True,
help="output file where SWIG docstrings will be written")
parser.add_argument("-t", "--top", required=True,
help="path to top of libSBML source directory")
return parser.parse_args(direct_args)
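# Example invocation (hypothetical file names and paths; the script name is
# whatever this file is installed as):
#
#   python <this-script>.py -l python -m ../swig/libsbml.i \
#          -o pydoc.i -t ../.. -d USE_LAYOUT
#
# -d/--define may be repeated; each value is appended to preprocessor_defines
# before the header files are scanned.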
def expanded_path(path):
if path: return os.path.expanduser(os.path.expandvars(path))
else: return ''
def get_language(direct_args = None):
return direct_args.language
def get_master_file(direct_args = None):
return os.path.abspath(expanded_path(direct_args.master))
def get_output_file(direct_args = None):
return os.path.abspath(expanded_path(direct_args.output))
def get_top_dir(direct_args = None):
return os.path.abspath(expanded_path(direct_args.top))
def get_defines(direct_args = None):
if direct_args.define: return direct_args.define
else: return []
def main (args):
global doc_include_path
global header_files
global language
global libsbml_classes
global preprocessor_defines
args = parse_cmdline()
language = get_language(args)
main_swig_file = get_master_file(args)
output_swig_file = get_output_file(args)
h_include_path = os.path.join(get_top_dir(args), 'src')
doc_include_path = os.path.join(get_top_dir(args), 'docs', 'src')
preprocessor_defines += get_defines(args)
# We first write all our output to a temporary file. Later, we open this
# file, post-process it, and write the final output to the real destination.
tmpfilename = output_swig_file + ".tmp"
stream = open(tmpfilename, 'w')
# Find all class names, by searching header files for @class declarations
# and SWIG .i files for %template declarations. We need this list to
# recognize when class names are mentioned inside documentation text.
swig_files = get_swig_files(main_swig_file)
header_files = get_header_files(swig_files, h_include_path)
libsbml_classes = libsbmlutils.find_classes(header_files)
libsbml_classes += libsbmlutils.find_classes(swig_files)
try:
libsbml_classes = sorted(list(set(libsbml_classes)))
except (Exception,):
e = sys.exc_info()[1]
pass
# Now, do the main processing pass, writing the output as we go along.
if language == 'perl':
if (os.path.exists(os.path.abspath('LibSBML.txt'))):
infile = open(os.path.abspath('LibSBML.txt'), 'r')
else:
infile = open(h_include_path + '/bindings/perl/LibSBML.txt', 'r')
stream.write(infile.read())
stream.write('=head1 FUNCTION INDEX\n\n=over 8\n\n')
for file in header_files:
processFile(file, stream, language, preprocessor_defines)
if os.path.exists('local-doc-extras.i'):
stream.write('\n%include "local-doc-extras.i"\n')
if language == 'perl':
stream.write('=cut\n')
stream.close()
# Certain things can't be done until we have seen all the input. So, now
# we reopen the file we wrote, post-process the contents, and write the
    # results to the real destination (the output file named on the command line).
tmpstream = open(tmpfilename, 'r')
finalstream = open(output_swig_file, 'w')
postProcessOutput(tmpstream, finalstream)
try:
tmpstream.flush()
tmpstream.close()
except (Exception,):
e = sys.exc_info()[1]
#FB: not printing the warning below, as after all the documentation file
# has been correctly created.
pass
# print "\tWarning, error flushing stream \n\t\t'%s'. \n\tThis is not a serious error, but an issue with the python interpreter known to occur in python 2.7." % e
finalstream.flush()
finalstream.close()
os.remove(tmpfilename)
if __name__ == '__main__':
main(sys.argv)
## The following is for Emacs users. Please leave in place.
## Local Variables:
## python-indent-offset: 2
## End:
| bsd-3-clause | -7,094,538,123,545,906,000 | 35.247038 | 266 | 0.63509 | false |
mandrav/bCNC | lib/tkExtra.py | 2 | 142446 | #!/bin/env python
# $Id: tkExtra.py 3493 2015-04-08 08:52:01Z bnv $
#
# Copyright and User License
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright [email protected] for the
# European Organization for Nuclear Research (CERN)
#
# All rights not expressly granted under this license are reserved.
#
# Installation, use, reproduction, display of the
# software ("flair"), in source and binary forms, are
# permitted free of charge on a non-exclusive basis for
# internal scientific, non-commercial and non-weapon-related
# use by non-profit organizations only.
#
# For commercial use of the software, please contact the main
# author [email protected] for further information.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# DISCLAIMER
# ~~~~~~~~~~
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY, OF
# SATISFACTORY QUALITY, AND FITNESS FOR A PARTICULAR PURPOSE
# OR USE ARE DISCLAIMED. THE COPYRIGHT HOLDERS AND THE
# AUTHORS MAKE NO REPRESENTATION THAT THE SOFTWARE AND
# MODIFICATIONS THEREOF, WILL NOT INFRINGE ANY PATENT,
# COPYRIGHT, TRADE SECRET OR OTHER PROPRIETARY RIGHT.
#
# LIMITATION OF LIABILITY
# ~~~~~~~~~~~~~~~~~~~~~~~
# THE COPYRIGHT HOLDERS AND THE AUTHORS SHALL HAVE NO
# LIABILITY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL,
# CONSEQUENTIAL, EXEMPLARY, OR PUNITIVE DAMAGES OF ANY
# CHARACTER INCLUDING, WITHOUT LIMITATION, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES, LOSS OF USE, DATA OR PROFITS,
# OR BUSINESS INTERRUPTION, HOWEVER CAUSED AND ON ANY THEORY
# OF CONTRACT, WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT
# LIABILITY OR OTHERWISE, ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
#
# Author: [email protected]
# Date: 12-Oct-2006
__author__ = "Vasilis Vlachoudis"
__email__ = "[email protected]"
import os
import sys
import re
import time
import signal
import string
import Unicode
from log import say
try:
from Tkinter import *
from Tkinter import _setit
from tkColorChooser import askcolor
except ImportError:
from tkinter import *
from tkinter import _setit
from tkinter.colorchooser import askcolor
ARROW_LEFT = u"\u2190"
ARROW_UP = u"\u2191"
ARROW_RIGHT = u"\u2192"
ARROW_DOWN = u"\u2193"
CROSS = u"\u2A2F" # x -cross product
MULT = u"\u00D7" # x -multiplication
DIAMOND_SQUARE = u"\u26CB"
# Key state codes
SHIFT_MASK = 1
CONTROL_MASK = 4
ALT_MASK = 8
# Ansi escape sequences
ANSI_CLEAR = "\033[2J"
ANSI_BOLD = "\033[1m"
ANSI_UNDERLINE = "\033[4m"
ANSI_REVERSE = "\033[7m"
ANSI_NORMAL = "\033[m"
# base64.encodestring(open("save.gif","rb").read())
_SAVEICON = """
R0lGODlhEAAQAOcBAAAAAP//////////////////////////////////////////////////////
/////////0xLS0RDRLO0ubO0ubO0ubO0ubS2ubS2u7a4vLe5vLi5vLm5vERDRExLS////////0JA
QVpZWfH0+vD0+vD0+vD0+vH0+vL1+vP2+/X4/Pf5/Pn7/WVjZEJAQf///////0A+P2JgYfn6/Zy2
z5y2z5y2z5y2z522z523z5630J+40P39/mJgYUA+P////////z08PV5cXfv8/fj5+/n5+/j5+/j5
+/f4+vX3+vT2+fL0+PL2+V5cXT08Pf///////zs6O1tZWu7z9+/z9+/z9+/z9+7z9+7y9u3x9uvw
9env9Oft81tZWjs6O////////zk3OFdVVuLq8YuiuIuiuIuiuIuiuIqht4qht4qgt4mgttzl7VdV
Vjk3OP///////zc1NlRSU9Te6NDd6NDd6NDd6NDc58/c58/b587b5s3a5s/a5lRSUzc1Nv//////
/zUzM1FOT1FOT1FOT1FOT1FOT1FOT1FOT1FOT1FOT1FOT1FOT1FOTzUzM////////zIwMU1KS01K
S01KS01KS01KS01KS01KS01KS01KS01KS01KS01KSzIwMf///////zAtLklFRklFRs3NzdbW1tbV
1dXV1dbW1tbW1tPT0ykmJyYjJElFRjAtLv///////yspKkI/QEI/QKenpz88PTUzM66urq6urq6u
rq6urkJAQSgnJ0ZDRCspKv///////yYkJTs4OTs4OZmZmTQxMiglJp+fn5+fn5+fn5+fn0E+PyYk
JVpXWCYkJf///////yQiIi0qKjUxMouKiysoKSEfIJCQkJCQkJCQkJCQkDYzNCEfIFFNTiMgIf//
/////yUfHx8dHR4bHHV1dXh4eHh4eHh4eHh4eHh4eHh4eBsZGhsZGR8cHSEfH///////////////
/////////////////////////////////////////////////////yH5BAEKAP8ALAAAAAAQABAA
AAj4AP8JHEiwoMAIEiZQqGDhAoYMGjZw6OBBYAgRI0iUMHECRQoVK1i0cCEwhowZNGrYuIEjh44d
PHr4EBhEyBAiRYwcQZJEyRImTZwIjCJlCpUqVq5gyaJlC5cuXgSGETOGTBkzZ9CkUbOGTRs3AuPI
mUOnjp07ePLo2cOnjx+BgQQNIlTI0CFEiRQtYtTIkcBIkiZRqmTpEqZMmjZx6uRJYChRo0iVMnUK
VSpVq1i1ciUwlqxZtGrZuoUrl65dvHr5EhhM2DBixYwdQ5ZM2TJmzZwJjCZtGrVq1q5hy6ZtG7du
3gSGEzeOXDlz59ClU7eOXTt3BrMTDAgAOw==
"""
_ALLICON = """
R0lGODlhEAAQAKUiAMwjzMcmx6wt2MEqv7ksxLosxL8su6wv1sMru4c4674swLwtvb8sv70tvbou
vrouwrovucQsurovurkwu7gxuLsxu7szu01N/9gtrNgtrbo1uro2uus4h61XraxZrP9NTYech03/
Tf//////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////yH5BAEKAD8ALAAAAAAQABAAAAaMwJ9Q
OEBgMojBcEmMcD4fTkTJ/AEQT2gUAagCMlptpsv8hqFjLzbM4VZ/AydUSl0CAAGI4WiABO5DDBYe
HRUSAQESDgIHDwo/DBsgISEgGw0LBAkXFwkEChaTlJUUE5ucnQ8do6MaBaioB6usIa6wnAehrCCl
p5wJD5Gilpiav5+Qgx0WDEIKD4yOP0EAOw==
"""
_NONEICON = """
R0lGODlhEAAQAKUhAAAAAA8EBAYLEBgHBwQPBAkLHgoKKwcOFQoLKwUTBQsOJyYKCgoSGysKCg4R
LwcYBy8MDBERRhISSQgeCEYRERgYYkkSEgomCgorCgwvDCAggGIYGIAgIBFGERJJEhhiGCCAIP//
////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////yH5BAEKAD8ALAAAAAAQABAAAAaGwJ9Q
OIBQLJDBcElsbDicTUPJFEKe0CikKrRksxbuz/vlhLnX72bLHTihUmoVEFgcFwEA85HpeC4EAAAC
ChESDgw/DxgfICAfGAkHCBUaGhUIDBmNjo8TBZWWlw4enZ0YBqKiEqWmIKiqlhKbph+foZYVDouc
kJKUuZmKfR4ZD0IMDoaIP0EAOw==
"""
_INVICON = """
R0lGODlhEAAQAOeLAFBQUFBQWFFRUVJSUk1UVE9UVFNUVFRUVFRVVVVVVdI1NVdXV1RYWNM2NlhY
XtM4OFlZXdM6OtM7O1lcXF5bW9Q8PNQ9PdQ+PtQ/P1phYdVAQGBgYNVBQWFhZNVCQmJiYtVDQ9VE
RG5gYGNjY9dERNVFRWZkZNZGRtpFRWVlZdZHRtZHR9hHR9ZISNZJSWdnZ9ZKStZKT9xJSdZLS2pq
a9dNTdhOToRlZWxsbNhPT9hPU21ta9hQUNlQUJthYdhRUdlRUdhSUNhSUppjY9lTVNlWVnJyctlX
V3V1WOFWVt5XV291dXR0dOBXV91YWNpZWdpZW6FpaXV1dXJ3d9pbXNpcXnd3d8liYtNgYNVgYNtf
X79nZ9xhYb1qatxiYnx8fHp9fX19fdZoaICAgN9oaN5sWYODg4WFhd9tbd9vb4mIiN9xceBzc7l+
fqeDg+B0dIOPj+F3d46Ojo+Pj5OTk5eXYZmZTJSUlJWUlKOUlJ2dVeWLWZ2dnZWioqGhoaenp+qj
T+2xUfLCRfLHWNnZON/fPODgMPbYSOPjMvffRvnmO///////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////yH5BAEKAP8ALAAAAAAQABAA
AAjhAP8JHOjlyRMvAxMKTMMDg4QHDSRUEMJGIZoWJTBEaKDgAYcQK9YMfGNjxgkOFh48sKChBIwa
cQRqASIDBQgNF1iWaFHjBxeBTqLA6dOGRAgPJ1QEKaOjiBgyIkxYOYMnj4sWMGIMOgSISpYhBhak
kDJGjRsWOY4kUiQIChYwCQLoqUODyZQtPYjsCVRFSRczB+wQMrSDwocMPrg0SXLlxpJ/A5AgKuQg
wYEDCAhMYFBAwJx/Lw50gJCgtGnTOATSAXC6dQIAfAbKEeC6NIA7Cv1sGHB6wIg/ChN+MWIkTPCA
ADs=
"""
#===============================================================================
# Sort Assist class for MultiListbox
#===============================================================================
class SortAssist:
def __init__(self, column):
self.column = column
def __call__(self, x):
return x[self.column]
#-------------------------------------------------------------------------------
# Multiple configuration of many widgets given in a list
# lst = list of widgets
#-------------------------------------------------------------------------------
def multiConfig(lst, **opts):
"""Multiple configuration of many widgets"""
for w in lst:
w.config(**opts)
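# Example (illustrative): disable a group of existing widgets b1, b2, b3 with
# one call:
#
#	multiConfig((b1, b2, b3), state=DISABLED)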
#-------------------------------------------------------------------------------
# Toggle toplevel window height
#-------------------------------------------------------------------------------
def toggleHeight(root, oldHeight):
"""Toggle window height"""
m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", root.wm_geometry())
if not m:
root.bell()
return oldHeight
width, height, x, y = map(int, m.groups())
maxheight = root.winfo_screenheight()
if sys.platform == 'win32':
newy = 0
newheight = maxheight - 72
else:
#newy = 24
newy = 0
#newheight = maxheight - 96
newheight = maxheight - 88
if height >= newheight:
try:
newheight = oldHeight
except:
return oldHeight
newgeom = "%dx%d+%d+%d" % (width, newheight, x, newy)
root.wm_geometry(newgeom)
return height
#===============================================================================
def _entryPaste(event):
"""global replacement for the Entry.paste"""
try:
event.widget.delete('sel.first', 'sel.last')
except TclError:
pass # nothing is selected
# in tk.call() use the widget's string representation event.widget._w
# instead of event.widget, which is the widget instance itself
try:
text = event.widget.tk.call('::tk::GetSelection', event.widget._w, 'CLIPBOARD')
except TclError:
return
event.widget.insert('insert', text)
event.widget.tk.call('tk::EntrySeeInsert', event.widget._w)
return "break"
#-------------------------------------------------------------------------------
def _textPaste(event):
"""global replacement for the Text.paste"""
oldSeparator = event.widget.cget("autoseparators")
if oldSeparator:
event.widget.config(autoseparators=0)
event.widget.edit_separator()
try:
event.widget.delete('sel.first', 'sel.last')
except TclError:
pass # nothing is selected
# in tk.call() use the widget's string representation event.widget._w
# instead of event.widget, which is the widget instance itself
try:
text = event.widget.tk.call('::tk::GetSelection', event.widget._w, 'CLIPBOARD')
except TclError:
return
event.widget.insert('insert', text)
if oldSeparator:
event.widget.edit_separator()
event.widget.config(autoseparators=1)
event.widget.see('insert')
return "break"
#-------------------------------------------------------------------------------
def bindClasses(root):
root.bind_class('Entry', '<Control-Key-a>', lambda e: e.widget.selection_range(0,END))
root.bind_class('Entry', '<<Paste>>', _entryPaste)
root.bind_class('Text', '<<Paste>>', _textPaste)
#===============================================================================
# LabelEntry. display a label when entry field is empty
#===============================================================================
class LabelEntry(Entry):
def __init__(self, master, label=None, labelcolor=None, **kw):
Entry.__init__(self, master, **kw)
self.label = label
self._empty = True
self._fg = self["foreground"]
if labelcolor is not None:
self.labelcolor = labelcolor
else:
self.labelcolor = self._fg
self.bind("<FocusIn>", self._focusIn)
self.bind("<FocusOut>", self._focusOut)
self["validate"] = "key"
self["validatecommand"] = (self.register(self.validate), '%P')
self.showLabel()
# ----------------------------------------------------------------------
def showLabel(self):
self.delete(0,END)
self.insert(0, self.label)
self["foreground"] = self.labelcolor
self._empty = True # Restore empty since validation will destroy it
# ----------------------------------------------------------------------
def removeLabel(self):
self.delete(0,END)
self["foreground"] = self._fg
# ----------------------------------------------------------------------
def _focusIn(self, event):
if self._empty:
self.removeLabel()
# ----------------------------------------------------------------------
def _focusOut(self, event):
if self._empty or self.get()=="":
self.showLabel()
# ----------------------------------------------------------------------
def validate(self, value):
self._empty = value == ""
return True
# ----------------------------------------------------------------------
def set(self, value):
self._empty = value==""
if self._empty:
self.showLabel()
else:
self.removeLabel()
self.insert(0, value)
# ----------------------------------------------------------------------
def get(self):
if self._empty:
return ""
else:
return Entry.get(self)
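# Minimal usage sketch for LabelEntry (assumes a Tk root already exists):
#
#	e = LabelEntry(root, label="Search...", labelcolor="DarkGray", width=20)
#	e.pack()
#	e.set("foo")	# removes the placeholder; e.get() returns "" only while
#			# the placeholder label is being shown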
#===============================================================================
# _ValidatingEntry
#===============================================================================
class _ValidatingEntry(Entry):
"""base class for validating entry widgets"""
# ----------------------------------------------------------------------
def __init__(self, master, value="", **kw):
Entry.__init__(self, master, **kw)
self["validate"] = "key"
self["validatecommand"] = (self.register(self.validate), '%P')
# ----------------------------------------------------------------------
def validate(self, value):
# override: return True if valid False if invalid
return True
# ----------------------------------------------------------------------
def set(self, value):
self.delete(0,END)
self.insert(0, value)
# ----------------------------------------------------------------------
def getint(self, default=0):
try:
return int(self.get())
except:
return default
# ----------------------------------------------------------------------
def getfloat(self, default=0.0):
try:
return float(self.get())
except:
return default
#===============================================================================
# Maximum Length Entry
#===============================================================================
class MaxLengthEntry(_ValidatingEntry):
"""MaxLengthEntry limit entry length maximum maxlength characters"""
def __init__(self, master, value="", maxlength=None, **kw):
_ValidatingEntry.__init__(self, master, value, **kw)
self.maxlength = maxlength
# ----------------------------------------------------------------------
def insert(self, idx, value):
m = self.maxlength
self.maxlength = None
_ValidatingEntry.insert(self, idx, value)
self.maxlength = m
# ----------------------------------------------------------------------
def validate(self, value):
if self.maxlength is not None:
return len(value) <= self.maxlength
return True
#===============================================================================
# Integer Validating Entry
#===============================================================================
class IntegerEntry(_ValidatingEntry):
"""IntegerEntry accepting only integers"""
# ----------------------------------------------------------------------
def validate(self, value):
try:
if value: int(value)
return True
except ValueError:
if value=="+" or value=="-": return True
return False
#===============================================================================
# Floating Point Validating Entry
#===============================================================================
class FloatEntry(_ValidatingEntry):
"""accept only floating point numbers"""
# ----------------------------------------------------------------------
def validate(self, value):
try:
if value: float(value)
return True
except ValueError:
if value=="+" or value=="-" or value=="." or \
value=="+." or value=="-.": return True
if len(value)>1:
last = value[-1]
if last=="e" or last=="E": return True
plast = value[-2]
if (plast=="e" or plast=="E") and \
(last=="-" or last=="+"): return True
return False
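# Usage sketch for the validating entries (assumes a Tk root already exists):
#
#	f = FloatEntry(root, width=10)
#	f.pack()
#	f.set("3.14")
#	value = f.getfloat(0.0)	# falls back to 0.0 if the content is not a number
#
# IntegerEntry works the same way but only accepts (partially typed) integers.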
#===============================================================================
# Vector Validating Entry
#===============================================================================
class VectorEntry(_ValidatingEntry):
"""accept only vectors"""
# ----------------------------------------------------------------------
def validate(self, value):
# remove from value comma, semicolon, and parenthesis () []
for token in re.sub(r"[(),;\[\]]","",value).split():
try:
float(token)
except ValueError:
if token=="+" or token=="-" or token=="." or \
token=="+." or token=="-.": continue
if len(token)>1:
last = token[-1]
if last=="e" or last=="E": continue
plast = token[-2]
if (plast=="e" or plast=="E") and \
(last=="-" or last=="+"): continue
return False
return True
# ----------------------------------------------------------------------
# Get contents as a list
# ----------------------------------------------------------------------
def getlist(self):
return re.sub(r"[(),;\[\]]","",self.get()).split()
# ---------------------------------------------------------------------
# Split vector in to a list of widgets
# ----------------------------------------------------------------------
def split(self, widgets):
value = self.get()
for ch in " ,()[];":
if ch in value:
xyz = self.getlist()
if xyz:
self.set(xyz[0])
for i,w in enumerate(widgets):
if len(xyz)>i+1: w.set(xyz[i+1])
return
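# Illustration: with "1.0, 2.0, 3.0" typed into a VectorEntry, getlist()
# returns the strings ['1.0', '2.0', '3.0'];  split((ey, ez)) keeps the first
# component in this entry and pushes the remaining components into the
# (hypothetical) sibling entries ey and ez.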
#===============================================================================
# Auto Scroll Bar
# Author: Fredrik Lundh <www.pythonware.com>
#===============================================================================
class AutoScrollbar(Scrollbar):
# ----------------------------------------------------------------------
# a scrollbar that hides itself if it's not needed. only
# works if you use the grid geometry manager.
# ----------------------------------------------------------------------
def set(self, lo, hi):
flo = float(lo)
fhi = float(hi)
g = self.get()
if abs(flo-float(g[0]))<=0.001 and abs(fhi-float(g[1]))<=0.001: return
if flo <= 0.001 and fhi >= 0.999:
if self.method==0:
# grid_remove is currently missing from Tkinter!
self.tk.call("grid", "remove", self)
else:
self.tk.call("pack", "forget", self)
elif flo > 0.001 or fhi < 0.999:
if self.method==0:
Scrollbar.grid(self)
else:
Scrollbar.pack(self)
Scrollbar.set(self, lo, hi)
# ----------------------------------------------------------------------
def grid(self, **kw):
self.method = 0
Scrollbar.grid(self, **kw)
# ----------------------------------------------------------------------
def pack(self, **kw):
self.method = 1
Scrollbar.pack(self, **kw)
#raise TclError("cannot use pack with this widget")
# ----------------------------------------------------------------------
def place(self, **kw):
raise TclError("cannot use place with this widget")
#===============================================================================
# ProgressBar Canvas
#===============================================================================
class ProgressBar(Canvas):
def __init__(self, master=None, **kw):
Canvas.__init__(self, master, **kw)
self.config(background="DarkGray")
self.currBox = self.create_rectangle(0, 0, 0, 0,
fill='Orange', width=0)
self.doneBox = self.create_rectangle(0, 0, 0, 0,
fill='DarkGreen', width=0)
self.text = self.create_text(0,0,text="",
fill="White",justify=CENTER)
self.auto = True
self.bind('<Configure>', self.draw)
self.setLimits()
# ----------------------------------------------------------------------
def setLimits(self, low=0.0, high=100.0, step=1.0):
self.low = float(low)
self.high = float(high)
self.length = float(high-low)
self.step = float(step)
self.done = float(low)
self.now = float(low)
self.t0 = time.time()
self.tmsg = ""
# ----------------------------------------------------------------------
def setProgress(self, now, done=None, txt=None):
self.now = now
if self.now < self.low:
self.now = self.low
elif self.now > self.high:
self.now = self.high
if done is None:
self.done = now - self.step
else:
self.done = done
if self.done < self.low:
self.done = self.low
elif self.done > self.high:
self.done = self.high
# calculate remaining time
dt = time.time()-self.t0
p = now-self.low
if p>0:
tot = dt/p*(self.high-self.low)
else:
tot = 0.0
th,s = divmod(tot,3600)
tm,ts = divmod(s,60)
dh,s = divmod(dt,3600)
dm,ds = divmod(s,60)
self.tmsg = "[%d:%02d:%02d | %d:%02d:%02d]"%(dh,dm,ds, th,tm,ts)
self.draw()
if txt is not None:
self.setText(txt)
elif self.auto:
self.autoText()
# ----------------------------------------------------------------------
def setText(self, txt):
self.itemconfig(self.text,text=txt)
# ----------------------------------------------------------------------
def autoText(self):
completed = self.done - self.low
if self.low != 0:
low = "%d - "%(self.low)
else:
low = ""
self.setText("Current: %d [%s%d] Completed: %d%% %s" % \
(self.now, low, self.high, int((100*completed)/self.length),self.tmsg))
# ----------------------------------------------------------------------
def getProgress(self):
return (self.done, self.now)
# ----------------------------------------------------------------------
def draw(self, event=None):
width = self.winfo_width()
height = self.winfo_height()
wn = int(width * (self.now - self.low) / self.length)
wd = int(width * (self.done - self.low) / self.length)
if wn == wd: wd = wn - 1
self.coords(self.currBox, 0, 0, wn, height)
self.coords(self.doneBox, 0, 0, wd, height)
self.coords(self.text, width/2, height/2)
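# Usage sketch (illustrative values; 'items' and 'process' are hypothetical):
#
#	bar = ProgressBar(root, width=300, height=24)
#	bar.pack(fill=X)
#	bar.setLimits(0, len(items))
#	for i, item in enumerate(items):
#		process(item)
#		bar.setProgress(i+1)	# also refreshes the embedded text and ETA
#		root.update_idletasks()	# let Tk repaint inside a tight loop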
#===============================================================================
# Extended Listbox
#===============================================================================
class ExListbox(Listbox):
"""Listbox that allows keyboard scanning, and a popup menu"""
_KEY_TIME_THRESHOLD = 1000 # ms
_searchTop = None
_searchLabel = None
_search = ""
_searchOrig = ""
_time = 0
def __init__(self, master, **kw):
Listbox.__init__(self, master, **kw)
ExListbox.resetSearch()
self._single = kw.get('selectmode','') in [SINGLE, BROWSE]
# self.bind('<Button-1>', lambda e,s=self:s.focus_set())
self.bind('<Key>', self.handleKey)
self.bind('<Home>', lambda e,s=self:s._scrollTo(0))
self.bind('<Prior>', lambda e,s=self:s._scrollTo(-1, PAGES))
self.bind('<Next>', lambda e,s=self:s._scrollTo( 1, PAGES))
self.bind('<End>', lambda e,s=self:s._scrollTo(END))
self.bind('<FocusOut>', ExListbox._hideSearch)
self.bind('<Unmap>', ExListbox._hideSearch)
self.bind('<<Cut>>', self.copy)
self.bind('<<Copy>>', self.copy)
if not self._single:
self.bind('<Control-Key-a>', self.selectAll)
self.bind('<Control-Shift-A>', self.selectClear)
self.bind('<Button-3>', self.popupMenu)
self.bind('<Control-Key-space>',self.popupMenu)
# User variables to modify
self.additionalChar = "-+._:$%#*"
self.ignoreNonAlpha = True # Ignore non-alpha characters
self.ignoreCase = True # Ignore case of letters
self.showSearch = True
self.usermenu = None # Assign a user-popup menu
# Should be a list with tuples
# in the form:
						# (label, underline, command)
# or None for separator
# ----------------------------------------------------------------------
def setPopupMenu(self, menu=None):
"""Setup a popup menu list it should be in the form
[ (label, underline, command), ... ]"""
self.usermenu = menu
self.bind('<Button-3>', self.popupMenu)
self.bind('<Control-Key-space>',self.popupMenu)
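    # Usage sketch (hypothetical callbacks, not part of this module):
    #   lb = ExListbox(parent, selectmode=BROWSE)
    #   lb.setPopupMenu([("Rename", 0, renameCmd),
    #                    None,                      # separator
    #                    ("Delete", 0, deleteCmd)])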
# ----------------------------------------------------------------------
@staticmethod
def resetSearch():
"""Reset search string"""
ExListbox._search = ""
ExListbox._searchOrig = ""
ExListbox._time = 0
if ExListbox._searchTop is not None:
try:
ExListbox._searchTop.withdraw()
except TclError:
ExListbox._searchTop = None
# ----------------------------------------------------------------------
@staticmethod
def _hideSearch(event=None):
if ExListbox._searchTop is not None:
try: ExListbox._searchTop.withdraw()
except: pass
# ----------------------------------------------------------------------
def _showSearch(self):
if ExListbox._searchTop is None:
ExListbox._searchTop = Toplevel()
ExListbox._searchTop.overrideredirect(1)
ExListbox._searchLabel = Label(ExListbox._searchTop,
anchor=E,
relief=SOLID,
background="Yellow",
takefocus=False,
borderwidth=1)
ExListbox._searchLabel.pack(fill=BOTH)
if ExListbox._searchOrig == "":
ExListbox._hideSearch()
return
ExListbox._searchLabel["text"]=ExListbox._searchOrig
ExListbox._searchTop.update_idletasks()
# Guess position
x = self.winfo_rootx() + self.winfo_width() \
- ExListbox._searchLabel.winfo_width()
y = self.winfo_rooty() + self.winfo_height()-12
ExListbox._searchTop.wm_geometry("+%d+%d" % (x,y))
ExListbox._searchTop.deiconify()
ExListbox._searchTop.lift()
ExListbox._searchTop.update_idletasks()
# ----------------------------------------------------------------------
# Handle key events for quick searching
# ----------------------------------------------------------------------
def handleKey(self, event):
"""handle key events for quick searching"""
if len(event.char)==0:
ExListbox._time = 0
return
if self.ignoreCase:
ch = event.char.upper()
else:
ch = event.char
oldActive = self.index(ACTIVE)
again = False
# Delete search
if event.keysym in ("Delete","Escape","Return","KP_Enter"):
ExListbox.resetSearch()
return
# Search Again Ctrl-G
elif event.char=='\007':
# Space bar selects...
#(event.char==' ' and self.ignoreNonAlpha):
self.activate(oldActive+1)
again = True
# Backspace
elif event.keysym == "BackSpace":
ExListbox._search = ExListbox._search[:-1]
ExListbox._searchOrig = ExListbox._searchOrig[:-1]
# Ignore non-printable characters
elif self.ignoreNonAlpha and \
not (ch.isalnum() or \
self.additionalChar.find(ch)>=0):
return
# Timeout
elif event.time - ExListbox._time > ExListbox._KEY_TIME_THRESHOLD:
# Start a new search
ExListbox._search = ch
ExListbox._searchOrig = event.char
else:
ExListbox._search += ch
ExListbox._searchOrig += event.char
if self.showSearch: self._showSearch()
lsearch = len(ExListbox._search)
ExListbox._time = event.time
start = 0
cur = self.index(ACTIVE)
active = unicode(self.get(ACTIVE))
if self.ignoreCase:
try: active = active.upper()
except: pass
if len(active)>0:
if self.ignoreNonAlpha:
for pos in range(len(active)):
if active[pos].isalnum() or self.additionalChar.find(active[pos])>=0:
break
else:
pos = 0
prefix = active[pos:pos+lsearch]
if ExListbox._search == prefix:
if self._single:
self.selection_clear(0, END)
self.selection_set(cur)
self.activate(cur)
self.see(cur)
self.event_generate("<<ListboxSelect>>")
return 'break'
elif ExListbox._search[:-1] == prefix[:-1]:
start = cur+1
loop = 1
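        # The scan below makes at most two passes over the items: when the search
        # is repeated (Ctrl-G) the first pass starts just after the active item
        # and a second pass wraps around from the top; otherwise a single pass
        # from the top of the list is performed.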
while loop <= 2:
if again:
start = cur+1
again = False
#elif oldActive != self.index(ACTIVE):
else:
start = 0
loop += 1
for i in range(start, self.size()):
item = unicode(self.get(i))
if self.ignoreCase:
try: item = item.upper()
except: pass
if len(item)>0:
if self.ignoreNonAlpha:
for pos in range(len(item)):
if item[pos].isalnum() or self.additionalChar.find(item[pos])>=0:
break
else:
pos = 0
prefix = item[pos:pos+lsearch]
if ExListbox._search == prefix:
if self._single:
self.selection_clear(0, END)
self.selection_set(i)
self.activate(i)
self.see(i)
self.event_generate("<<ListboxSelect>>")
return "break"
loop += 1
if oldActive != self.index(ACTIVE):
self.activate(oldActive)
# ----------------------------------------------------------------------
# Create the popup menu
# ----------------------------------------------------------------------
def popupMenu(self, event):
"""Create popup menu with default actions"""
if self["state"] == DISABLED: return
self.focus_set()
menu=Menu(self, tearoff=0)
if self.usermenu:
for entry in self.usermenu:
if entry is None:
menu.add_separator()
else:
name,und,cmd = entry[:3]
if len(entry)>3:
icon = entry[3]
else:
icon = None
menu.add_command(label=name, underline=und,
image=icon, compound=LEFT,
command=cmd)
if not self._single: menu.add_separator()
if not self._single:
self._ALLICON = PhotoImage(data=_ALLICON)
self._NONEICON = PhotoImage(data=_NONEICON)
self._INVICON = PhotoImage(data=_INVICON)
menu.add_command(label='All', underline=0,
image=self._ALLICON, compound=LEFT,
command=self.selectAll)
menu.add_command(label='Clear', underline=0,
image=self._NONEICON, compound=LEFT,
command=self.selectClear)
menu.add_command(label='Invert', underline=0,
image=self._INVICON, compound=LEFT,
command=self.selectInvert)
menu.tk_popup(event.x_root, event.y_root)
return "break"
# ----------------------------------------------------------------------
# Selection
# ----------------------------------------------------------------------
def selectAll(self, event=None):
"""Select all items"""
self.selection_set(0, END)
self.event_generate("<<ListboxSelect>>")
return "break"
# ----------------------------------------------------------------------
def selectClear(self, event=None):
"""Selection Clear"""
self.selection_clear(0, END)
self.event_generate("<<ListboxSelect>>")
return "break"
# ----------------------------------------------------------------------
def selectInvert(self, event=None):
"""Invert selection"""
for i in range(self.size()):
if self.select_includes(i):
self.selection_clear(i)
else:
self.selection_set(i)
self.event_generate("<<ListboxSelect>>")
return "break"
# ----------------------------------------------------------------------
# return active and selected items
# ----------------------------------------------------------------------
def getSelected(self):
"""return a tuple of active and selected items
for restoring later"""
return (self.index(ACTIVE), map(int, self.curselection()))
# ----------------------------------------------------------------------
# select active and selected items
# ----------------------------------------------------------------------
def selectSaved(self, save, default=None):
"""selected the saved items.
If list has changed then selected the default item"""
self.selection_clear(0,END)
if save is not None:
self.activate(save[0])
for s in save[1]:
self.selection_set(s)
self.see(save[0])
if default is not None:
if save is None or \
(save is not None and save[0] >= self.size()):
if isinstance(default, tuple):
self.selection_set(default[0], default[1])
self.activate(default[0])
else:
self.selection_set(default)
self.activate(default)
self.event_generate("<<ListboxSelect>>")
# ----------------------------------------------------------------------
def _scrollTo(self, pos, unit=None):
if unit:
self.yview_scroll(pos, unit)
else:
if self._single:
self.selection_clear(0, END)
self.selection_set(pos)
self.activate(pos)
self.see(pos)
self.event_generate("<<ListboxSelect>>")
return 'break'
# ----------------------------------------------------------------------
# Change the value of a list item
# and return the value of the old one
# ----------------------------------------------------------------------
def set(self, index, value):
"""Set/Change the value of a list item"""
try:
sel = self.selection_includes(index)
act = self.index(ACTIVE)
self.delete(index)
except TclError:
return
self.insert(index, value)
if sel: self.selection_set(index)
self.activate(act)
self.event_generate("<<ListboxSelect>>")
# ----------------------------------------------------------------------
# Swap two items in the list
# ----------------------------------------------------------------------
def swap(self, a, b):
"""Swap two items in the list"""
if a>b: a, b = b, a
        at = self.get(a)
        bt = self.get(b)
        self.delete(b)
        self.delete(a)
        self.insert(a, bt)
        self.insert(b, at)
# ----------------------------------------------------------------------
# Move up select items by one
# ----------------------------------------------------------------------
def moveUp(self):
"""Move selected items up"""
for i in map(int,self.curselection()):
if i==0: continue
prev = i-1
if not self.selection_includes(prev):
act = self.index(ACTIVE)
self.swap(prev,i)
self.selection_set(prev)
if act == i: self.activate(prev)
self.event_generate("<<ListboxSelect>>")
# ----------------------------------------------------------------------
# Move down select items by one
# ----------------------------------------------------------------------
def moveDown(self):
"""Move selected items down"""
sz = self.size()-1
lst = map(int,self.curselection())
lst.reverse()
for i in lst:
if i >= sz: continue
next = i+1
if not self.selection_includes(next):
act = self.index(ACTIVE)
self.swap(i,next)
self.selection_set(next)
if act == i: self.activate(next)
self.event_generate("<<ListboxSelect>>")
# ----------------------------------------------------------------------
def deleteByName(self, item):
"""delete entry by name"""
act = self.index(ACTIVE)
for i in range(self.size()-1,-1,-1):
it = self.get(i)
if it == item:
self.delete(i)
self.activate(act)
# ----------------------------------------------------------------------
# Fill the listbox
# ----------------------------------------------------------------------
def fill(self, items=None):
self.delete(0,END)
for item in items: self.insert(END, item)
# ----------------------------------------------------------------------
# Copy current elements to clipboard
# ----------------------------------------------------------------------
def copy(self, event=None):
sel = self.curselection()
if not sel: return
items = []
for i in sel:
items.append(self.get(i))
self.clipboard_clear()
self.clipboard_append("\n".join(items))
#===============================================================================
# Search Listbox
# A listbox whose contents narrow down to the items matching the typed search string
#===============================================================================
class SearchListbox(ExListbox):
def __init__(self, master, **kw):
ExListbox.__init__(self, master, **kw)
self.prefixSearch = False
self._items = []
self._pos = []
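    # Behaviour sketch: typing characters narrows the visible items to those
    # matching the search string; a leading "*" toggles between substring and
    # prefix matching (see handleKey below). Example (hypothetical items):
    #   lb = SearchListbox(parent, selectmode=BROWSE)
    #   lb.prefixSearch = True          # match only at the start of items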
# ----------------------------------------------------------------------
# Fill the listbox
# ----------------------------------------------------------------------
# def fill(self, items=None):
# del self._items[:]
# if items is None:
# for item in Listbox.get(self,0,END):
# self._items.append(unicode(item))
# else:
# self.delete(0,END)
# for item in items:
# item = unicode(item)
# self._items.append(item)
# self.insert(END, item)
# self._pos = range(len(self._items))
# ----------------------------------------------------------------------
def reset(self):
if self._items and ExListbox._search:
ExListbox.resetSearch()
Listbox.delete(self, 0, END)
for item in self._items:
Listbox.insert(self, END, item)
del self._items[:]
del self._pos[:]
# ----------------------------------------------------------------------
def handleKey(self, event):
"""handle key events for quick searching"""
if len(event.char)==0:
ExListbox._time = 0
return
backspace = False
# Delete search
if event.keysym in ("Delete", "Escape"):
ExListbox.resetSearch()
backspace = True
# Backspace
elif event.keysym == "BackSpace":
ExListbox._search = ExListbox._search[:-1]
ExListbox._searchOrig = ExListbox._searchOrig[:-1]
backspace = True
# Ignore non-printable characters
elif self.ignoreNonAlpha and \
not (event.char.isalnum() or \
self.additionalChar.find(event.char)>=0):
return
# Normal character
else:
if self.ignoreCase:
ExListbox._search += event.char.upper()
else:
ExListbox._search += event.char
ExListbox._searchOrig += event.char
if self.showSearch: self._showSearch()
# Remember time and active
ExListbox._time = event.time
active = Listbox.get(self,ACTIVE)
activepos = 0
search = ExListbox._search
prefix = self.prefixSearch
if search and search[0]=="*":
search = search[1:]
prefix = not prefix
# Fill up the list of items
if not self._items:
for item in Listbox.get(self,0,END):
self._items.append(unicode(item))
self._pos = range(len(self._items))
# if Search string is empty, fill the entire list
if not search:
Listbox.delete(self, 0, END)
for i,item in enumerate(self._items):
if active == item: activepos = i
Listbox.insert(self, END, item)
self._pos = range(len(self._items))
# Backspace removes one character then we need to expand the list
elif backspace:
# FIXME I could find the correct position and insert it
# instead of delete all and repopulate
Listbox.delete(self, 0, END)
del self._pos[:]
for i,item in enumerate(self._items):
if prefix:
if self.ignoreCase:
if item.upper().startswith(search):
if active == item: activepos = i
Listbox.insert(self, END, item)
self._pos.append(i)
else:
if item.startswith(search):
if active == item: activepos = i
Listbox.insert(self, END, item)
self._pos.append(i)
else:
if self.ignoreCase:
if item.upper().find(search)>=0:
if active == item: activepos = i
Listbox.insert(self, END, item)
self._pos.append(i)
else:
if item.find(search)>=0:
if active == item: activepos = i
Listbox.insert(self, END, item)
self._pos.append(i)
else:
# FIXME I could use the fnmatch or re to allow * and ? as pattern
# If a new character added then shrink the existing list
# Scan in reverse order
for i in range(Listbox.size(self)-1, -1, -1):
item = Listbox.get(self, i)
if active == item: activepos = i
if self.ignoreCase: item = item.upper()
if prefix:
if not item.startswith(search):
Listbox.delete(self, i)
del self._pos[i]
else:
if item.find(search)<0:
Listbox.delete(self, i)
del self._pos[i]
Listbox.selection_clear(self, 0, END)
Listbox.selection_set(self, activepos)
Listbox.activate(self, activepos)
# ----------------------------------------------------------------------
def insert(self, index, *elements):
del self._items[:]
return Listbox.insert(self, index, *elements)
# ----------------------------------------------------------------------
def delete(self, first, last=None):
del self._items[:]
return Listbox.delete(self, first, last)
# ----------------------------------------------------------------------
def curselection(self):
if self._items:
return [self._pos[int(x)] for x in Listbox.curselection(self)]
else:
return Listbox.curselection(self)
# ----------------------------------------------------------------------
# FIXME needs work to handle, ACTIVE, END...
# ----------------------------------------------------------------------
def get(self, first, last=None):
#say("SearchListbox.get",first,type(first),last,type(last))
if not self._items:
return Listbox.get(self, first, last)
elif first == ACTIVE:
return Listbox.get(self, first, last)
elif last is None:
return self._items[first]
elif last == END:
last = len(self._items)
else:
last = int(last)+1
if len(self._items)==0: return ""
return self._items[int(first):last]
#===============================================================================
# MultiListbox based on recipe from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52266
# Author: Brent Burley
# Date: 2001/03/14
#===============================================================================
class MultiListbox(Frame):
"""Multilistbox class"""
# Add default options if not supplied
defopt = (("borderwidth", 0),
("selectmode", EXTENDED),
("selectborderwidth", 0),
("relief", FLAT),
("exportselection", FALSE),
("takefocus", FALSE))
def __init__(self, master, lists, **options):
Frame.__init__(self, master)
self.paneframe = PanedWindow(self, orient=HORIZONTAL,
showhandle=0, handlepad=0, handlesize=0,
sashwidth=2, opaqueresize=1)
self.paneframe.pack(side=LEFT, expand=YES, fill=BOTH)
self.paneframe.bind("<Button-1>", self._sashMark)
self.paneframe.bind("<B1-Motion>", self._sashDrag)
self.paneframe.bind("<ButtonRelease-1>", self._sashRelease)
self.lists = []
self._labels = []
col = 0
if "header" in options:
header = options["header"]
del options["header"]
else:
header = 1
if "stretch" in options:
stretch = options["stretch"]
del options["stretch"]
else:
stretch = "always"
for n,o in MultiListbox.defopt:
if n not in options:
options[n] = o
for l, w, a in lists:
#if header:
frame = Frame(self.paneframe, border=0)
try: self.paneframe.add(frame, minsize=16, stretch=stretch)
except: self.paneframe.add(frame, minsize=16) # tk8.4
if header:
lbl = Label(frame, text=l, borderwidth=1,
relief=RAISED)
lbl.pack(fill=X)
lbl.bind('<Button-1>', lambda e, s=self, c=col:
s.sort(c))
self._labels.append(lbl)
#else:
# frame = self
lb = ExListbox(frame, width=w, **options)
#if header:
# lb.pack(expand=YES, fill=BOTH)
#else:
lb.pack(side=LEFT, expand=YES, fill=BOTH)
self.lists.append(lb)
lb.bind('<B2-Motion>', lambda e, s=self:
s._b2motion(e.x, e.y))
lb.bind('<Button-2>', lambda e, s=self:
s._button2(e.x, e.y))
lb.bind('<Button-4>', lambda e, s=self:
s._scroll(SCROLL, -1, UNITS))
lb.bind('<Button-5>', lambda e, s=self:
s._scroll(SCROLL, 1, UNITS))
lb.bind('<<ListboxSelect>>', lambda e, s=self, l=lb:
s._updateSelect(l))
col += 1
self.lists[0]["takefocus"] = True
if header:
frame = Frame(self)
frame.pack(side=RIGHT, fill=Y)
Label(frame, borderwidth=1, relief=RAISED).pack(fill=X)
self.scrollbar = Scrollbar(frame, orient=VERTICAL,
takefocus=False,
command=self._scroll)
if header:
self.scrollbar.pack(fill=Y, expand=YES)
else:
self.scrollbar.pack(side=RIGHT, fill=Y)
self.lists[0]['yscrollcommand']=self.scrollbar.set
self.activeList = self.lists[0]
self.sortAssist = SortAssist
self._sortOrder = None # Array containing the previous order of the list after sort
self._sortColumn = -1
self._sashIndex = -1
self._sortReverse = False
self._sashx = None
# ----------------------------------------------------------------------
# Bind left/right arrow to focusing different list
# ----------------------------------------------------------------------
def bindLeftRight(self):
""" Default bindings of left/right arrow to focusing different lists"""
self.bindList('<Left>', self.focusLeft)
self.bindList('<Right>', self.focusRight)
# ----------------------------------------------------------------------
def _updateSelect(self, lst=None):
if lst is None: lst = self.activeList
ypos = lst.yview()[0]
sel = lst.curselection()
act = lst.index(ACTIVE)
for l in self.lists:
if l is lst: continue
l.selection_clear(0, END)
for s in sel:
l.selection_set(s)
l.activate(act)
l.yview_moveto(ypos)
self.event_generate('<<ListboxSelect>>')
# ----------------------------------------------------------------------
# Update header labels
# ----------------------------------------------------------------------
def labels(self, names):
for i,n in enumerate(names):
            if i >= len(self._labels): return
self._labels[i].config(text=n)
# ----------------------------------------------------------------------
def _button2(self, x, y):
for l in self.lists:
l.scan_mark(x, y)
return 'break'
# ----------------------------------------------------------------------
def _b2motion(self, x, y):
for l in self.lists:
l.scan_dragto(x, y)
return 'break'
# ----------------------------------------------------------------------
def _sashMark(self, event):
self._sashIndex = -1
try:
self._sashIndex,which = self.paneframe.identify(event.x, event.y)
if which == "sash":
self._sashx = [self.paneframe.sash_coord(i)[0] \
for i in range(len(self.lists)-1)]
self._sashdx = self._sashx[self._sashIndex] - event.x
self._sashDrag(event)
else:
self._sashIndex = -1
except:
return
return 'break'
# ----------------------------------------------------------------------
def _sashDrag(self, event):
if self._sashx and self._sashIndex >= 0:
ddx = event.x - self._sashdx - self._sashx[self._sashIndex]
self.paneframe.sash_place(self._sashIndex, event.x-self._sashdx, 1)
for i in range(self._sashIndex+1, len(self.lists)-1):
self.paneframe.sash_place(i, self._sashx[i]+ddx, 1)
return 'break'
# ----------------------------------------------------------------------
def _sashRelease(self, event):
if self._sashIndex >= 0:
self._sashx = None
return 'break'
# ----------------------------------------------------------------------
def _scroll(self, *args):
for l in self.lists:
l.yview(*args)
return 'break'
# ----------------------------------------------------------------------
def curselection(self):
return self.lists[0].curselection()
# ----------------------------------------------------------------------
def delete(self, first, last=None):
for l in self.lists:
l.delete(first, last)
# ----------------------------------------------------------------------
def get(self, first, last=None):
result = []
for l in self.lists:
result.append(l.get(first, last))
if last: return zip(*result)
return result
# ----------------------------------------------------------------------
def getList(self, i):
return self.lists[i]
list = getList
# ----------------------------------------------------------------------
def index(self, item):
return self.lists[0].index(item)
# ----------------------------------------------------------------------
def insert(self, index, *elements):
for e in elements:
for i,l in enumerate(self.lists):
l.insert(index, e[i])
if len(e) < len(self.lists):
for l in self.lists[len(e) : len(self.lists)]:
l.insert(index, "")
if self._sortColumn>=0:
txt = self._labels[self._sortColumn]["text"]
self._labels[self._sortColumn].config(text=txt[:-1])
self._sortColumn = -1
# ----------------------------------------------------------------------
# Change the value of a list item
# and return the value of the old one
# ----------------------------------------------------------------------
def set(self, index, value):
"""Set the value of a list item."""
self.delete(index)
self.insert(index, value)
# ----------------------------------------------------------------------
def size(self):
return self.lists[0].size()
# ----------------------------------------------------------------------
def setPopupMenu(self, menu):
"""Setup a popup menu list it should be in the form
[ (label, underline, command), ... ]"""
for l in self.lists:
l.setPopupMenu(menu)
# ----------------------------------------------------------------------
def nearest(self, y):
return self.lists[0].nearest(y)
# ----------------------------------------------------------------------
def see(self, index):
for l in self.lists:
l.see(index)
# ----------------------------------------------------------------------
def configure(self, **kw):
for l in self.lists:
l.configure(**kw)
config = configure
# ----------------------------------------------------------------------
def itemcget(self, index, option):
"""Return the resource value for an ITEM and an OPTION."""
return self.lists[0].itemcget(index, option)
# ----------------------------------------------------------------------
def itemconfigure(self, index, cnf=None, **kw):
"""Configure resources of an ITEM.
The values for resources are specified as keyword arguments.
To get an overview about the allowed keyword arguments
call the method without arguments.
Valid resource names: background, bg, foreground, fg,
selectbackground, selectforeground."""
for l in self.lists:
l.itemconfigure(index, cnf, **kw)
itemconfig = itemconfigure
# ----------------------------------------------------------------------
# Override of the standard Tkinter cget() routine
# ----------------------------------------------------------------------
def __getitem__(self, key):
return self.lists[0].cget(key)
# ----------------------------------------------------------------------
# Override of the standard Tkinter config() routine
# ----------------------------------------------------------------------
def __setitem__(self, key, value):
for l in self.lists:
l[key] = value
# ----------------------------------------------------------------------
# Selection
# ----------------------------------------------------------------------
def selection_anchor(self, index):
for l in self.lists:
l.selection_anchor(index)
# ----------------------------------------------------------------------
def selection_includes(self, index):
return self.lists[0].selection_includes(index)
# ----------------------------------------------------------------------
def selection_clear(self, first, last=None):
for l in self.lists:
l.selection_clear(first, last)
# ----------------------------------------------------------------------
def selection_set(self, first, last=None):
for l in self.lists:
l.selection_set(first, last)
# ----------------------------------------------------------------------
def selectAll(self, event=None):
"""Select all items"""
self.selection_set(0, END)
self.event_generate("<<ListboxSelect>>")
return "break"
# ----------------------------------------------------------------------
def selectClear(self, event=None):
"""Unselect all items"""
self.selection_clear(0, END)
self.event_generate("<<ListboxSelect>>")
return "break"
# ----------------------------------------------------------------------
def selectInvert(self, event=None):
"""Invert selection"""
l = self.lists[0]
for i in range(l.size()):
if l.select_includes(i):
self.selection_clear(i)
else:
self.selection_set(i)
self.event_generate("<<ListboxSelect>>")
return "break"
# ----------------------------------------------------------------------
def bindList(self, event, func):
self.bind(event, func)
for l in self.lists:
l.bind(event, func)
# ----------------------------------------------------------------------
def activate(self, index):
for l in self.lists:
l.activate(index)
# ----------------------------------------------------------------------
def focus_set(self):
self.lists[0].focus_set()
# ----------------------------------------------------------------------
def focusLeft(self, event=None):
listbox = self.focus_get()
if listbox is None: return
active = listbox.index(ACTIVE)
try:
lid = self.lists.index(listbox) - 1
if lid>=0:
self.lists[lid].activate(active)
self.lists[lid].focus_set()
except:
pass
# ----------------------------------------------------------------------
def focusRight(self, event=None):
listbox = self.focus_get()
if listbox is None: return
active = listbox.index(ACTIVE)
try:
lid = self.lists.index(listbox) + 1
if lid < len(self.lists):
self.lists[lid].activate(active)
self.lists[lid].focus_set()
except:
pass
# ----------------------------------------------------------------------
def sort(self, column, reverse=None):
""" Sort by a given column."""
if self.lists[0].cget("state") == DISABLED: return
if self.sortAssist is None: return
if column == self._sortColumn:
txt = self._labels[self._sortColumn]["text"][:-1]
if reverse is None:
reverse = not self._sortReverse
else:
if self._sortColumn>=0:
txt = self._labels[self._sortColumn]["text"][:-1]
self._labels[self._sortColumn].config(text=txt)
self._sortColumn = -1
txt = self._labels[column]["text"]
if reverse is None:
reverse = False
#elements = self.get(0, END)
elements = []
lst = self.lists[0]
for i in range(self.size()):
item = []
for l in self.lists:
item.append(l.get(i))
item.append(lst.selection_includes(i)) # Include selection
item.append(i) # Include position
elements.append(item)
try: active = int(self.index(ACTIVE))
except: active = -1
self.delete(0, END)
elements.sort(key=self.sortAssist(column), reverse=reverse)
# get selection status
status = []
self._sortOrder = []
newactive = -1
for i,item in enumerate(elements):
idx = item.pop()
if active == idx: newactive = i
self._sortOrder.append(idx)
status.append(item.pop())
self.insert(END, *elements)
for i,s in enumerate(status):
if s:
self.selection_set(i)
if newactive<0: newactive = i
if newactive>=0:
self.activate(newactive)
self._sortColumn = column
self._sortReverse = reverse
if reverse:
self._labels[column].config(text=txt+Unicode.BLACK_DOWN_POINTING_TRIANGLE)
else:
self._labels[column].config(text=txt+Unicode.BLACK_UP_POINTING_TRIANGLE)
self.event_generate("<<ListboxSort>>")
# ----------------------------------------------------------------------
def saveSort(self):
self._saveColumn = self._sortColumn
self._saveReverse = self._sortReverse
# ----------------------------------------------------------------------
def restoreSort(self):
if self._saveColumn>=0:
self.sort(self._saveColumn, self._saveReverse)
# ----------------------------------------------------------------------
def yview(self):
return self.lists[0].yview()
# ----------------------------------------------------------------------
def yview_moveto(self, fraction):
for l in self.lists:
l.yview_moveto(fraction)
#===============================================================================
# A MultiListbox that remembers the color of items
#===============================================================================
class ColorMultiListbox(MultiListbox):
# ----------------------------------------------------------------------
def sort(self, column, dir=None):
# remember colors
colors = {}
for i in range(self.size()):
colors[self.lists[0].get(i)] = \
self.lists[0].itemcget(i, "foreground")
MultiListbox.sort(self, column, dir)
# set colors
for i in range(self.size()):
self.setColor(i, colors[self.lists[0].get(i)])
del colors
# ----------------------------------------------------------------------
def setColor(self, idx, color):
for l in self.lists:
l.itemconfigure(idx, foreground=color)
#===============================================================================
# Image list
#===============================================================================
class ImageListbox(Text):
"""ImageListbox widget which can display a list of strings and images"""
def __init__(self, master, **options):
Text.__init__(self, master, **options)
self.config(cursor="arrow",
tabs="20p",
#insertofftime=0,
#insertontime=0,
wrap=NONE,
insertwidth=0,
takefocus=TRUE,
exportselection=0)
# state=DISABLED)
self.bind("<Button-1>", self._button1)
self.bind("<Control-Button-1>", self._controlButton1)
self.bind("<Shift-Button-1>", self._motion1)
self.bind("<B1-Motion>", self._motion1)
self.bind("<Control-B1-Motion>",self._controlMotion1)
self.bind("<Key>", self._key)
self.bind("<Delete>", self._break)
self.bind("<Return>", self._break)
self.bind("<KeyRelease>", self._break)
self.bind("<<Cut>>", self.cut)
self.bind("<<Copy>>", self.copy)
self.bind("<<Paste>>", self.paste)
self._selection = []
self._anchor = 0
self._active = 0
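    # Usage sketch (icon is assumed to be a Tkinter PhotoImage kept referenced
    # by the caller so it is not garbage collected):
    #   ilb = ImageListbox(parent)
    #   ilb.insert(END, icon, "first item")
    #   ilb.insert(END, icon, "second item")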
# ----------------------------------------------------------------------
def insert(self, index, icon, text):
"""Insert ELEMENTS at INDEX."""
# self.config(state=NORMAL)
if index != END:
index = int(index)
sindex = "%d.0"%(index+1)
Text.insert(self, sindex, "\t%s\n"%(text))
self.image_create(sindex, image=icon)
self._selection.insert(index,False)
else:
self.image_create(END, image=icon)
Text.insert(self, END, "\t%s\n"%(text))
self._selection.append(False)
# self.config(state=DISABLED)
# ----------------------------------------------------------------------
def delete(self, first, last=None):
"""Delete items from FIRST to LAST (not included)."""
if first == END:
Text.delete(self, "end.0", END)
self._selection.pop()
return "break"
if first == ACTIVE:
first = self.index(ACTIVE)
if last is None:
i = int(first)
if 0 <= i < len(self._selection):
Text.delete(self, "%d.0"%(i+1), "%d.0 + 1 lines"%(i+1))
del self._selection[i]
return "break"
if last == END:
last = self.size()
first = int(first)
lines = int(last) - first
Text.delete(self, "%d.0"%(first+1), "%d.0 + %d lines"%(first+1, lines))
try:
del self._selection[first:last]
except IndexError:
pass
return "break"
# ----------------------------------------------------------------------
def size(self):
"""Return the number of elements in the listbox."""
return len(self._selection)
# ----------------------------------------------------------------------
def curselection(self):
"""Return list of indices of currently selected item."""
sel = []
for i,x in enumerate(self._selection):
if x: sel.append(i)
return sel
# ----------------------------------------------------------------------
def nearest(self, y):
"""Get index of item which is nearest to y coordinate Y."""
index = Text.index(self,"@1,%d"%(y))
i = int(index.split(".")[0])-1
if i>= self.size(): i -= 1
return i
# ----------------------------------------------------------------------
def _button1(self, event):
self.focus()
self.selection_anchor(self.nearest(event.y))
self._active = self._anchor
self._select()
return "break"
# ----------------------------------------------------------------------
def _motion1(self, event):
y = self.nearest(event.y)
if self._active != y:
self._active = y
self._selectRange()
return "break"
# ----------------------------------------------------------------------
def _controlButton1(self, event):
self.selection_anchor(self.nearest(event.y))
self._active = self._anchor
self._selection[self._anchor] = not self._selection[self._anchor]
self._tagSelection()
self.event_generate("<<ListboxSelect>>")
return "break"
# ----------------------------------------------------------------------
def _controlMotion1(self, event):
self._active = self.nearest(event.y)
last = self._selection[self._anchor]
if self._active < self._anchor:
for i in range(self._active, self._anchor):
self._selection[i] = last
elif self._active > self._anchor:
for i in range(self._anchor, self._active+1):
self._selection[i] = last
self._tagSelection()
return "break"
# ----------------------------------------------------------------------
def _key(self, event):
if event.keysym == "Up":
if self._active == 0: return "break"
self._active -= 1
if event.state & SHIFT_MASK:
self._selectRange()
else:
self._anchor = self._active
self._select()
elif event.keysym == "Down":
self._active += 1
if self._active >= self.size():
self._active = self.size()-1
return "break"
if event.state & SHIFT_MASK:
self._selectRange()
else:
self._anchor = self._active
self._select()
elif event.keysym in ("Prior", "Next", "Delete"):
return
        if (event.state & CONTROL_MASK) != 0:
# Let system handle all Control keys
pass
else:
# Ignore all normal keys
return "break"
# ----------------------------------------------------------------------
def _break(self, event):
return "break"
# ----------------------------------------------------------------------
def _select(self):
self._selection = [False] * len(self._selection)
self.selection_set(self._active)
idx = "%d.0"%(self._active+1)
Text.see(self, idx)
Text.index(self, idx)
self.event_generate("<<ListboxSelect>>")
# ----------------------------------------------------------------------
def _selectRange(self):
self._selection = [False] * len(self._selection)
if self._active < self._anchor:
for i in range(self._active, self._anchor):
self._selection[i] = True
elif self._active > self._anchor:
for i in range(self._anchor, self._active+1):
self._selection[i] = True
try:
self._selection[self._anchor] = True
except IndexError:
pass
self._tagSelection()
self.event_generate("<<ListboxSelect>>")
return "break"
# ----------------------------------------------------------------------
def selection_anchor(self, index):
"""Set the fixed end oft the selection to INDEX."""
self._anchor = index
select_anchor = selection_anchor
# ----------------------------------------------------------------------
def selection_clear(self, first, last=None):
"""Clear the selection from FIRST to LAST (not included)."""
self._selection = [False] * len(self._selection)
self._tagSelection()
select_clear = selection_clear
# ----------------------------------------------------------------------
def selection_includes(self, index):
"""Return 1 if INDEX is part of the selection."""
return self._selection[index]
select_includes = selection_includes
# ----------------------------------------------------------------------
def selection_set(self, first, last=None):
"""Set the selection from FIRST to LAST (not included) without
changing the currently selected elements."""
if first == END:
self._selection[-1] = True
self._tagSelection()
return
if last is None:
i = int(first)
if 0 <= i < len(self._selection):
self._selection[int(first)] = True
self._tagSelection()
return
if last == END:
last = self.size()
for i in range(int(first), last):
self._selection[i] = True
self._tagSelection()
select_set = selection_set
# ----------------------------------------------------------------------
def see(self, index):
"""Scroll such that INDEX is visible."""
if index == END:
Text.see(self, index)
else:
Text.see(self, "%d.0"%(int(index)+1))
# ----------------------------------------------------------------------
def _tagSelection(self):
self.tag_delete("lola")
for i,x in enumerate(self._selection):
if x:
self.tag_add("lola", "%d.0"%(i+1), "%d.0 +1 lines"%(i+1))
self.tag_configure("lola", foreground="White", background="SteelBlue2")
Text.selection_clear(self)
# ----------------------------------------------------------------------
#def bbox(self, *args):
def bbox(self, index):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
which encloses the item identified by index in ARGS."""
if index == END:
return Text.dlineinfo(self,index)[:4]
if index == ACTIVE:
index = self.index(index)
return Text.bbox(self,"%d.2"%(int(index)+1))
# ----------------------------------------------------------------------
def dlineinfo(self,index):
if index == END:
return Text.dlineinfo(self,index)
if index == ACTIVE:
index = self.index(index)
return Text.dlineinfo(self,"%d.0"%(int(index)+1))
# ----------------------------------------------------------------------
def activate(self, index):
"""Activate item identified by INDEX."""
if index == END:
self._active = self.size()-1
else:
self._active = int(index)
# ----------------------------------------------------------------------
def get(self, first, last=None):
"""Get list of items from FIRST to LAST (not included)."""
if first == END:
first = self.size()-1
else:
first = int(first)
if last is None:
if 0 <= first < len(self._selection):
first += 1
img = Text.image_cget(self, "%d.0"%(first), "image")
txt = Text.get(self, "%d.2"%(first), "%d.end"%(first))
return img,txt
return None,None
        if last == END:
            last = self.size()
        else:
            last = int(last)
        # Collect (icon, text) pairs for the range; mirrors the single-item case above
        items = []
        for i in range(first, last):
            img = Text.image_cget(self, "%d.0"%(i+1), "image")
            txt = Text.get(self, "%d.2"%(i+1), "%d.end"%(i+1))
            items.append((img, txt))
        return items
# ----------------------------------------------------------------------
def index(self, index):
"""Return index of item identified with INDEX."""
if index == ACTIVE:
return self._active
else:
return Text.index(self,index)
# ----------------------------------------------------------------------
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
pass
# ----------------------------------------------------------------------
def scan_dragto(self, x, y):
"""Adjust the view of the listbox to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
pass
# ----------------------------------------------------------------------
def itemcget(self, index, option):
"""Return the resource value for an ITEM and an OPTION."""
pass
# ----------------------------------------------------------------------
def itemconfigure(self, index, cnf=None, **kw):
"""Configure resources of an ITEM.
The values for resources are specified as keyword arguments.
To get an overview about the allowed keyword arguments
call the method without arguments.
Valid resource names: background, bg, foreground, fg,
selectbackground, selectforeground."""
pass
itemconfig = itemconfigure
# ----------------------------------------------------------------------
# Override cut,copy,paste to do nothing
# ----------------------------------------------------------------------
def cut(self, event=None):
return "break"
copy = cut
paste = cut
#===============================================================================
# Class to edit in place the contents of a listbox
#===============================================================================
class InPlaceEdit:
def __init__(self, listbox, item=ACTIVE, value=None, x=None, select=True, **kw):
# Return value
self.value = None # Result
self.frame = None
self.lastkey = None # Last key that exited the editbox
self.kw = kw
self._x = x
self._select = select
# Find active
try: self.active = listbox.index(item)
except: return
self.item = item
self.listbox = listbox
# Create and set value
self.frame = Frame(listbox, relief=None)
self.createWidget()
self.old = self.set(value)
self.defaultBinds()
# Bindings
self.frame.bind("<FocusOut>", self.focusOut)
# Unmap creates core dump when Fn key is pressed
self.frame.bind("<ButtonRelease-1>", self.clickOk)
self.frame.bind("<ButtonRelease-3>", self.clickCancel)
self.listbox.bind("<Configure>", self.resize)
#self.frame.bind("<Unmap>", self._destroy)
try:
self._grab_window = self.frame.grab_current()
except TclError:
self._grab_window = None
self.resize()
self.show()
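    # Usage sketch: the constructor blocks until editing is finished; the result
    # is left in .value (None when the edit was cancelled or unchanged):
    #   edit = InPlaceEdit(listbox)
    #   if edit.value is not None:
    #       ...                         # the item was edited; new text in edit.value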
# ----------------------------------------------------------------------
def show(self):
# Show and wait to be destroyed
try:
self.frame.wait_visibility()
self.frame.grab_set()
self.icursor()
self.frame.wait_window()
except TclError:
pass
#self.listbox.focus_set()
# ----------------------------------------------------------------------
# Override method if another widget is requested
# ----------------------------------------------------------------------
def createWidget(self):
self.edit = Entry(self.frame, **self.kw)
self.edit.pack(expand=YES, fill=BOTH)
self.edit.focus_set()
# ----------------------------------------------------------------------
# set insert cursor at location
# ----------------------------------------------------------------------
def icursor(self):
if self._x is not None:
self.edit.icursor("@%d"%(self._x))
# ----------------------------------------------------------------------
# Set default bindings
# ----------------------------------------------------------------------
def defaultBinds(self):
try:
self.edit.bind("<Return>", self.ok)
self.edit.bind("<KP_Enter>", self.ok)
self.edit.bind("<Up>", self.ok)
self.edit.bind("<Down>", self.ok)
self.edit.bind("<Escape>", self.cancel)
except AttributeError:
pass
# ----------------------------------------------------------------------
def resize(self, event=None):
if self.frame is None: return
bbox = self.listbox.bbox(self.item)
if bbox is None: return
x, y, w, h = bbox
w = self.listbox.winfo_width() - x
h += 3
try:
self.frame.place(in_=self.listbox,
x=x-1, y=y-1,
width=w, height=h,
bordermode=OUTSIDE)
self.frame.update_idletasks()
except TclError:
pass
# ----------------------------------------------------------------------
# Override method to set the value
# ----------------------------------------------------------------------
def set(self, value):
if self.frame is None: return
if value is None:
value = self.listbox.get(self.item)
self.edit.delete(0, END)
self.edit.insert(0, value)
if self._select:
self.edit.selection_range(0, END)
return value
# ----------------------------------------------------------------------
# Override method to get the value
# ----------------------------------------------------------------------
def get(self):
if self.frame is None: return None
return self.edit.get()
# ----------------------------------------------------------------------
def reset_grab(self):
if self.frame is None: return
self.frame.grab_release()
if self._grab_window is not None:
try:
self._grab_window.grab_set()
except TclError:
pass
# ----------------------------------------------------------------------
def clickOk(self, event):
# If clicked outside return ok
if event.x < 0 or \
event.y < 0 or \
event.x > self.frame.winfo_width() or \
event.y > self.frame.winfo_height():
self.ok(event)
# ----------------------------------------------------------------------
def clickCancel(self, event):
# If clicked outside return cancel
if event.x < 0 or \
event.y < 0 or \
event.x > self.frame.winfo_width() or \
event.y > self.frame.winfo_height():
self.cancel(event)
# ----------------------------------------------------------------------
def focusOut(self, event=None):
self.ok()
# ----------------------------------------------------------------------
def updateValue(self):
if isinstance(self.listbox, Listbox):
self.listbox.delete(self.active)
self.listbox.insert(self.active, self.value)
# ----------------------------------------------------------------------
def ok(self, event=None):
if event: self.lastkey = event.keysym
self.value = self.get()
self.frame.unbind('<FocusOut>')
act = self.listbox.index(ACTIVE)
sel = self.listbox.selection_includes(self.active)
self.updateValue()
self.listbox.see(self.active)
if sel:
self.listbox.selection_set(self.active)
self.listbox.activate(act)
if self.value == self.old: self.value = None
self.reset_grab()
self.listbox.focus_set()
self.frame.place_forget()
self.frame.destroy()
return "break"
# ----------------------------------------------------------------------
def cancel(self, event=None):
self.reset_grab()
self.listbox.focus_set()
self.frame.place_forget()
self.frame.destroy()
return "break"
#===============================================================================
class InPlaceSpinbox(InPlaceEdit):
# ----------------------------------------------------------------------
def createWidget(self):
self.edit = Spinbox(self.frame, **self.kw)
self.edit.pack(expand=YES, fill=BOTH)
self.edit.focus_set()
# ----------------------------------------------------------------------
def set(self, value):
if self.frame is None: return
if value is None:
value = self.listbox.get(self.item)
self.edit.delete(0, END)
self.edit.insert(0, value)
return value
#===============================================================================
class InPlaceInteger(InPlaceEdit):
# ----------------------------------------------------------------------
def createWidget(self):
self.edit = IntegerEntry(self.frame, **self.kw)
self.edit.pack(expand=YES, fill=BOTH)
self.edit.focus_set()
#===============================================================================
class InPlaceFloat(InPlaceEdit):
# ----------------------------------------------------------------------
def createWidget(self):
self.edit = FloatEntry(self.frame, **self.kw)
self.edit.pack(expand=YES, fill=BOTH)
self.edit.focus_set()
#===============================================================================
class InPlaceList(InPlaceEdit):
def __init__(self, listbox, item=ACTIVE, value=None, height=None, values=[], **kw):
self.values = values
self.height = height
InPlaceEdit.__init__(self, listbox, item, value, **kw)
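    # Usage sketch (hypothetical choices): pops up a small list over the item
    # and stores the picked entry in .value:
    #   edit = InPlaceList(listbox, values=["mm", "cm", "inch"])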
# ----------------------------------------------------------------------
def createWidget(self):
self.frame.config(relief=RAISED)
sb = Scrollbar(self.frame)
sb.pack(side=RIGHT, fill=Y)
if self.height is None:
if len(self.values)<10:
self.height = max(len(self.values)+1,3)
else:
self.height = 10
self.edit = ExListbox(self.frame,
selectmode=BROWSE,
height=self.height,
#background="White",
yscrollcommand=sb.set)
sb.config(command=self.edit.yview)
self.edit.pack(side=LEFT, fill=BOTH, expand=YES)
self.edit.bind('<ButtonRelease-1>', self.ok)
self.edit.focus_set()
# ----------------------------------------------------------------------
def set(self, value):
if value is None:
value = self.listbox.get(self.item)
# Fill&select listbox
for item in self.values:
self.edit.insert(END, item)
if item == value:
self.edit.activate(END)
self.edit.selection_set(END)
if len(self.edit.curselection()) == 0:
self.edit.activate(0)
self.edit.see(ACTIVE)
return value
# ----------------------------------------------------------------------
def get(self):
cur = self.edit.curselection()
if len(cur)>0:
return self.edit.get(cur[0])
else:
return ""
# ----------------------------------------------------------------------
def defaultBinds(self):
InPlaceEdit.defaultBinds(self)
try:
self.edit.unbind("<Up>")
self.edit.unbind("<Down>")
except AttributeError:
pass
# ----------------------------------------------------------------------
def resize(self, event=None):
if self.frame is None: return
bbox = self.listbox.bbox(self.item)
if bbox is None: return
x, y, item_width, item_height = bbox
list_width = self.listbox.winfo_width()
list_height = self.listbox.winfo_height()
h = item_height*self.height + 2
if y+h > list_height:
y = list_height - h
if y <= 0:
y = 0
h = list_height
try:
self.frame.place(in_=self.listbox,
x=x-1, y=y,
width=list_width, height=h,
bordermode=OUTSIDE)
self.frame.update_idletasks()
except TclError:
pass
#===============================================================================
class InPlaceColor(InPlaceEdit):
# ----------------------------------------------------------------------
def createWidget(self):
b = Button(self.frame, text="x",
padx=0, pady=0, command=self.clearColor)
b.pack(side=LEFT)
self.edit = Button(self.frame, command=self.selectColor)
self.edit.pack(side=RIGHT, expand=YES, fill=BOTH)
self.edit.focus_set()
# ----------------------------------------------------------------------
def set(self, value):
if value is None:
value = self.listbox.get(self.item)
self.value = value
if self.value is None or self.value=="": self.value = "White"
if value != "":
self.edit.config(text=value,
background=value,
activebackground=value)
return value
# ----------------------------------------------------------------------
def get(self):
return self.edit["text"]
# ----------------------------------------------------------------------
def selectColor(self):
self.frame.unbind("<FocusOut>")
try:
rgb, colorStr = askcolor(
title="Color",
initialcolor=self.value,
parent=self.listbox.master)
except TclError:
colorStr = None
if colorStr is not None:
colorStr = str(colorStr)
self.value = colorStr
self.edit.config(text=colorStr,
background=colorStr,
activebackground=colorStr)
self.frame.bind("<FocusOut>", self.cancel)
self.edit.focus_set()
# ----------------------------------------------------------------------
def clearColor(self):
self.frame.unbind("<FocusOut>")
self.value = None
self.edit.config(text="",
background="White",
activebackground="White")
self.frame.bind("<FocusOut>", self.cancel)
self.edit.focus_set()
#===============================================================================
class InPlaceMaxLength(InPlaceEdit):
def __init__(self, listbox, item=ACTIVE, value=None, maxlength=None, **kw):
self.maxlength = maxlength
InPlaceEdit.__init__(self, listbox, item, value, **kw)
# ----------------------------------------------------------------------
# Override method if another widget is requested
# ----------------------------------------------------------------------
def createWidget(self):
self.edit = MaxLengthEntry(self.frame,
maxlength=self.maxlength,
**self.kw)
self.edit.pack(expand=YES, fill=BOTH)
self.edit.focus_set()
#===============================================================================
class InPlaceText(InPlaceEdit):
# ----------------------------------------------------------------------
def show(self):
self.toplevel.bind("<FocusOut>", self.focusOut)
try:
self.toplevel.wait_visibility()
self.toplevel.grab_set()
self.toplevel.wait_window()
except TclError:
pass
# ----------------------------------------------------------------------
def defaultBinds(self):
InPlaceEdit.defaultBinds(self)
self.toplevel.bind("<ButtonRelease-1>", self.clickOk)
self.toplevel.bind("<ButtonRelease-3>", self.clickCancel)
#self.edit.bind("<ButtonRelease-1>", self.clickOk)
#self.edit.bind("<ButtonRelease-3>", self.clickCancel)
self.edit.bind("<Shift-Return>", self.shiftReturn)
self.edit.bind("<Escape>", self.cancel)
# ----------------------------------------------------------------------
def createWidget(self):
self.toplevel = Toplevel(self.listbox)
self.toplevel.transient(self.listbox)
self.toplevel.overrideredirect(1)
self.edit = Text(self.toplevel, width=70, height=10,
background="White", undo=True)
self.edit.pack(side=LEFT, expand=YES, fill=BOTH)
self.edit.focus_set()
# ----------------------------------------------------------------------
def resize(self, event=None):
if self.frame is None: return
bbox = self.listbox.bbox(self.item)
if bbox is None: return
x, y, w, h = bbox
x += self.listbox.winfo_rootx()
y += self.listbox.winfo_rooty()
w = self.listbox.winfo_width()
try:
self.toplevel.wm_geometry("+%d+%d" % (x,y))
except TclError:
pass
# ----------------------------------------------------------------------
def set(self, value):
if self.frame is None: return
if value is None:
value = self.listbox.get(self.item)
self.edit.delete("0.0", END)
self.edit.insert("0.0", value)
self.edit.tag_add(SEL, "0.0", END)
return value
# ----------------------------------------------------------------------
def get(self):
if self.frame is None: return None
return self.edit.get("0.0", END).strip()
# ----------------------------------------------------------------------
def shiftReturn(self, event):
        # Empty binding so that Shift-Return does not trigger the "ok"
pass
# ----------------------------------------------------------------------
def clickOk(self, event):
# If clicked outside return ok
if event.x < 0 or \
event.y < 0 or \
event.x > self.toplevel.winfo_width() or \
event.y > self.toplevel.winfo_height():
self.ok(event)
# ----------------------------------------------------------------------
def clickCancel(self, event):
# If clicked outside return cancel
if event.x < 0 or \
event.y < 0 or \
event.x > self.toplevel.winfo_width() or \
event.y > self.toplevel.winfo_height():
self.cancel(event)
# ----------------------------------------------------------------------
def ok(self, event=None):
InPlaceEdit.ok(self, event)
self.toplevel.destroy()
return "break"
# ----------------------------------------------------------------------
def cancel(self, event=None):
InPlaceEdit.cancel(self, event)
self.toplevel.destroy()
return "break"
#===============================================================================
class InPlaceFile(InPlaceEdit):
# ----------------------------------------------------------------------
def __init__(self, listbox, item=ACTIVE, value=None,
title=None, filetypes=None,
save=True, **kw):
self.title = title
self.filetypes = filetypes
self._save = save
self._icon = PhotoImage(data=_SAVEICON)
InPlaceEdit.__init__(self, listbox, item, value, **kw)
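    # Usage sketch (hypothetical title and file types); set save=False to get
    # an "open file" dialog instead of "save as":
    #   edit = InPlaceFile(listbox,
    #                      title="Select output file",
    #                      filetypes=[("All files", "*")],
    #                      save=True)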
# ----------------------------------------------------------------------
def createWidget(self):
self.edit = Entry(self.frame, width=5, **self.kw)
self.edit.pack(side=LEFT, expand=YES, fill=BOTH)
b = Button(self.frame, image=self._icon,
padx=0, pady=0, command=self.fileDialog)
b.pack(side=RIGHT)
self.edit.focus_set()
# ----------------------------------------------------------------------
def fileDialog(self):
import bFileDialog
self.frame.unbind("<FocusOut>")
self.frame.grab_release()
if self._save:
fn = bFileDialog.asksaveasfilename(master=self.listbox,
title=self.title,
initialfile=self.value,
filetypes=self.filetypes)
else:
fn = bFileDialog.askopenfilename(master=self.listbox,
title=self.title,
initialfile=self.value,
filetypes=self.filetypes)
self.frame.grab_set()
#self.frame.bind("<FocusOut>", self.cancel)
self._icon = None
if len(fn) > 0:
self.edit.delete(0, END)
self.edit.insert(0, fn)
self.ok()
else:
self.cancel()
#=============================================================================
# PopupList
# Show a popup list on a top level and return selected item
#=============================================================================
class PopupList(Toplevel):
def __init__(self, master, items=None, selected=None, **kw):
Toplevel.__init__(self, master, **kw)
self.selected = selected
self.overrideredirect(1)
self.transient(master)
# Create the listbox inside the dropdown window
sb = Scrollbar(self)
sb.pack(side=RIGHT, fill=Y)
self._listbox = SearchListbox(self,
selectmode=BROWSE,
yscrollcommand=sb.set)
self._listbox.pack(side=LEFT, expand=YES, fill=BOTH)
sb.config(command=self._listbox.yview)
if items:
for item in items:
self._listbox.insert(END, item)
if selected == item:
self._listbox.selection_set(END)
self._listbox.activate(END)
            self._listbox.see(ACTIVE)
self._listbox.bind('<Escape>', self.close)
self._listbox.bind('<Return>', self._select)
self._listbox.bind('<KP_Enter>', self._select)
self._listbox.bind('<Tab>', self._select)
self._listbox.bind('<Shift-Tab>', self._select)
self._listbox.bind('<ButtonRelease-1>', self._release)
self.bind('<FocusOut>', self.close)
# ----------------------------------------------------------------------
def show(self, x, y):
self.deiconify()
if x is not None and y is not None:
self.geometry('+%d+%d' % (x,y))
self._listbox.focus_set()
#self.wait_visibility()
#self.grab_set()
self.wait_window()
return self.selected
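    # Usage sketch: show() blocks until an item is picked or the popup is
    # dismissed, and returns the selection (or the initial value):
    #   popup = PopupList(parent, items=["a", "b", "c"], selected="b")
    #   choice = popup.show(x, y)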
# ----------------------------------------------------------------------
def close(self, event=None):
self.grab_release()
self.destroy()
# ----------------------------------------------------------------------
def _select(self, event=None):
self.selected = self._listbox.get(ACTIVE)
self.close()
# ----------------------------------------------------------------------
def _release(self, event):
act = self._listbox.nearest(event.y)
self._listbox.activate(act)
self._select()
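    # ----------------------------------------------------------------------
    # Illustrative use (a sketch; `master` and the item strings are
    # placeholder values):
    #   popup = PopupList(master, items=["alpha","beta","gamma"],
    #                     selected="beta")
    #   choice = popup.show(100, 100)   # returns the selected item
    # ----------------------------------------------------------------------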
#=============================================================================
# Combobox
#=============================================================================
class Combobox(Frame):
def __init__(self, master, label=True, *args, **kwargs):
Frame.__init__(self, master, class_="Combobox")
Frame.config(self, padx=0, pady=0)
if "command" in kwargs:
self.command = kwargs.get("command")
del kwargs["command"]
else:
self.command = None
# Create entry and button
if label:
self._text = Label(self, relief=GROOVE, anchor=W, *args, **kwargs)
else:
self._text = Entry(self, *args, **kwargs)
self._text.pack(side=LEFT, expand=YES, fill=BOTH)
# Arrow button
self._post = IntVar()
self._post.trace("w", self._showList)
self._arrowBtn = Checkbutton(self,
text=u"\u25BC",
variable=self._post,
indicatoron=False,
padx=2, pady=0)
self._arrowBtn.pack(side=RIGHT, fill=Y)
# Bindings
self._text.bind('<Up>', self.postList)
self._text.bind('<Down>', self.postList)
self._text.bind('<Return>', self.postList)
self._text.bind('<KP_Enter>', self.postList)
self.bind('<Up>', self.postList)
self.bind('<Down>', self.postList)
self.bind('<Return>', self.postList)
self.bind('<KP_Enter>', self.postList)
if label:
self._text.bind('<Key-space>', self.postList)
if isinstance(self._text, Label):
self._text.bind('<Button-1>', self._togglePost)
# Need to unpost the popup if the entryfield is unmapped (eg:
# its toplevel window is withdrawn) while the popup list is
# displayed.
self._text.bind('<Unmap>', self.unpostList)
# Create a static popup window with dropdown list
self._popup = Toplevel(master)
self._popup.overrideredirect(1)
self._popup.transient(master)
self._popup.withdraw()
# Create the listbox inside the dropdown window
sb = Scrollbar(self._popup)
sb.pack(side=RIGHT, fill=Y)
self._listbox = SearchListbox(self._popup,
selectmode=BROWSE,
yscrollcommand=sb.set,
*args,
**kwargs)
self._listbox.pack(side=LEFT, expand=YES, fill=BOTH)
sb.config(command=self._listbox.yview)
# Bind events to the dropdown window.
self._listbox.bind('<Escape>', self.unpostList)
self._listbox.bind('<Return>', self._selectUnpost)
self._listbox.bind('<KP_Enter>', self._selectUnpost)
self._listbox.bind('<Tab>', self._selectUnpost)
self._listbox.bind('<Shift-Tab>', self._selectUnpost)
self._listbox.bind('<ButtonRelease-1>', self._release)
self._popup.bind('<FocusOut>', self._focusOut)
self._popup.bind('<Button-1>', self.popupClick)
self._popup.bind('<Button-3>', self.popupClick)
#self._popup.bind('<Shift-Tab>', self._selectUnpostPrev)
#self._popup.bind('<Tab>', self._selectUnpostNext)
#if sys.platform in ("linux","linux2"):
# self._popup.bind('<ISO_Left_Tab>', self._selectUnpostPrev)
# grab_set redirects all mouse events to the list even
# when the list is posted with a mouse click
#self._hide_on_next_release = False
self._grab_window = None
# ----------------------------------------------------------------------
# Unhide and post the list
# ----------------------------------------------------------------------
def _showList(self, a=False, b=False, c=False):
if self._post.get():
Balloon.hide()
self._grab_window = None
try:
self._grab_window = self.grab_current()
if self._grab_window is not None:
self._grab_window.grab_release()
except KeyError:
pass
if self._text.cget("state") == DISABLED:
self._post.set(False)
return
ExListbox.resetSearch()
self.beforeShow()
h = self._popup.winfo_height()
if h == 1:
self._popup.deiconify()
self._popup.lift()
self._popup.update_idletasks()
h = self._popup.winfo_height()
w = self._text.winfo_width() + self._arrowBtn.winfo_width()
x = self._text.winfo_rootx()
y = self._text.winfo_rooty() + self._text.winfo_height()
sh = self.winfo_screenheight()
if y + h > sh and y > sh / 2:
y = self._text.winfo_rooty() - h
self._popup.deiconify()
self._popup.geometry('%dx%d+%d+%d' % (w,h,x,y))
self._popup.lift()
self._popup.grab_set()
self._popup.update_idletasks()
# Grab the popup, so that all events are delivered to it, and
# set focus to the listbox, to make keyboard navigation
# easier.
#self._popup.grab_set()
#self._popup.focus_set()
self._listbox.focus_set()
self._showSelection()
elif self._popup.winfo_ismapped():
self._popup.grab_release()
if self._grab_window:
self._grab_window.grab_set()
self._grab_window = None
self._popup.withdraw()
self._arrowBtn.focus_set()
self.afterHide()
# ----------------------------------------------------------------------
def _showSelection(self):
lb = self._listbox
lb.selection_clear(0,END)
item = self.get()
# Test active
if lb.get(ACTIVE) != item:
# Scan list
for i in range(lb.size()):
if item == lb.get(i):
lb.activate(i)
lb.selection_set(ACTIVE)
lb.see(ACTIVE)
# ----------------------------------------------------------------------
def postList(self, event=None):
if self._arrowBtn.cget("state") != DISABLED:
self._post.set(True)
# ----------------------------------------------------------------------
def unpostList(self, event=None):
self._listbox.reset()
if self._arrowBtn.cget("state") != DISABLED:
self._post.set(False)
# ----------------------------------------------------------------------
def _togglePost(self, event):
if self._text.cget("state") != DISABLED:
self._post.set( not self._post.get() )
# ----------------------------------------------------------------------
def _focusOut(self, event):
try:
f = self._popup.focus_get()
except KeyError:
pass
else:
if f == self._popup or f == self._listbox:
return
self._focus = None
self.unpostList()
# ----------------------------------------------------------------------
def _selectUnpost(self, event=None):
if self._post.get():
sel = self._listbox.get(ACTIVE)
self.set(sel)
self.unpostList()
# ----------------------------------------------------------------------
def invoke(self):
if self.command is not None:
self.command()
# ----------------------------------------------------------------------
def _release(self, event):
act = self._listbox.nearest(event.y)
self._listbox.activate(act)
self._selectUnpost()
# ----------------------------------------------------------------------
def popupClick(self, event):
if event.x < 0 or event.y < 0 or \
event.x > self._popup.winfo_width() or \
event.y > self._popup.winfo_height():
self.unpostList()
# ----------------------------------------------------------------------
# The following methods are called before the show of the list ...
# ----------------------------------------------------------------------
def beforeShow(self):
pass
# ----------------------------------------------------------------------
# ... and after hide it
# The user should override them in case some special treatment is needed
# ----------------------------------------------------------------------
def afterHide(self):
pass
# ----------------------------------------------------------------------
# Public methods
# ----------------------------------------------------------------------
def get(self, first=None, last=None):
if first is None:
if isinstance(self._text, Label):
return self._text.cget("text")
else:
return self._text.get()
else:
return self._listbox.get(first, last)
# ----------------------------------------------------------------------
def set(self, txt):
if isinstance(self._text, Label):
self._text.config(text=txt)
else:
self._text.delete(0, END)
self._text.insert(0, txt)
self._text.update_idletasks()
self.invoke()
# ----------------------------------------------------------------------
def size(self):
return self._listbox.size()
# ----------------------------------------------------------------------
def clear(self):
self.clearLabel()
self.clearList()
# ----------------------------------------------------------------------
def clearLabel(self):
if isinstance(self._text, Label):
self._text.config(text="")
else:
self._text.delete(0, END)
# ----------------------------------------------------------------------
def clearList(self):
self._listbox.delete(0, END)
# ----------------------------------------------------------------------
def insert(self, index, *elements):
self._listbox.insert(index, *elements)
# ----------------------------------------------------------------------
def delete(self, first, last=None):
self._listbox.delete(first, last)
# ----------------------------------------------------------------------
def fill(self, items):
self.clearList()
for item in items:
self._listbox.insert(END, item)
# ----------------------------------------------------------------------
def select(self, index=None):
if index is None:
txt = self.get()
for i in range(self.size()):
if txt == self._listbox.get(i):
return i
return -1
elif 0 <= index < self._listbox.size():
self.set(self._listbox.get(index))
# ----------------------------------------------------------------------
def configure(self, **kw):
self._text.configure(**kw)
self._arrowBtn.configure(**kw)
config = configure
# ----------------------------------------------------------------------
def __setitem__(self, key, value):
self.configure({key:value})
# ----------------------------------------------------------------------
def cget(self, key):
return self._text.cget(key)
__getitem__ = cget
# ----------------------------------------------------------------------
def bindWidgets(self, event, func):
self._text.bind(event, func)
self._arrowBtn.bind(event, func)
#===============================================================================
# ExOptionMenu
#===============================================================================
class ExOptionMenu(OptionMenu):
def __init__(self, master, variable, value, *values, **kwargs):
OptionMenu.__init__(self, master, variable, value,
*values, **kwargs)
self.variable = variable
self.command = kwargs.get("command")
# ----------------------------------------------------------------------
def delete(self, from_=0, to_=END):
"""Delete items from menu"""
self["menu"].delete(from_, to_)
# ----------------------------------------------------------------------
def add(self, value):
"""Add an extra value to the menu"""
menu = self["menu"]
menu.add_command(label=value,
command=_setit(self.variable, value, None))
# ----------------------------------------------------------------------
def set(self, valueList, value=None):
"""
clear and reload the menu with a new set of options.
valueList - list of new options
value - initial value to set the optionmenu's menubutton to
"""
self['menu'].delete(0, END)
for item in valueList:
self['menu'].add_command(label=item,
command=_setit(self.variable, item, self.command))
if value:
self.variable.set(value)
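    # ----------------------------------------------------------------------
    # Illustrative use (a sketch; `parent` and the option strings are
    # placeholder values):
    #   var = StringVar()
    #   om  = ExOptionMenu(parent, var, "One", "Two")
    #   om.set(["Three","Four","Five"], value="Four")   # reload the options
    # ----------------------------------------------------------------------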
#===============================================================================
# Splitter Frame
#===============================================================================
class Splitter(Frame):
"""Base class for horizontal or vertical frame splitter"""
def __init__(self, master, split=0.5, horizontal=True, absolute=False):
Frame.__init__(self, master, class_="Splitter")
self.f1 = Frame(self, bd=1, relief=SUNKEN)
self.f2 = Frame(self, bd=1, relief=SUNKEN)
self.dragFrame = Frame(self, bd=1, relief=GROOVE)
self.dragFrame.bind("<B1-Motion>", self.motion) # Overridden
self.dragFrame.bind("<ButtonRelease-1>", self.placeChilds) # Overridden
self.dragFrame.bind("<Double-Button-1>", self.toggle)
self.split = split
self.save = split
self.absolute = absolute
self.setRange()
self.setOrientation(horizontal)
if self.absolute:
self.bind("<Configure>", self.placeChilds)
# ----------------------------------------------------------------------
def orient(self): return self._hori
def firstFrame(self): return self.f1
def secondFrame(self): return self.f2
# ----------------------------------------------------------------------
def setOrientation(self, horizontal=True):
self._hori = horizontal # True horizontal / False vertical
self.f1.place_forget()
self.f2.place_forget()
if self._hori:
self.dragFrame["cursor"] = "sb_h_double_arrow"
else:
self.dragFrame["cursor"] = "sb_v_double_arrow"
self.placeChilds()
# ----------------------------------------------------------------------
def swapOrient(self):
self.dragFrame.place_forget()
self.setOrientation(not self._hori)
# ----------------------------------------------------------------------
def equal(self):
self._setSplit(0.5)
self.placeChilds()
# ----------------------------------------------------------------------
def minimize(self):
self._setSplit(0.0)
self.placeChilds()
# ----------------------------------------------------------------------
def maximize(self):
self._setSplit(1.0)
self.placeChilds()
# ----------------------------------------------------------------------
# Toggle position normally with double click
# ----------------------------------------------------------------------
def toggle(self, event=None):
if self.absolute:
if self.save == self.split: self.save = 100
if self.split > 20:
self.save = self.split
self.split = 1
else:
self.split = self.save
else:
if self.save == self.split: self.save = 0.3
if self.split <= self.min or self.split >= self.max:
self.split = self.save
elif self.split < 0.5:
self.split = self.min
else:
self.split = self.max
self.placeChilds()
# ----------------------------------------------------------------------
# Set acceptable range
# ----------------------------------------------------------------------
def setRange(self, min=0.005, max=0.995):
if min<0.01: min=0.01
if max>0.99: max=0.99
self.margin = 5 # pixels on absolute
self.min = min
self.max = max
# ----------------------------------------------------------------------
def _setSplit(self, newSplit):
if newSplit == self.split: return
if self.absolute:
if newSplit <= self.margin: newSplit = self.margin
if self._hori:
if newSplit + self.margin >= self.winfo_width():
newSplit = self.winfo_width() - self.margin
else:
if newSplit + self.margin >= self.winfo_height():
newSplit = self.winfo_height() - self.margin
else:
if newSplit <= self.min: newSplit = self.min
if newSplit >= self.max: newSplit = self.max
self.save = self.split
self.split = newSplit
# ----------------------------------------------------------------------
# Set the split position
# ----------------------------------------------------------------------
def setSplit(self, newSplit):
"""Change the spliting position"""
self._setSplit(newSplit)
self.placeChilds()
# ----------------------------------------------------------------------
def motion(self, event):
if self.absolute:
if self._hori:
# Horizontal
self._setSplit(event.x_root - self.winfo_rootx())
self.dragFrame.place(x=self.split-2,
relheight=1.0, width=5)
else:
pass
else:
if self._hori:
# Horizontal
self._setSplit(float(event.x_root - self.winfo_rootx()) / \
float(self.winfo_width()))
self.dragFrame.place(relx=self.split, x=-2,
relheight=1.0, width=5)
else:
# Vertical
self._setSplit(float(event.y_root - self.winfo_rooty()) / \
float(self.winfo_height()))
self.dragFrame.place(rely=self.split, y=-2,
relwidth=1.0, height=5)
# ----------------------------------------------------------------------
# Place the two frames
# ----------------------------------------------------------------------
def placeChilds(self, event=None):
"""(Re)Place the two frames"""
if self.absolute:
if self._hori:
# Horizontal
self.f1.place( relx=0.0,
width=self.split,
relheight=1.0)
self.f2.place( x=self.split+3,
width=self.winfo_width()-self.split-4,
relheight=1.0)
self.dragFrame.place(x=self.split-1,
relheight=1.0,
width=3)
else:
# Vertical
self.f1.place( rely=0.0,
height=self.split,
relwidth=1.0)
self.f2.place( y=self.split+3,
                               height=self.winfo_height()-self.split-4,
relwidth=1.0)
self.dragFrame.place(y=self.split-1,
relwidth=1.0,
height=3)
else:
if self._hori:
# Horizontal
self.f1.place( relx=0.0,
relwidth=self.split,
relheight=1.0)
self.f2.place( relx=self.split,
x=3,
relwidth=1.0-self.split,
relheight=1.0)
self.dragFrame.place(relx=self.split,
x=-1,
relheight=1.0,
width=3)
else:
# Vertical
self.f1.place( rely=0.0,
relheight=self.split,
relwidth=1.0)
self.f2.place( rely=self.split,
y=2,
relheight=1.0-self.split,
relwidth=1.0)
self.dragFrame.place(rely=self.split,
y=-2,
relwidth=1.0,
height=4)
#===============================================================================
# Horizontal Splitter
#===============================================================================
class HSplitter(Splitter):
"""Horizontal frame spliter"""
def __init__(self, master, split=0.5, absolute=False):
Splitter.__init__(self, master, split, True, absolute)
# ----------------------------------------------------------------------
def leftFrame(self): return self.firstFrame()
def rightFrame(self): return self.secondFrame()
#===============================================================================
# Vertical Splitter
#===============================================================================
class VSplitter(Splitter):
"""Vertical frame spliter"""
def __init__(self, master, split=0.5, absolute=False):
Splitter.__init__(self, master, split, False, absolute)
# ----------------------------------------------------------------------
def topFrame(self): return self.firstFrame()
def bottomFrame(self): return self.secondFrame()
#===============================================================================
# Splitter Node
#-------------------------------------------------------------------------------
class _SplitNode:
def __init__(self, parent, widget, pos=0.5, hori=True):
self.parent = parent # Parent of node
self.left = None # Left child node
self.right = None # Right child node (None if end node)
self.pos = pos # Splitting position (<0.0 inverts hori)
self.hori = hori # Horizontal splitting (Vertical frames)
if self.pos < 0.0:
self.pos = -self.pos
self.hori = not self.hori
self.child = widget
self.split = None # drawing frame
self._xy = 0.0 # Absolute limits of window size for splitter
self._wh = 1.0
# ----------------------------------------------------------------------
def end(self): return self.child is not None
def full(self): return self.left is not None and self.right is not None
# ----------------------------------------------------------------------
def getpos(self):
if self.hori:
return self.pos
else:
return -self.pos
# ----------------------------------------------------------------------
def setCursor(self):
if self.split:
if self.hori:
self.split["cursor"] = "sb_h_double_arrow"
else:
self.split["cursor"] = "sb_v_double_arrow"
# ----------------------------------------------------------------------
def makeSplit(self, master, drag):
self.split = Frame(master, bd=1, relief=GROOVE)
self.split.bind("<B1-Motion>",drag)
#split.bind("<ButtonRelease-1>", self.placeChilds)
#split.bind("<Double-Button-1>", self.toggle)
self.setCursor()
# ----------------------------------------------------------------------
def printNode(self, depth):
if self.left: self.left.printNode(depth+1)
if self.child:
say(" "*depth, self.child, self.child["bg"])
else:
say(" "*depth, " ======== H=",self.hori," pos=",self.pos)
if self.right: self.right.printNode(depth+1)
#===============================================================================
# Tree Splitter allows any nesting of splitting using a tree structure
#===============================================================================
class TreeSplitter(Frame):
"""Splitter using a tree structure"""
def __init__(self, master, **kw):
Frame.__init__(self, master, class_="TreeSplitter", **kw)
self.tree = None
self.width = 3
self.border = 0.01
self._maxchild = None
self._splitters= {} # Dictionary of splitters for faster lookup
self._drag = None
self._dragFrame = None
self._drag_x_root = -1
self._drag_y_root = -1
# ----------------------------------------------------------------------
def isempty(self): return self.tree is None
# ----------------------------------------------------------------------
# Add a new node under a parent
# ----------------------------------------------------------------------
def add(self, parent, child, pos=0.5, hori=True):
node = self.node(child)
if node is not None: return node
if isinstance(parent, Widget):
parent = self.node(parent)
node = self._add(parent, child, pos, hori)
self.placeChilds()
return node
# ----------------------------------------------------------------------
def _add(self, parent, child, pos=0.5, hori=True):
node = _SplitNode(parent, child, pos, hori)
if parent is None:
# Add to top
if self.tree is None:
self.tree = node # Set up the root node
self._maxchild = child
else:
oldtree = self.tree
self.tree = parent = _SplitNode(None, None, pos, hori)
parent.left = node
node.parent = parent
parent.right = oldtree
oldtree.parent = parent
self._maxchild = None
else:
if parent.end(): # End node with only a child
# Keep parent the same and make a new node for both childs
parent.left = _SplitNode(parent, parent.child, pos, hori)
parent.right = node
parent.child = None
else:
raise Exception("Parent node is full")
self._maxchild = None
if parent and parent.child is None:
parent.makeSplit(self, self.dragSplitter)
self._splitters[parent.split] = parent
self.placeChilds()
return node
# ----------------------------------------------------------------------
# Remove a child from the frame
# ----------------------------------------------------------------------
def remove(self, node):
if isinstance(node,Widget):
node = self.node(node)
if node is None: return
if node.child is self._maxchild: self._maxchild = None
self._remove(node)
self.placeChilds()
# ----------------------------------------------------------------------
def _remove(self, node):
if not node.end():
raise Exception("Only end nodes can be removed")
if node.child is not None: node.child.place_forget()
if node.split is not None:
del self._splitters[node.split]
node.split.place_forget()
parent = node.parent
if parent is None:
self.tree = None
elif parent.right is node:
# re-parent left node
grandpa = parent.parent
if grandpa is None: # root tree
self.tree = parent.left
elif grandpa.left is parent:
grandpa.left = parent.left
else:
grandpa.right = parent.left
parent.left.parent = grandpa
if parent.split is not None:
del self._splitters[parent.split]
parent.split.place_forget()
elif parent.left is node:
# re-parent right node
grandpa = parent.parent
if grandpa is None: # root tree
self.tree = parent.right
elif grandpa.left is parent:
grandpa.left = parent.right
else:
grandpa.right = parent.right
parent.right.parent = grandpa
if parent.split is not None:
del self._splitters[parent.split]
parent.split.place_forget()
else:
raise Exception("TreeSplitter is broken")
# ----------------------------------------------------------------------
# Replace the child of the node
# ----------------------------------------------------------------------
def replace(self, node, child):
place = node.child.place_info()
if self._maxchild is node.child:
self._maxchild = child
node.child.place_forget()
node.child = child
node.child.place(**place)
# ----------------------------------------------------------------------
# Clean up the whole tree
# ----------------------------------------------------------------------
def removeAll(self):
self.__remove(self.tree)
self.tree = None
self._splitters = {}
self._maxchild = None
# ----------------------------------------------------------------------
def __remove(self, node):
if node is None: return
self.__remove(node.left)
self.__remove(node.right)
if node.child is not None: node.child.place_forget()
if node.split is not None: node.split.place_forget()
# ----------------------------------------------------------------------
# Depending on rpn
# if None: Return RPN expression of the tree
# else: Create the tree from the rpn expression
# ----------------------------------------------------------------------
def RPN(self, rpn=None):
if rpn is not None:
self.removeAll()
stack = []
for item in rpn:
if isinstance(item, Widget):
stack.append(_SplitNode(None, item))
else:
try:
right = stack.pop()
left = stack.pop()
except IndexError:
break
node = _SplitNode(None, None, item)
node.makeSplit(self, self.dragSplitter)
self._splitters[node.split] = node
node.left = left
left.parent = node
node.right = right
right.parent = node
stack.append(node)
try:
self.tree = stack.pop()
self.placeChilds()
except IndexError:
pass
else:
rpn = []
self.__rpn(self.tree, rpn)
return rpn
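    # ----------------------------------------------------------------------
    # Illustrative RPN layout (a sketch; `tsplit` stands for a TreeSplitter
    # instance and w1..w3 for child widgets already added to it):
    #   rpn = tsplit.RPN()          # e.g. [w1, w2, 0.5, w3, -0.3]
    #   tsplit.RPN(rpn)             # rebuilds the same tree later
    # A widget entry pushes a leaf node; a number combines the two previous
    # entries, a negative value flipping the split orientation (see
    # _SplitNode.__init__).
    # ----------------------------------------------------------------------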
# ----------------------------------------------------------------------
def __rpn(self, node, rpn):
if node is None: return
self.__rpn(node.left, rpn)
self.__rpn(node.right, rpn)
if node.child is not None:
rpn.append(node.child)
else:
rpn.append(node.getpos())
# ----------------------------------------------------------------------
def printTree(self):
if self.tree: self.tree.printNode(0)
# ----------------------------------------------------------------------
def childs(self):
"""Return list of child nodes"""
lst = []
self.__childNode(self.tree, lst)
return lst
# ----------------------------------------------------------------------
def __childNode(self, node, lst):
if node is None: return
self.__childNode(node.left, lst)
self.__childNode(node.right, lst)
if node.child is not None: lst.append(node.child)
# ----------------------------------------------------------------------
# return node that has as child the widget
# ----------------------------------------------------------------------
def __searchWidget(self, node, widget):
if node is None: return None
n = self.__searchWidget(node.left, widget)
if n is not None: return n
n = self.__searchWidget(node.right, widget)
if n is not None: return n
if node.child is widget: return node
return None
# ----------------------------------------------------------------------
def node(self, widget):
if widget is None: return None
return self.__searchWidget(self.tree, widget)
# ----------------------------------------------------------------------
def __placeForget(self, node):
if node is None: return
if node.split is not None: node.split.place_forget()
if node.child is not None: node.child.place_forget()
self.__placeForget(node.left)
self.__placeForget(node.right)
# ----------------------------------------------------------------------
def __placeNode(self, node, x, y, w, h):
if node is None: return
#say("placeNode", node, node.child)
#say(" ",x, y, w, h)
if node.end():
# Place the child
if x>0.0: xx = self.width
else: xx = 0
if x+w<1.0: ww = -self.width-xx
else: ww = 0
if y>0.0: yy = self.width
else: yy = 0
if y+h<1.0: hh = -self.width-yy
else: hh = 0
node.child.place(in_ = self,
relx = x,
x = xx,
relwidth = w,
width = ww,
rely = y,
y = yy,
relheight= h,
height = hh)
return
# Place the splitter
if node.hori: # Splitting along X => Vertical frames
node._xy = x
node._wh = w
pos = x + w*node.pos
sw = pos - x
if sw <= self.border:
pos = min(x+self.border, x+w-self.border)
sw = pos - x
if y>0.0: yy = self.width
else: yy = 0
if y+h<1.0: hh = -self.width
else: hh = 0
node.split.place(in_ = self,
relx = pos,
x = -self.width,
relwidth = 0.0,
width = 2*self.width,
rely = y,
y = yy,
relheight = h,
height = hh)
self.__placeNode(node.left, x, y, sw, h)
self.__placeNode(node.right, x+sw, y, w-sw, h)
else: # Splitting along Y => Horizontal frames
node._xy = y
node._wh = h
pos = y + h*node.pos
sh = pos - y
if sh <= self.border:
                pos = min(y+self.border, y+h-self.border)
sh = pos - y
if x>0.0: xx = self.width
else: xx = 0
if x+w<1.0: ww = -self.width
else: ww = 0
node.split.place(in_ = self,
relx = x,
x = xx,
relwidth = w,
width = ww,
rely = pos,
y = -self.width,
relheight = 0.0,
height = 2*self.width)
self.__placeNode(node.left, x, y, w, sh)
self.__placeNode(node.right, x, y+sh, w, h-sh)
# ----------------------------------------------------------------------
# Place the frames [1..3]
# ----------------------------------------------------------------------
def placeChilds(self):
if self.tree is None: return
if self._maxchild is not None:
self.__placeForget(self.tree)
self._maxchild.place(in_ = self,
relx=0.0, x=0, relwidth=1.0, width=0,
rely=0.0, y=0, relheight=1.0, height=0)
else:
self.__placeNode(self.tree, 0.0, 0.0, 1.0, 1.0)
# ----------------------------------------------------------------------
# drag splitter and reposition childs
# ----------------------------------------------------------------------
def dragSplitter(self, event):
node = self._splitters[event.widget]
if node.hori:
pos = float(event.x_root - self.winfo_rootx()) / \
float(self.winfo_width())
else:
pos = float(event.y_root - self.winfo_rooty()) / \
float(self.winfo_height())
# Absolute positioning
pos = min(max(pos,self.border),1.0-self.border)
# Convert to relative
node.pos = (pos - node._xy) / node._wh
self.placeChilds()
# ----------------------------------------------------------------------
def maximize(self, child=None):
if self._maxchild is child:
self._maxchild = None
else:
self._maxchild = child
self.placeChilds()
# ----------------------------------------------------------------------
def maxchild(self):
return self._maxchild
# ----------------------------------------------------------------------
# return node containing x,y position in absolute coordinates
# ----------------------------------------------------------------------
def nodeContaining(self, node, x, y):
if node is None: return None
n = self.nodeContaining(node.left, x, y)
if n is not None: return n
n = self.nodeContaining(node.right, x, y)
if n is not None: return n
if node.child is not None:
rx = node.child.winfo_rootx()
ry = node.child.winfo_rooty()
if rx <= x <= rx+node.child.winfo_width() and \
ry <= y <= ry+node.child.winfo_height():
return node
return None
# ----------------------------------------------------------------------
# Find position in screen
# ----------------------------------------------------------------------
def reposition(self, node, event):
if self._maxchild is not None: return False
# First check absolute placement
x = float(event.x_root - self.winfo_rootx()) / float(self.winfo_width())
y = float(event.y_root - self.winfo_rooty()) / float(self.winfo_height())
if x<0.0 or x>1.0 or y<0.0 or y>1.0:
return
if x<0.1 or x>0.9 or y<0.1 or y>0.9:
x1 = 1.0-x
y1 = 1.0-y
self._remove(node)
newnode = self._add(None, node.child)
parent = self.tree
if x>y and x>y1:
# Move to right
parent.hori = True
                if parent.right is not None: # Swap to put the new node on the RIGHT
parent.left, parent.right = parent.right, parent.left
elif x1>y and x1>y1:
# Move to left
parent.hori = True
elif y>x and y>x1:
# Move to bottom
parent.hori = False
                if parent.right is not None: # Swap to put the new node at the BOTTOM
parent.left, parent.right = parent.right, parent.left
else:
# Move to top
parent.hori = False
parent.setCursor()
self.placeChilds()
return True
# Place inside another widget
overnode = self.nodeContaining(self.tree, event.x_root, event.y_root)
if overnode is None or overnode is node: return False
overwidget = overnode.child
# Then inside other widgets
x = float(event.x_root - overwidget.winfo_rootx()) / float(overwidget.winfo_width())
y = float(event.y_root - overwidget.winfo_rooty()) / float(overwidget.winfo_height())
x1 = 1.0-x
y1 = 1.0-y
if 0.4<x<0.6 and 0.4<y<0.6:
# Swap children
overnode.child, node.child = node.child, overnode.child
else:
self._remove(node)
overnode = self.node(overwidget) # Maybe it has changed
newnode = self._add(overnode, node.child)
parent = newnode.parent
if x>y and x>y1:
# Move to right
parent.hori = True
elif x1>y and x1>y1:
# Move to left
parent.hori = True
                if parent.right is not None: # Swap to put the new node on the LEFT
parent.left, parent.right = parent.right, parent.left
elif y>x and y>x1:
# Move to bottom
parent.hori = False
else:
# Move to top
parent.hori = False
if parent.right is not None: # Swap to move to TOP
parent.left, parent.right = parent.right, parent.left
parent.setCursor()
self.placeChilds()
return True
# ----------------------------------------------------------------------
# Event handlers for dragging and placing
# Bind to <Button-1>
# ----------------------------------------------------------------------
def dragStart(self, event):
self._drag = None
self._dragFrame = None
self._drag_x_root = event.x_root
self._drag_y_root = event.y_root
# ----------------------------------------------------------------------
# Bind to <B1-Motion>
# ----------------------------------------------------------------------
def dragMove(self, event):
if self._maxchild is not None: return
if self.tree.child is not None: return # Only one node
if self._drag is None:
if abs(self._drag_x_root - event.x_root)>10 or \
abs(self._drag_y_root - event.y_root)>10:
self["cursor"] = "hand1"
self._drag = self.nodeContaining(self.tree,
self._drag_x_root,
self._drag_y_root)
if self._drag:
self._dragFrame = Frame(self._drag.child.master,
relief=RIDGE,
borderwidth=2*self.width,
bg="LightYellow")
elif self._dragFrame is not None:
# First check absolute placement
sx = float(self.winfo_rootx())
sy = float(self.winfo_rooty())
sw = float(self.winfo_width())
sh = float(self.winfo_height())
x = (float(event.x_root) - sx) / sw
y = (float(event.y_root) - sy) / sh
if x<0.0 or x>1.0 or y<0.0 or y>1.0:
self._dragFrame.place_forget()
return
if x<0.1 or x>0.9 or y<0.1 or y>0.9:
x1 = 1.0-x
y1 = 1.0-y
if x>y and x>y1:
# Move to right
self._dragFrame.place(in_=self,
relx = 0.5,
relwidth = 0.5,
rely = 0.0,
relheight = 1.0)
elif x1>y and x1>y1:
# Move to left
self._dragFrame.place(in_=self,
relx = 0.0,
relwidth = 0.5,
rely = 0.0,
relheight = 1.0)
elif y>x and y>x1:
# Move to bottom
self._dragFrame.place(in_=self,
relx = 0.0,
relwidth = 1.0,
rely = 0.5,
relheight = 0.5)
else:
# Move to top
self._dragFrame.place(in_=self,
relx = 0.0,
relwidth = 1.0,
rely = 0.0,
relheight = 0.5)
self._dragFrame.lift()
return
# Test inside a widget
over = self.nodeContaining(self.tree,
event.x_root,
event.y_root)
if over is None or over is self._drag:
self._dragFrame.place_forget()
return
overwidget = over.child
# Then inside other widgets
wx = float(overwidget.winfo_rootx())
wy = float(overwidget.winfo_rooty())
ww = float(overwidget.winfo_width())
wh = float(overwidget.winfo_height())
x = (float(event.x_root) - wx) / ww
y = (float(event.y_root) - wy) / wh
x1 = 1.0-x
y1 = 1.0-y
if 0.4<x<0.6 and 0.4<y<0.6:
# Swap children
self._dragFrame.place(in_=self,
relx = (wx-sx)/sw,
relwidth = ww/sw,
rely = (wy-sy)/sh,
relheight = wh/sh)
else:
if x>y and x>y1:
                    # Move to right
self._dragFrame.place(in_=self,
relx = (wx+ww/2.0-sx)/sw,
relwidth = ww/sw/2.0,
rely = (wy-sy)/sh,
relheight = wh/sh)
elif x1>y and x1>y1:
                    # Move to left
self._dragFrame.place(in_=self,
relx = (wx-sx)/sw,
relwidth = ww/sw/2.0,
rely = (wy-sy)/sh,
relheight = wh/sh)
elif y>x and y>x1:
# Move to bottom
self._dragFrame.place(in_=self,
relx = (wx-sx)/sw,
relwidth = ww/sw,
rely = (wy+wh/2.0-sy)/sh,
relheight = wh/sh/2.0)
else:
# Move to top
self._dragFrame.place(in_=self,
relx = (wx-sx)/sw,
relwidth = ww/sw,
rely = (wy-sy)/sh,
relheight = wh/sh/2.0)
# ----------------------------------------------------------------------
# Bind to <ButtonRelease-1>
# ----------------------------------------------------------------------
def dragEnd(self, event):
if self._maxchild is not None: return
if self._dragFrame is None: return
if self._drag:
self["cursor"] = ""
self._dragFrame.place_forget()
self._dragFrame = None
return self.reposition(self._drag, event)
return False
#=============================================================================
# Display a balloon message (only static methods)
#=============================================================================
class Balloon:
_top = None
_widget = None
font = ("Helvetica","-12")
foreground = "Black"
background = "LightYellow"
delay = 1500
x_mouse = 0
y_mouse = 0
# ----------------------------------------------------------------------
# set a balloon message to a widget
# ----------------------------------------------------------------------
@staticmethod
def set(widget, help):
widget._help = help
widget.bind('<Any-Enter>', Balloon.enter)
widget.bind('<Any-Leave>', Balloon.leave)
widget.bind('<Key>', Balloon.hide)
# ----------------------------------------------------------------------
@staticmethod
def enter(event):
if Balloon._widget is event.widget: return
Balloon._widget = event.widget
Balloon.x_mouse = event.x_root
Balloon.y_mouse = event.y_root
return event.widget.after(Balloon.delay, Balloon.show)
# ----------------------------------------------------------------------
@staticmethod
def leave(event=None):
Balloon._widget = None
if Balloon._top is None: return
try:
if Balloon._top.winfo_ismapped():
Balloon._top.withdraw()
except TclError:
Balloon._top = None
hide=leave
# ----------------------------------------------------------------------
@staticmethod
def setWidget(widget, x, y):
Balloon._widget = widget
Balloon.x_mouse = x
Balloon.y_mouse = y
# ----------------------------------------------------------------------
@staticmethod
def show():
try:
if Balloon._widget is None: return
widget = Balloon._widget
if Balloon._top is None:
Balloon._top = Toplevel()
Balloon._top.overrideredirect(1)
Balloon._msg = Message(Balloon._top,
aspect=300,
foreground=Balloon.foreground,
background=Balloon.background,
relief=SOLID,
borderwidth=1,
font=Balloon.font)
Balloon._msg.pack()
Balloon._top.bind("<1>",Balloon.hide)
Balloon._msg.config(text=widget._help)
# Guess position
x = widget.winfo_rootx() + widget.winfo_width()//2
y = widget.winfo_rooty() + widget.winfo_height()+5
# if too far away use mouse
if abs(x - Balloon.x_mouse) > 30:
x = Balloon.x_mouse + 20
if abs(y - Balloon.y_mouse) > 30:
y = Balloon.y_mouse + 10
Balloon._top.wm_geometry("+%d+%d" % (x,y))
Balloon._top.deiconify()
Balloon._top.lift()
Balloon._top.update_idletasks()
# Check if it is hidden on bottom-right sides
move = False
if Balloon._top.winfo_rootx() + Balloon._top.winfo_width() >= \
Balloon._top.winfo_screenwidth():
x = Balloon._top.winfo_screenwidth() \
- Balloon._top.winfo_width() - 20
move = True
if Balloon._top.winfo_rooty() + Balloon._top.winfo_height() >= \
Balloon._top.winfo_screenheight():
y = Balloon._top.winfo_screenheight() \
- Balloon._top.winfo_height() - 10
move = True
if move:
Balloon._top.wm_geometry("+%d+%d" % (x,y))
except TclError:
Balloon._top = None
#===============================================================================
# A LabelFrame that can collapse/expand
#===============================================================================
class ExLabelFrame(LabelFrame):
def __init__(self, master, *args, **kwargs):
if "command" in kwargs:
self.command = kwargs.get("command")
del kwargs["command"]
else:
self.command = None
LabelFrame.__init__(self, master, *args, **kwargs)
self.frame = Frame(self)
self.frame.pack(expand=YES, fill=BOTH)
self.bind("<Button-1>", self. click)
if self["height"]==0:
self["height"] = 20
self.width = self["width"]
# ----------------------------------------------------------------------
def click(self, event=None):
if self.frame.winfo_ismapped():
self.collapse()
else:
self.expand()
if self.command is not None:
self.command(event)
# ----------------------------------------------------------------------
def collapse(self):
self["width"] = self.winfo_width()
self.frame.pack_forget()
lbl = self["text"]
if lbl[-1] in (Unicode.BLACK_UP_POINTING_TRIANGLE, Unicode.BLACK_DOWN_POINTING_TRIANGLE):
lbl = lbl[:-1]
self["text"] = lbl+Unicode.BLACK_UP_POINTING_TRIANGLE
# ----------------------------------------------------------------------
def expand(self):
self["width"] = self.width
self.frame.pack(fill=BOTH)
lbl = self["text"]
if lbl[-1] in (Unicode.BLACK_UP_POINTING_TRIANGLE, Unicode.BLACK_DOWN_POINTING_TRIANGLE):
self["text"] = lbl[:-1]
# ----------------------------------------------------------------------
def isexpanded(self):
return self.frame.winfo_ismapped()
#================================================================================
# ScrolledFrame by Bruno
#================================================================================
class ScrolledFrame(Frame):
# ----------------------------------------------------------------------
def __init__(self, master=None, stretch=True, cnf={}, **kw):
Frame.__init__(self,master,cnf,**kw)
self.client = Frame(self,border=0)
# width and height of Scrolledframe
self.W = 1.0
self.H = 1.0
# top left corner coordinates of client frame
self.client_x = 0
self.client_y = 0
# width and height of client frame
self.client_w = 1.0
self.client_h = 1.0
# scrollcommands (default)
self.xscrollcommand=lambda *args:None
self.yscrollcommand=lambda *args:None
# scrollincrements
self.xscrollincrement = 15
self.yscrollincrement = 15
# stretches
self.stretch = stretch
self.stretch_x = stretch
self.stretch_y = stretch
#self.bind("<Expose>",self.updateScrollRegion)
self.bind("<Configure>",self.updateScrollRegion)
# ----------------------------------------------------------------------
    def cget(self,item):
        if not hasattr(self,item):
            return Frame.cget(self,item)
        else:
            return getattr(self,item)
__getitem__ = cget
def __setitem__(self,item,value):self.configure({item:value})
# ----------------------------------------------------------------------
    def configure(self,cnf=None,**kw):
        if kw: cnf=Tkinter._cnfmerge((cnf,kw))
        for key in cnf.keys():
            if not hasattr(self,key):
                Frame.configure(self,{key:cnf[key]})
            else:
                setattr(self,key,cnf[key])
config=configure
# ----------------------------------------------------------------------
# Use this method to get the parent widget of the frame
# ----------------------------------------------------------------------
def __call__(self): return self.client
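    # ----------------------------------------------------------------------
    # Illustrative use (a sketch; `parent` is a placeholder for any
    # container widget): children are created inside the client frame that
    # calling the instance returns.
    #   sf = ScrolledFrame(parent)
    #   Label(sf(), text="scrolled content").pack()
    # ----------------------------------------------------------------------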
# ----------------------------------------------------------------------
def position(self):
return self.client_x, self.client_y
# ----------------------------------------------------------------------
def xview(self, event, value, units='pages'):
if event == "moveto":
fraction = float(value)
if fraction <= 0.0:
self.client_x = 0
elif fraction >= float(self.client_w-self.W)/self.client_w:
self.client_x = self.W-self.client_w
else:
self.client_x = int(-self.client_w*fraction)
elif event == "scroll":
amount=int(value)
if self.client_x == 0 and amount < 0:return
if self.W >= self.client_w: return
if self.client_x == self.W-self.client_w and amount > 0:return
if units == "units":
dx = self.xscrollincrement
            else:
                dx = self.W*0.99
            self.client_x -= amount*dx
else:
return
self.updateScrollx()
self.client.place_configure(x=self.client_x)
# ----------------------------------------------------------------------
def yview(self, event, value, units='pages'):
if event == "moveto":
fraction=float(value)
if fraction <= 0.0:
self.client_y = 0
elif fraction >= float(self.client_h-self.H)/self.client_h:
self.client_y = self.H-self.client_h
else:
self.client_y = int(-self.client_h*fraction)
elif event == "scroll":
amount=int(value)
if self.client_y == 0 and amount < 0:return
if self.H >= self.client_h: return
if self.client_y == self.H-self.client_h and amount > 0:return
if units == "units":
dy = self.yscrollincrement
else:
dy = self.H
self.client_y -= amount*dy
else:
return
self.updateScrolly()
self.client.place_configure(y=self.client_y)
# ----------------------------------------------------------------------
def moveto(self, x, y):
if x >= 0:
self.client_x = 0
elif x <= self.W - self.client_w:
self.client_x = self.W-self.client_w
else:
self.client_x = x
if y >= 0:
self.client_y = 0
elif y <= self.H - self.client_h:
self.client_y = self.H-self.client_h
else:
self.client_y = y
self.updateScrollx()
self.updateScrolly()
self.client.place_configure(x=self.client_x,y=self.client_y)
# ----------------------------------------------------------------------
def updateScrollx(self, *args):
if self.client_x >= 0:
low = 0.0
else:
low = -float(self.client_x)/self.client_w
if self.client_x+self.client_w <= self.W:
high = 1.0
else:
high = low+float(self.W)/self.client_w
if low <= 0.0:
self.client_x=0
elif high >= 1.0:
self.client_x=self.W-self.client_w
low=-float(self.client_x)/self.client_w
if self.client_w < self.W:
self.stretch_x = self.stretch
else:
self.stretch_x = False
self.xscrollcommand(low,high)
# ----------------------------------------------------------------------
def updateScrolly(self, *args):
if self.client_y >= 0:
low = 0.0
else:
low = -float(self.client_y)/self.client_h
if self.client_y+self.client_h <= self.H:
high = 1.0
else:
high = low+float(self.H)/self.client_h
if low <= 0.0:
self.client_y=0
elif high >= 1.0:
self.client_y=self.H-self.client_h
low=-float(self.client_y)/self.client_h
if self.client_h < self.H:
self.stretch_y = self.stretch
else:
self.stretch_y = False
self.yscrollcommand(low,high)
# ----------------------------------------------------------------------
def updateScrollRegion(self, *args):
if len(self.client.children):
self.client_w = self.client.winfo_reqwidth()
self.client_h = self.client.winfo_reqheight()
self.W = self.winfo_width()
self.H = self.winfo_height()
self.updateScrolly()
self.updateScrollx()
if self.stretch_y:
h = self.H
else:
h = self.client_h
if self.stretch_x:
w = self.W
else:
w = self.client_w
self.client.place_configure(
x=self.client_x,
y=self.client_y,
height=h,
width=w,
anchor="nw")
else:
self.xscrollcommand(0.0,1.0)
self.yscrollcommand(0.0,1.0)
self.client.place_forget()
#================================================================================
# The following is from idlelib (tabpage.py)
#================================================================================
class InvalidTabPage(Exception): pass
class AlreadyExists(Exception): pass
#===============================================================================
# A page tab frame button
#===============================================================================
class PageTab(Frame):
"""
a 'page tab' like framed button
"""
# ----------------------------------------------------------------------
def __init__(self, parent):
Frame.__init__(self, parent, borderwidth=2, relief=RIDGE)
self.button=Radiobutton(self, padx=5, pady=2, takefocus=FALSE,
indicatoron=FALSE, highlightthickness=0,
borderwidth=0, selectcolor=self.cget('bg'))
self.button.pack(fill=BOTH)
#===============================================================================
# Tab pages
#===============================================================================
class TabPageSet(Frame):
"""
a set of 'pages' with TabButtons for controlling their display
"""
# ----------------------------------------------------------------------
def __init__(self, parent, pageNames=[], top=True, hidetext=False, **kw):
"""
pageNames - a list of strings, each string will be the dictionary key
to a page's data, and the name displayed on the page's tab. Should be
specified in desired page order. The first page will be the default
and first active page.
"""
Frame.__init__(self, parent, kw)
self.grid_location(0, 0)
self.tabBar=Frame(self)
self.top = top
self.hidetext = hidetext
if top:
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self.tabBar.grid(row=0, column=0, sticky=EW)
else:
self.columnconfigure(1, weight=1)
self.rowconfigure(0, weight=1)
self.tabBar.grid(row=0, column=0, sticky=NSEW)
self.activePage=StringVar(self)
self.defaultPage=''
self.pages={}
for name in pageNames:
if isinstance(name,tuple):
self.addPage(*name)
else:
self.addPage(name)
# ----------------------------------------------------------------------
def page(self, name):
return self.pages[name]['page']
def __getitem__(self, name): return self.page(name)
# ----------------------------------------------------------------------
def changePage(self, pageName=None):
if pageName:
if pageName in self.pages.keys():
self.activePage.set(pageName)
else:
raise InvalidTabPage("Invalid TabPage Name")
## pop up the active 'tab' only
for page in self.pages.keys():
tab = self.pages[page]['tab']
tab.config(relief=RIDGE)
tab.button.config(background="DarkGray",
activebackground="DarkGray")
if self.hidetext: tab.button.config(text="")
tab = self.pages[self.getActivePage()]['tab']
tab.config(relief=RAISED)
tab.button.config(
background="LightGray",
activebackground="LightGray")
if self.hidetext:
tab.button.config(text=self.getActivePage())
## switch page
self.pages[self.getActivePage()]['page'].lift()
self.event_generate("<<ChangePage>>") #, data=pageName)
# ----------------------------------------------------------------------
def getActivePage(self):
return self.activePage.get()
# ----------------------------------------------------------------------
def addPage(self, pageName, icon=None):
if pageName in self.pages.keys():
raise AlreadyExists("TabPage Name Already Exists")
self.pages[pageName]={
'tab' : PageTab(self.tabBar),
'page': Frame(self, borderwidth=2, relief=RAISED) }
if icon:
self.pages[pageName]['tab'].button.config(text=pageName,
image=icon, compound=LEFT)
self.icons = True
else:
self.pages[pageName]['tab'].button.config(text=pageName)
self.pages[pageName]['tab'].button.config(
command=self.changePage,
variable=self.activePage,
value=pageName)
if self.top:
self.pages[pageName]['tab'].pack(side=LEFT)
self.pages[pageName]['page'].grid(row=1, column=0, sticky=NSEW)
else:
self.pages[pageName]['tab'].pack(side=TOP, fill=X)
self.pages[pageName]['page'].grid(row=0, column=1, sticky=NSEW)
if len(self.pages)==1: # adding first page
self.defaultPage=pageName
self.activePage.set(self.defaultPage)
self.changePage()
# ----------------------------------------------------------------------
def removePage(self, pageName):
if not pageName in self.pages.keys():
raise InvalidTabPage("Invalid TabPage Name")
self.pages[pageName]['tab'].pack_forget()
self.pages[pageName]['page'].grid_forget()
self.pages[pageName]['tab'].destroy()
self.pages[pageName]['page'].destroy()
del(self.pages[pageName])
# handle removing last remaining, or default, or active page
if not self.pages: # removed last remaining page
self.defaultPage=''
return
if pageName==self.defaultPage: # set a new default page
self.defaultPage=\
self.tabBar.winfo_children()[0].button.cget('text')
if pageName==self.getActivePage(): # set a new active page
self.activePage.set(self.defaultPage)
self.changePage()
# ----------------------------------------------------------------------
def renamePage(self, old, new):
if not old in self.pages.keys():
raise InvalidTabPage("Invalid TabPage Name")
self.pages[new] = self.pages[old]
del self.pages[old]
self.pages[new]['tab'].button.config(text=new, value=new)
if old == self.getActivePage():
self.activePage.set(new)
#===============================================================================
if __name__ == "__main__":
root = Tk()
frame = Frame(root)
frame.pack(side=TOP, fill=X)
p = ProgressBar(frame, background="DarkGray", height=24)
p.pack(side=TOP, fill=X)
def addProg(ev):
global p
p.setProgress(p.getProgress()[0]+10.0)
p.autoText()
p.bind('<1>', addProg)
frame = Frame(root)
frame.pack(side=BOTTOM, expand=YES, fill=BOTH)
hsplit = HSplitter(frame, 0.7)
vsplitL = VSplitter(hsplit.leftFrame(), 0.5)
vsplitR = VSplitter(hsplit.rightFrame(), 0.3)
Label(vsplitL.topFrame(), text='MultiListbox').pack()
mlb = MultiListbox(vsplitL.topFrame(),
(('Subject', 40, None),
('Sender', 20, None),
('Date', 10, None)))
for i in range(100):
mlb.insert(END, ('%d Important Message' % i,
'John Doe', '10/10/%04d' % (1900+i)))
mlb.pack(expand=YES, fill=BOTH)
l = Label(vsplitL.bottomFrame(), text="Combobox")
l.pack(side=TOP)
cb = Combobox(vsplitL.bottomFrame(), label=True)
cb.pack(side=BOTTOM, expand=YES, fill=X)
cb.fill(("one","two","three","four", "fix-six-seven-eight-nine-ten"))
cb.select(0)
Label(vsplitR.topFrame(), text='SearchListbox').pack()
lb = SearchListbox(vsplitR.topFrame(), selectmode=BROWSE,
exportselection=FALSE)
lb.insert(END,"Starting")
lb.insert(END,"Loading card database")
lb.insert(END,"Loading isotopes database")
lb.insert(END,"Layout initialization")
lb.insert(END,"Layout create Tree list")
lb.insert(END,"--Initialize Tk")
lb.insert(END,"After initialization of Tk")
lb.insert(END,"Creating frames")
lb.insert(END,"Creation of windows...")
lb.insert(END,"Writing ini file")
lb.insert(END,"Exiting program")
# lb.fill()
lb.pack(expand=YES, fill=BOTH)
lb.focus_set()
lb.ignoreCase = True
# lb.ignoreNonAlpha = False
# v = StringVar()
# lst = ["One", "Two", "Three", "Four"]
# v.set("One")
# o = ExOptionMenu(vsplitR.bottomFrame(), v, *lst)
# o.pack()
# o.delete()
# lst.reverse()
# for i in lst:
# o.add(i)
#test dialog
frame = vsplitR.bottomFrame()
tabPage=TabPageSet(frame, pageNames=['Foobar','Baz'])
tabPage.pack(expand=TRUE, fill=BOTH)
Label(tabPage['Foobar'], text='Foo', pady=20).pack()
Label(tabPage['Foobar'], text='Bar', pady=20).pack()
Label(tabPage['Baz'], text='Baz').pack()
entryPgName=Entry(frame)
buttonAdd=Button(frame, text='Add Page',
command=lambda:tabPage.addPage(entryPgName.get()))
buttonRemove=Button(frame, text='Remove Page',
command=lambda:tabPage.removePage(entryPgName.get()))
labelPgName=Label(frame, text='name of page to add/remove:')
buttonAdd.pack(padx=5, pady=5)
buttonRemove.pack(padx=5, pady=5)
labelPgName.pack(padx=5)
entryPgName.pack(padx=5)
tabPage.changePage()
b = Button(root, text="Exit", command=root.destroy)
Balloon.set(b, "Push me to exit")
b.pack()
e = FloatEntry(root)
Balloon.set(e, "Enter a floating point number")
e.pack()
e = IntegerEntry(root)
Balloon.set(e, "Enter an integer number")
e.pack()
root.geometry("800x600")
root.mainloop()
| gpl-2.0 | 1,673,235,192,395,705,000 | 30.584479 | 91 | 0.517866 | false |
natefoo/ansible-modules-extras | cloud/vmware/vmware_maintenancemode.py | 30 | 5856 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, VMware, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_maintenancemode
short_description: Place a host into maintenance mode
description:
    - Place an ESXi host into maintenance mode
- Support for VSAN compliant maintenance mode when selected
author: "Jay Jahns <[email protected]>"
version_added: "2.1"
notes:
- Tested on vSphere 5.5 and 6.0
requirements:
- "python >= 2.6"
- PyVmomi
options:
esxi_hostname:
description:
- Name of the host as defined in vCenter
required: True
    vsan:
description:
- Specify which VSAN compliant mode to enter
choices:
- 'ensureObjectAccessibility'
- 'evacuateAllData'
- 'noAction'
required: False
evacuate:
description:
- If True, evacuate all powered off VMs
choices:
- True
- False
default: False
required: False
timeout:
description:
- Specify a timeout for the operation
required: False
default: 0
state:
description:
- Enter or exit maintenance mode
choices:
- present
- absent
default: present
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Enter VSAN-Compliant Maintenance Mode
local_action:
module: vmware_maintenancemode
hostname: vc_host
username: vc_user
password: vc_pass
esxi_hostname: esxi.host.example
vsan: ensureObjectAccessibility
evacuate: yes
timeout: 3600
state: present
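
# For illustration, a matching task (same placeholder credentials and host
# names as above) that takes the host back out of maintenance mode:
- name: Exit Maintenance Mode
  local_action:
    module: vmware_maintenancemode
    hostname: vc_host
    username: vc_user
    password: vc_pass
    esxi_hostname: esxi.host.example
    timeout: 3600
    state: absent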
'''
RETURN = '''
hostsystem:
description: Name of vim reference
returned: always
type: string
sample: "'vim.HostSystem:host-236'"
hostname:
description: Name of host in vCenter
returned: always
type: string
sample: "esxi.local.domain"
status:
description: Action taken
    returned: always
type: string
sample: "ENTER"
'''
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def EnterMaintenanceMode(module, host):
if host.runtime.inMaintenanceMode:
module.exit_json(
changed=False,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='NO_ACTION',
msg='Host already in maintenance mode')
spec = vim.host.MaintenanceSpec()
if module.params['vsan']:
spec.vsanMode = vim.vsan.host.DecommissionMode()
spec.vsanMode.objectAction = module.params['vsan']
try:
task = host.EnterMaintenanceMode_Task(
module.params['timeout'],
module.params['evacuate'],
spec)
success, result = wait_for_task(task)
return dict(changed=success,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='ENTER',
msg='Host entered maintenance mode')
except TaskError:
module.fail_json(
msg='Host failed to enter maintenance mode')
def ExitMaintenanceMode(module, host):
if not host.runtime.inMaintenanceMode:
module.exit_json(
changed=False,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='NO_ACTION',
msg='Host not in maintenance mode')
try:
task = host.ExitMaintenanceMode_Task(
module.params['timeout'])
success, result = wait_for_task(task)
return dict(changed=success,
hostsystem=str(host),
hostname=module.params['esxi_hostname'],
status='EXIT',
msg='Host exited maintenance mode')
except TaskError:
module.fail_json(
msg='Host failed to exit maintenance mode')
def main():
spec = vmware_argument_spec()
spec.update(dict(
esxi_hostname=dict(required=True),
vsan=dict(required=False, choices=['ensureObjectAccessibility',
'evacuateAllData',
'noAction']),
evacuate=dict(required=False, type='bool', default=False),
        timeout=dict(required=False, type='int', default=0),
state=dict(required=False,
default='present',
choices=['present', 'absent'])))
module = AnsibleModule(argument_spec=spec)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
content = connect_to_api(module)
host = find_hostsystem_by_name(content, module.params['esxi_hostname'])
if not host:
module.fail_json(
msg='Host not found in vCenter')
if module.params['state'] == 'present':
result = EnterMaintenanceMode(module, host)
elif module.params['state'] == 'absent':
result = ExitMaintenanceMode(module, host)
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.vmware import *
if __name__ == '__main__':
main()
| gpl-3.0 | -8,286,113,376,052,002,000 | 26.622642 | 75 | 0.610997 | false |
sukwon0709/byterun | byterun/pyvm2.py | 1 | 37105 | """A pure-Python Python bytecode interpreter."""
# Based on:
# pyvm2 by Paul Swartz (z3p), from http://www.twistedmatrix.com/users/z3p/
from __future__ import print_function, division
import dis
import inspect
import linecache
import logging
import operator
import sys
import six
from six.moves import reprlib
PY3, PY2 = six.PY3, not six.PY3
from .pyobj import Cell, Frame, Block, Method, Function, Generator
log = logging.getLogger(__name__)
if six.PY3:
byteint = lambda b: b
else:
byteint = ord
# Create a repr that won't overflow.
repr_obj = reprlib.Repr()
repr_obj.maxother = 120
repper = repr_obj.repr
import inspect
import symex
class VirtualMachineError(Exception):
"""For raising errors in the operation of the VM."""
pass
class VirtualMachine(object):
def __init__(self, symbolic_on=False):
# The call stack of frames.
self.frames = []
# The current frame.
self.frame = None
self.return_value = None
self.last_exception = None
self.symbolic_on = symbolic_on
self.interesting_paths = {} # code obj -> list(path)
self._cur_interesting_path = []
self._co_to_decls = {}
self._co_to_envs = {}
def get_decl(self, code_obj):
return self._co_to_decls[code_obj]
def get_env(self, code_obj):
return self._co_to_envs[code_obj].copy()
def set_co_to_decls(self, co_to_decls):
self._co_to_decls.update(co_to_decls)
def set_co_to_envs(self, co_to_envs):
self._co_to_envs.update(co_to_envs)
@property
def cur_interesting_path(self):
return self._cur_interesting_path[-1]
def top(self):
"""Return the value at the top of the stack, with no changes."""
return self.frame.stack[-1]
def pop(self, i=0):
"""Pop a value from the stack.
Default to the top of the stack, but `i` can be a count from the top
instead.
"""
return self.frame.stack.pop(-1-i)
def push(self, *vals):
"""Push values onto the value stack."""
self.frame.stack.extend(vals)
def popn(self, n):
"""Pop a number of values from the value stack.
A list of `n` values is returned, the deepest value first.
"""
if n:
ret = self.frame.stack[-n:]
self.frame.stack[-n:] = []
return ret
else:
return []
def peek(self, n):
"""Get a value `n` entries down in the stack, without changing the stack."""
return self.frame.stack[-n]
def jump(self, jump):
"""Move the bytecode pointer to `jump`, so it will execute next."""
self.frame.f_lasti = jump
def push_block(self, type, handler=None, level=None):
if level is None:
level = len(self.frame.stack)
self.frame.block_stack.append(Block(type, handler, level))
def pop_block(self):
return self.frame.block_stack.pop()
def make_frame(self, code, callargs={}, f_globals=None, f_locals=None):
log.info("make_frame: code=%r, callargs=%s" % (code, repper(callargs)))
if f_globals is not None:
f_globals = f_globals
if f_locals is None:
f_locals = f_globals
elif self.frames:
f_globals = self.frame.f_globals
f_locals = {}
else:
f_globals = f_locals = {
'__builtins__': __builtins__,
'__name__': '__main__',
'__doc__': None,
'__package__': None,
}
f_locals.update(callargs)
frame = Frame(code, f_globals, f_locals, self.frame)
return frame
def push_frame(self, frame):
self.frames.append(frame)
self.frame = frame
def pop_frame(self):
self.frames.pop()
if self.frames:
self.frame = self.frames[-1]
else:
self.frame = None
def print_frames(self):
"""Print the call stack, for debugging."""
for f in self.frames:
filename = f.f_code.co_filename
lineno = f.line_number()
print(' File "%s", line %d, in %s' % (
filename, lineno, f.f_code.co_name
))
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line:
print(' ' + line.strip())
def resume_frame(self, frame):
frame.f_back = self.frame
val = self.run_frame(frame)
frame.f_back = None
return val
def add_interesting_path(self, code, path):
if code not in self.interesting_paths:
self.interesting_paths[code] = []
self.interesting_paths[code].append(path)
def fork(self, code, f_globals=None, f_locals=None):
newVM = VirtualMachine(self.symbolic_on)
newVM.interesting_paths = self.interesting_paths.copy()
newVM._co_to_decls = self._co_to_decls.copy()
newVM._co_to_envs = self._co_to_envs.copy()
newVM.frame = self.frame
val = newVM.run_code(code, f_globals=f_globals)
return val
def run_code(self, code, f_globals=None, f_locals=None):
frame = self.make_frame(code, f_globals=f_globals, f_locals=f_locals)
if self.symbolic_on:
paths = self.interesting_paths.get(code, [])
path = paths.pop(0) if paths else None
val = None
if path:
self._cur_interesting_path.append(path[1:]) # skip entry block
val = self.run_frame(frame)
# Check some invariants
if self.frames: # pragma: no cover
raise VirtualMachineError("Frames left over!")
if self.frame and self.frame.stack: # pragma: no cover
raise VirtualMachineError("Data left on stack! %r" % self.frame.stack)
return val
else:
val = self.run_frame(frame)
# Check some invariants
if self.frames: # pragma: no cover
raise VirtualMachineError("Frames left over!")
if self.frame and self.frame.stack: # pragma: no cover
raise VirtualMachineError("Data left on stack! %r" % self.frame.stack)
return val
def unwind_block(self, block):
if block.type == 'except-handler':
offset = 3
else:
offset = 0
while len(self.frame.stack) > block.level + offset:
self.pop()
if block.type == 'except-handler':
tb, value, exctype = self.popn(3)
self.last_exception = exctype, value, tb
def parse_byte_and_args(self):
""" Parse 1 - 3 bytes of bytecode into
an instruction and optionally arguments."""
f = self.frame
opoffset = f.f_lasti
byteCode = byteint(f.f_code.co_code[opoffset])
f.f_lasti += 1
byteName = dis.opname[byteCode]
arg = None
arguments = []
if byteCode >= dis.HAVE_ARGUMENT:
arg = f.f_code.co_code[f.f_lasti:f.f_lasti+2]
f.f_lasti += 2
intArg = byteint(arg[0]) + (byteint(arg[1]) << 8)
if byteCode in dis.hasconst:
arg = f.f_code.co_consts[intArg]
elif byteCode in dis.hasfree:
if intArg < len(f.f_code.co_cellvars):
arg = f.f_code.co_cellvars[intArg]
else:
var_idx = intArg - len(f.f_code.co_cellvars)
arg = f.f_code.co_freevars[var_idx]
elif byteCode in dis.hasname:
arg = f.f_code.co_names[intArg]
elif byteCode in dis.hasjrel:
arg = f.f_lasti + intArg
elif byteCode in dis.hasjabs:
arg = intArg
elif byteCode in dis.haslocal:
arg = f.f_code.co_varnames[intArg]
else:
arg = intArg
arguments = [arg]
return byteName, arguments, opoffset
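    # Illustrative sketch of the decoding above, using the classic CPython 2.x
    # three-byte encoding this interpreter targets: for co_code bytes 0x64 0x01 0x00
    # the opcode 0x64 is LOAD_CONST, which is >= dis.HAVE_ARGUMENT, so the next two
    # bytes form the little-endian argument 1 + (0 << 8) = 1; LOAD_CONST is listed in
    # dis.hasconst, so `arguments` ends up holding co_consts[1]. Opcodes below
    # dis.HAVE_ARGUMENT (e.g. POP_TOP) consume a single byte and return no arguments.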
def log(self, byteName, arguments, opoffset):
""" Log arguments, block stack, and data stack for each opcode."""
op = "%d: %s" % (opoffset, byteName)
if arguments:
op += " %r" % (arguments[0],)
indent = " "*(len(self.frames)-1)
stack_rep = repper(self.frame.stack)
block_stack_rep = repper(self.frame.block_stack)
log.info(" %sdata: %s" % (indent, stack_rep))
log.info(" %sblks: %s" % (indent, block_stack_rep))
log.info("%s%s" % (indent, op))
def dispatch(self, byteName, arguments):
""" Dispatch by bytename to the corresponding methods.
Exceptions are caught and set on the virtual machine."""
print('%s %s' % (byteName, arguments))
why = None
try:
if byteName.startswith('UNARY_'):
self.unaryOperator(byteName[6:])
elif byteName.startswith('BINARY_'):
self.binaryOperator(byteName[7:])
elif byteName.startswith('INPLACE_'):
self.inplaceOperator(byteName[8:])
elif 'SLICE+' in byteName:
self.sliceOperator(byteName)
else:
# dispatch
bytecode_fn = getattr(self, 'byte_%s' % byteName, None)
if not bytecode_fn: # pragma: no cover
raise VirtualMachineError(
"unknown bytecode type: %s" % byteName
)
why = bytecode_fn(*arguments)
except:
# deal with exceptions encountered while executing the op.
self.last_exception = sys.exc_info()[:2] + (None,)
log.exception("Caught exception during execution")
why = 'exception'
return why
def manage_block_stack(self, why):
""" Manage a frame's block stack.
Manipulate the block stack and data stack for looping,
exception handling, or returning."""
assert why != 'yield'
block = self.frame.block_stack[-1]
if block.type == 'loop' and why == 'continue':
self.jump(self.return_value)
why = None
return why
self.pop_block()
self.unwind_block(block)
if block.type == 'loop' and why == 'break':
why = None
self.jump(block.handler)
return why
if PY2:
if (
block.type == 'finally' or
(block.type == 'setup-except' and why == 'exception') or
block.type == 'with'
):
if why == 'exception':
exctype, value, tb = self.last_exception
self.push(tb, value, exctype)
else:
if why in ('return', 'continue'):
self.push(self.return_value)
self.push(why)
why = None
self.jump(block.handler)
return why
elif PY3:
if (
why == 'exception' and
block.type in ['setup-except', 'finally']
):
self.push_block('except-handler')
exctype, value, tb = self.last_exception
self.push(tb, value, exctype)
# PyErr_Normalize_Exception goes here
self.push(tb, value, exctype)
why = None
self.jump(block.handler)
return why
elif block.type == 'finally':
if why in ('return', 'continue'):
self.push(self.return_value)
self.push(why)
why = None
self.jump(block.handler)
return why
return why
def run_frame(self, frame):
"""Run a frame until it returns (somehow).
Exceptions are raised, the return value is returned.
"""
self.push_frame(frame)
while True:
byteName, arguments, opoffset = self.parse_byte_and_args()
if log.isEnabledFor(logging.INFO):
self.log(byteName, arguments, opoffset)
# When unwinding the block stack, we need to keep track of why we
# are doing it.
why = self.dispatch(byteName, arguments)
if why == 'exception':
# TODO: ceval calls PyTraceBack_Here, not sure what that does.
pass
if why == 'reraise':
why = 'exception'
if why != 'yield':
while why and frame.block_stack:
# Deal with any block management we need to do.
why = self.manage_block_stack(why)
if why:
break
# TODO: handle generator exception state
self.pop_frame()
if why == 'exception':
six.reraise(*self.last_exception)
return self.return_value
## Stack manipulation
def byte_LOAD_CONST(self, const):
self.push(const)
def byte_POP_TOP(self):
self.pop()
def byte_DUP_TOP(self):
self.push(self.top())
def byte_DUP_TOPX(self, count):
items = self.popn(count)
for i in [1, 2]:
self.push(*items)
def byte_DUP_TOP_TWO(self):
# Py3 only
a, b = self.popn(2)
self.push(a, b, a, b)
def byte_ROT_TWO(self):
a, b = self.popn(2)
self.push(b, a)
def byte_ROT_THREE(self):
a, b, c = self.popn(3)
self.push(c, a, b)
def byte_ROT_FOUR(self):
a, b, c, d = self.popn(4)
self.push(d, a, b, c)
## Names
def byte_LOAD_NAME(self, name):
frame = self.frame
if name in frame.f_locals:
val = frame.f_locals[name]
elif name in frame.f_globals:
val = frame.f_globals[name]
elif name in frame.f_builtins:
val = frame.f_builtins[name]
else:
raise NameError("name '%s' is not defined" % name)
self.push(val)
def byte_STORE_NAME(self, name):
self.frame.f_locals[name] = self.pop()
def byte_DELETE_NAME(self, name):
del self.frame.f_locals[name]
def byte_LOAD_FAST(self, name):
if name in self.frame.f_locals:
val = self.frame.f_locals[name]
else:
raise UnboundLocalError(
"local variable '%s' referenced before assignment" % name
)
self.push(val)
def byte_STORE_FAST(self, name):
self.frame.f_locals[name] = self.pop()
def byte_DELETE_FAST(self, name):
del self.frame.f_locals[name]
def byte_LOAD_GLOBAL(self, name):
f = self.frame
if name in f.f_globals:
val = f.f_globals[name]
elif name in f.f_builtins:
val = f.f_builtins[name]
else:
raise NameError("global name '%s' is not defined" % name)
self.push(val)
def byte_LOAD_DEREF(self, name):
self.push(self.frame.cells[name].get())
def byte_STORE_DEREF(self, name):
self.frame.cells[name].set(self.pop())
def byte_LOAD_LOCALS(self):
self.push(self.frame.f_locals)
## Operators
UNARY_OPERATORS = {
'POSITIVE': operator.pos,
'NEGATIVE': operator.neg,
'NOT': operator.not_,
'CONVERT': repr,
'INVERT': operator.invert,
}
def unaryOperator(self, op):
x = self.pop()
self.push(self.UNARY_OPERATORS[op](x))
BINARY_OPERATORS = {
'POWER': pow,
'MULTIPLY': operator.mul,
'DIVIDE': getattr(operator, 'div', lambda x, y: None),
'FLOOR_DIVIDE': operator.floordiv,
'TRUE_DIVIDE': operator.truediv,
'MODULO': operator.mod,
'ADD': operator.add,
'SUBTRACT': operator.sub,
'SUBSCR': operator.getitem,
'LSHIFT': operator.lshift,
'RSHIFT': operator.rshift,
'AND': operator.and_,
'XOR': operator.xor,
'OR': operator.or_,
}
def binaryOperator(self, op):
x, y = self.popn(2)
self.push(self.BINARY_OPERATORS[op](x, y))
def inplaceOperator(self, op):
x, y = self.popn(2)
if op == 'POWER':
x **= y
elif op == 'MULTIPLY':
x *= y
elif op in ['DIVIDE', 'FLOOR_DIVIDE']:
x //= y
elif op == 'TRUE_DIVIDE':
x /= y
elif op == 'MODULO':
x %= y
elif op == 'ADD':
x += y
elif op == 'SUBTRACT':
x -= y
elif op == 'LSHIFT':
x <<= y
elif op == 'RSHIFT':
x >>= y
elif op == 'AND':
x &= y
elif op == 'XOR':
x ^= y
elif op == 'OR':
x |= y
else: # pragma: no cover
raise VirtualMachineError("Unknown in-place operator: %r" % op)
self.push(x)
def sliceOperator(self, op):
start = 0
        end = None  # None here means "through the end of the sequence"
op, count = op[:-2], int(op[-1])
if count == 1:
start = self.pop()
elif count == 2:
end = self.pop()
elif count == 3:
end = self.pop()
start = self.pop()
l = self.pop()
if end is None:
end = len(l)
if op.startswith('STORE_'):
l[start:end] = self.pop()
elif op.startswith('DELETE_'):
del l[start:end]
else:
self.push(l[start:end])
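    # Illustrative sketch: the trailing digit of the opcode name says how many indices
    # were pushed. For 'SLICE+2' the count is 2, so only `end` is popped and `start`
    # stays 0, mirroring the source expression l[:end]; for 'STORE_SLICE+3' the end,
    # start and target list are popped and l[start:end] is assigned from the value
    # sitting beneath them on the stack.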
COMPARE_OPERATORS = [
operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
#lambda x, y: x in y,
symex.symbolic_in,
lambda x, y: x not in y,
#lambda x, y: x is y,
symex.symbolic_is,
lambda x, y: x is not y,
lambda x, y: issubclass(x, Exception) and issubclass(x, y),
]
def byte_COMPARE_OP(self, opnum):
x, y = self.popn(2)
self.push(self.COMPARE_OPERATORS[opnum](x, y))
## Attributes and indexing
def byte_LOAD_ATTR(self, attr):
obj = self.pop()
val = getattr(obj, attr)
self.push(val)
def byte_STORE_ATTR(self, name):
val, obj = self.popn(2)
setattr(obj, name, val)
def byte_DELETE_ATTR(self, name):
obj = self.pop()
delattr(obj, name)
def byte_STORE_SUBSCR(self):
val, obj, subscr = self.popn(3)
obj[subscr] = val
def byte_DELETE_SUBSCR(self):
obj, subscr = self.popn(2)
del obj[subscr]
## Building
def byte_BUILD_TUPLE(self, count):
elts = self.popn(count)
self.push(tuple(elts))
def byte_BUILD_LIST(self, count):
elts = self.popn(count)
self.push(elts)
def byte_BUILD_SET(self, count):
# TODO: Not documented in Py2 docs.
elts = self.popn(count)
self.push(set(elts))
def byte_BUILD_MAP(self, size):
# size is ignored.
self.push({})
def byte_STORE_MAP(self):
the_map, val, key = self.popn(3)
the_map[key] = val
self.push(the_map)
def byte_UNPACK_SEQUENCE(self, count):
seq = self.pop()
for x in reversed(seq):
self.push(x)
def byte_BUILD_SLICE(self, count):
if count == 2:
x, y = self.popn(2)
self.push(slice(x, y))
elif count == 3:
x, y, z = self.popn(3)
self.push(slice(x, y, z))
else: # pragma: no cover
raise VirtualMachineError("Strange BUILD_SLICE count: %r" % count)
def byte_LIST_APPEND(self, count):
val = self.pop()
the_list = self.peek(count)
the_list.append(val)
def byte_SET_ADD(self, count):
val = self.pop()
the_set = self.peek(count)
the_set.add(val)
def byte_MAP_ADD(self, count):
val, key = self.popn(2)
the_map = self.peek(count)
the_map[key] = val
## Printing
if 0: # Only used in the interactive interpreter, not in modules.
def byte_PRINT_EXPR(self):
print(self.pop())
def byte_PRINT_ITEM(self):
item = self.pop()
self.print_item(item)
def byte_PRINT_ITEM_TO(self):
to = self.pop()
item = self.pop()
self.print_item(item, to)
def byte_PRINT_NEWLINE(self):
self.print_newline()
def byte_PRINT_NEWLINE_TO(self):
to = self.pop()
self.print_newline(to)
def print_item(self, item, to=None):
if to is None:
to = sys.stdout
if to.softspace:
print(" ", end="", file=to)
to.softspace = 0
print(item, end="", file=to)
if isinstance(item, str):
if (not item) or (not item[-1].isspace()) or (item[-1] == " "):
to.softspace = 1
else:
to.softspace = 1
def print_newline(self, to=None):
if to is None:
to = sys.stdout
print("", file=to)
to.softspace = 0
## Jumps
def byte_JUMP_FORWARD(self, jump):
self.jump(jump)
def byte_JUMP_ABSOLUTE(self, jump):
self.jump(jump)
if 0: # Not in py2.7
def byte_JUMP_IF_TRUE(self, jump):
val = self.top()
if val:
self.jump(jump)
def byte_JUMP_IF_FALSE(self, jump):
val = self.top()
if not val:
self.jump(jump)
def byte_POP_JUMP_IF_TRUE_SYM(self, jump):
val = self.pop()
if isinstance(val, symex.SymbolicVar):
branch_cond = self.cur_interesting_path.pop(0)[1]
if branch_cond == 'TRUE':
val.isTrue()
self.jump(jump)
elif branch_cond == 'FALSE':
val.isFalse()
else:
import ipdb
ipdb.set_trace()
pass
else:
if val:
self.jump(jump)
def byte_POP_JUMP_IF_TRUE(self, jump):
if self.symbolic_on:
self.byte_POP_JUMP_IF_TRUE_SYM(jump)
else:
val = self.pop()
if val:
self.jump(jump)
def byte_POP_JUMP_IF_FALSE_SYM(self, jump):
val = self.pop()
if isinstance(val, symex.SymbolicVar):
branch_cond = self.cur_interesting_path.pop(0)[1]
if branch_cond == 'TRUE':
val.isTrue()
elif branch_cond == 'FALSE':
val.isFalse()
self.jump(jump)
else:
import ipdb
ipdb.set_trace()
pass
else:
if not val:
self.jump(jump)
def byte_POP_JUMP_IF_FALSE(self, jump):
if self.symbolic_on:
self.byte_POP_JUMP_IF_FALSE_SYM(jump)
else:
val = self.pop()
if not val:
self.jump(jump)
def byte_JUMP_IF_TRUE_OR_POP(self, jump):
val = self.top()
if val:
self.jump(jump)
else:
self.pop()
def byte_JUMP_IF_FALSE_OR_POP(self, jump):
val = self.top()
if not val:
self.jump(jump)
else:
self.pop()
## Blocks
def byte_SETUP_LOOP(self, dest):
self.push_block('loop', dest)
def byte_GET_ITER(self):
self.push(iter(self.pop()))
def byte_FOR_ITER(self, jump):
iterobj = self.top()
try:
v = next(iterobj)
self.push(v)
except StopIteration:
self.pop()
self.jump(jump)
def byte_BREAK_LOOP(self):
return 'break'
def byte_CONTINUE_LOOP(self, dest):
# This is a trick with the return value.
# While unrolling blocks, continue and return both have to preserve
# state as the finally blocks are executed. For continue, it's
# where to jump to, for return, it's the value to return. It gets
# pushed on the stack for both, so continue puts the jump destination
# into return_value.
self.return_value = dest
return 'continue'
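    # Hypothetical sketch of why the destination is stashed instead of jumped to:
    #   for x in xs:
    #       try:
    #           continue
    #       finally:
    #           cleanup()
    # the 'finally' suite must run before the loop can continue, so the jump target is
    # parked in return_value; END_FINALLY later pops the 'continue' marker and restores
    # that destination, and the surrounding loop block finally performs the jump.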
def byte_SETUP_EXCEPT(self, dest):
self.push_block('setup-except', dest)
def byte_SETUP_FINALLY(self, dest):
self.push_block('finally', dest)
def byte_END_FINALLY(self):
v = self.pop()
if isinstance(v, str):
why = v
if why in ('return', 'continue'):
self.return_value = self.pop()
if why == 'silenced': # PY3
block = self.pop_block()
assert block.type == 'except-handler'
self.unwind_block(block)
why = None
elif v is None:
why = None
elif issubclass(v, BaseException):
exctype = v
val = self.pop()
tb = self.pop()
self.last_exception = (exctype, val, tb)
why = 'reraise'
else: # pragma: no cover
raise VirtualMachineError("Confused END_FINALLY")
return why
def byte_POP_BLOCK(self):
self.pop_block()
if PY2:
def byte_RAISE_VARARGS(self, argc):
# NOTE: the dis docs are completely wrong about the order of the
# operands on the stack!
exctype = val = tb = None
if argc == 0:
exctype, val, tb = self.last_exception
elif argc == 1:
exctype = self.pop()
elif argc == 2:
val = self.pop()
exctype = self.pop()
elif argc == 3:
tb = self.pop()
val = self.pop()
exctype = self.pop()
# There are a number of forms of "raise", normalize them somewhat.
if isinstance(exctype, BaseException):
val = exctype
exctype = type(val)
self.last_exception = (exctype, val, tb)
if tb:
return 'reraise'
else:
return 'exception'
elif PY3:
def byte_RAISE_VARARGS(self, argc):
cause = exc = None
if argc == 2:
cause = self.pop()
exc = self.pop()
elif argc == 1:
exc = self.pop()
return self.do_raise(exc, cause)
def do_raise(self, exc, cause):
if exc is None: # reraise
exc_type, val, tb = self.last_exception
if exc_type is None:
return 'exception' # error
else:
return 'reraise'
elif type(exc) == type:
# As in `raise ValueError`
exc_type = exc
val = exc() # Make an instance.
elif isinstance(exc, BaseException):
# As in `raise ValueError('foo')`
exc_type = type(exc)
val = exc
else:
return 'exception' # error
# If you reach this point, you're guaranteed that
# val is a valid exception instance and exc_type is its class.
# Now do a similar thing for the cause, if present.
if cause:
if type(cause) == type:
cause = cause()
elif not isinstance(cause, BaseException):
return 'exception' # error
val.__cause__ = cause
self.last_exception = exc_type, val, val.__traceback__
return 'exception'
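    # Minimal sketch of the normalization above: `raise ValueError` reaches do_raise as
    # the bare class, so an instance is created; `raise ValueError('foo')` arrives as an
    # instance whose type is recovered; a bare `raise` replays last_exception. With
    # `raise ValueError('foo') from KeyError` the cause class is instantiated as well
    # and attached as val.__cause__ before the stack starts unwinding.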
def byte_POP_EXCEPT(self):
block = self.pop_block()
if block.type != 'except-handler':
raise Exception("popped block is not an except handler")
self.unwind_block(block)
def byte_SETUP_WITH(self, dest):
ctxmgr = self.pop()
self.push(ctxmgr.__exit__)
ctxmgr_obj = ctxmgr.__enter__()
if PY2:
self.push_block('with', dest)
elif PY3:
self.push_block('finally', dest)
self.push(ctxmgr_obj)
def byte_WITH_CLEANUP(self):
# The code here does some weird stack manipulation: the exit function
# is buried in the stack, and where depends on what's on top of it.
# Pull out the exit function, and leave the rest in place.
v = w = None
u = self.top()
if u is None:
exit_func = self.pop(1)
elif isinstance(u, str):
if u in ('return', 'continue'):
exit_func = self.pop(2)
else:
exit_func = self.pop(1)
u = None
elif issubclass(u, BaseException):
if PY2:
w, v, u = self.popn(3)
exit_func = self.pop()
self.push(w, v, u)
elif PY3:
w, v, u = self.popn(3)
tp, exc, tb = self.popn(3)
exit_func = self.pop()
self.push(tp, exc, tb)
self.push(None)
self.push(w, v, u)
block = self.pop_block()
assert block.type == 'except-handler'
self.push_block(block.type, block.handler, block.level-1)
else: # pragma: no cover
raise VirtualMachineError("Confused WITH_CLEANUP")
exit_ret = exit_func(u, v, w)
err = (u is not None) and bool(exit_ret)
if err:
# An error occurred, and was suppressed
if PY2:
self.popn(3)
self.push(None)
elif PY3:
self.push('silenced')
## Functions
def byte_MAKE_FUNCTION(self, argc):
if PY3:
name = self.pop()
else:
name = None
code = self.pop()
defaults = self.popn(argc)
globs = self.frame.f_globals
fn = Function(name, code, globs, defaults, None, self)
self.push(fn)
def byte_LOAD_CLOSURE(self, name):
self.push(self.frame.cells[name])
def byte_MAKE_CLOSURE(self, argc):
if PY3:
# TODO: the py3 docs don't mention this change.
name = self.pop()
else:
name = None
closure, code = self.popn(2)
defaults = self.popn(argc)
globs = self.frame.f_globals
fn = Function(name, code, globs, defaults, closure, self)
self.push(fn)
def byte_CALL_FUNCTION(self, arg):
return self.call_function(arg, [], {})
def byte_CALL_FUNCTION_VAR(self, arg):
args = self.pop()
return self.call_function(arg, args, {})
def byte_CALL_FUNCTION_KW(self, arg):
kwargs = self.pop()
return self.call_function(arg, [], kwargs)
def byte_CALL_FUNCTION_VAR_KW(self, arg):
args, kwargs = self.popn(2)
return self.call_function(arg, args, kwargs)
def call_function(self, arg, args, kwargs):
lenKw, lenPos = divmod(arg, 256)
namedargs = {}
for i in range(lenKw):
key, val = self.popn(2)
namedargs[key] = val
namedargs.update(kwargs)
posargs = self.popn(lenPos)
posargs.extend(args)
func = self.pop()
frame = self.frame
ffunc = func
if hasattr(func, 'im_func'):
# Methods get self as an implicit first parameter.
if func.im_self:
posargs.insert(0, func.im_self)
# The first parameter must be the correct type.
if not isinstance(posargs[0], func.im_class):
raise TypeError(
'unbound method %s() must be called with %s instance '
'as first argument (got %s instance instead)' % (
func.im_func.func_name,
func.im_class.__name__,
type(posargs[0]).__name__,
)
)
func = func.im_func
if self.symbolic_on:
if isinstance(func, Function):
func = func._func
if not hasattr(func, 'func_code'):
import ipdb
ipdb.set_trace()
retval = func(*posargs, **namedargs)
elif func.func_code in self.interesting_paths:
import ipdb
ipdb.set_trace()
# setup env from posargs and namedargs
decl = self.get_decl(func.func_code)
env = self.get_env(decl.parent_module.code_object)
func_args = inspect.getargspec(func)
argnames = func_args.args[:]
posargs_copy = posargs[:]
namedargs_copy = namedargs.copy()
defaults = func_args.defaults
defaults_copy = list(defaults) if defaults is not None else None
for name, var in zip(func_args.args, posargs):
env[name] = var
argnames.pop(0)
posargs_copy.pop(0)
# if all posargs were used up, use kwargs
for argname in argnames:
if argname in namedargs:
env[argname] = namedargs[argname]
namedargs_copy.pop(argname)
else:
env[argname] = defaults_copy.pop(0)
if func_args.varargs:
env[func_args.varargs] = []
for var in posargs_copy:
env[func_args.varargs].append(var)
if func_args.keywords:
env[func_args.keywords] = {}
for name, val in namedargs_copy.iteritems():
env[func_args.keywords][name] = val
# XXX(soh): handles closure
closures = func.func_closure or []
for closure in closures:
import ipdb
ipdb.set_trace()
cell_contents = closure.cell_contents
if not self.frame.cells:
self.frame.cells = {}
for var in func.func_code.co_freevars:
cell = Cell(cell_contents)
self.frame.cells[var] = cell
import ipdb
ipdb.set_trace()
retval = self.fork(func.func_code, f_globals=env)
else:
retval = func(*posargs, **namedargs)
else:
retval = func(*posargs, **namedargs)
self.push(retval)
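    # Illustrative sketch of the argument encoding unpacked above: for a call like
    # f(1, 2, x=3) the compiler emits CALL_FUNCTION with arg = 2 + (1 << 8), so
    # divmod(arg, 256) gives lenKw=1 keyword pair and lenPos=2 positionals; the
    # keyword name/value pairs sit on top of the stack, the positionals below them and
    # the callable underneath, which is why they are popped in exactly that order.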
def byte_RETURN_VALUE(self):
self.return_value = self.pop()
if self.frame.generator:
self.frame.generator.finished = True
return "return"
def byte_YIELD_VALUE(self):
self.return_value = self.pop()
return "yield"
def byte_YIELD_FROM(self):
u = self.pop()
x = self.top()
try:
if not isinstance(x, Generator) or u is None:
# Call next on iterators.
retval = next(x)
else:
retval = x.send(u)
self.return_value = retval
except StopIteration as e:
self.pop()
self.push(e.value)
else:
# YIELD_FROM decrements f_lasti, so that it will be called
# repeatedly until a StopIteration is raised.
self.jump(self.frame.f_lasti - 1)
# Returning "yield" prevents the block stack cleanup code
# from executing, suspending the frame in its current state.
return "yield"
## Importing
def byte_IMPORT_NAME(self, name):
level, fromlist = self.popn(2)
frame = self.frame
self.push(
__import__(name, frame.f_globals, frame.f_locals, fromlist, level)
)
def byte_IMPORT_STAR(self):
# TODO: this doesn't use __all__ properly.
mod = self.pop()
for attr in dir(mod):
if attr[0] != '_':
self.frame.f_locals[attr] = getattr(mod, attr)
def byte_IMPORT_FROM(self, name):
mod = self.top()
self.push(getattr(mod, name))
## And the rest...
def byte_EXEC_STMT(self):
stmt, globs, locs = self.popn(3)
six.exec_(stmt, globs, locs)
if PY2:
def byte_BUILD_CLASS(self):
name, bases, methods = self.popn(3)
self.push(type(name, bases, methods))
elif PY3:
def byte_LOAD_BUILD_CLASS(self):
# New in py3
self.push(__build_class__)
def byte_STORE_LOCALS(self):
self.frame.f_locals = self.pop()
if 0: # Not in py2.7
def byte_SET_LINENO(self, lineno):
self.frame.f_lineno = lineno
| mit | -8,876,592,344,558,702,000 | 30.180672 | 90 | 0.507479 | false |
jonboiser/content-curation | contentcuration/contentcuration/migrations/0059_auto_20170402_1504.py | 2 | 1570 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-04-02 22:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0058_auto_20170223_1636'),
]
operations = [
migrations.CreateModel(
name='ChannelResourceSize',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tree_id', models.IntegerField()),
('resource_size', models.IntegerField()),
],
options={
'db_table': 'contentcuration_channel_resource_sizes',
'managed': False,
},
),
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[(b'high_res_video', b'High Resolution'), (b'low_res_video', b'Low Resolution'), (b'vector_video', b'Vectorized'), (b'video_thumbnail', b'Thumbnail'), (b'video_subtitle', b'Subtitle'), (b'audio', b'Audio'), (b'audio_thumbnail', b'Thumbnail'), (b'document', b'Document'), (b'document_thumbnail', b'Thumbnail'), (b'exercise', b'Exercise'), (b'exercise_thumbnail', b'Thumbnail'), (b'exercise_image', b'Exercise Image'), (b'exercise_graphie', b'Exercise Graphie'), (b'channel_thumbnail', b'Channel Thumbnail'), (b'html5_zip', b'HTML5 Zip'), (b'html5_thumbnail', b'HTML5 Thumbnail')], max_length=150, primary_key=True, serialize=False),
),
]
| mit | -990,422,165,474,454,700 | 48.0625 | 674 | 0.603822 | false |
JianyuWang/neutron | neutron/tests/unit/agent/linux/test_async_process.py | 8 | 10752 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import signal
import eventlet.event
import eventlet.queue
import eventlet.timeout
import mock
import testtools
from neutron.agent.linux import async_process
from neutron.agent.linux import utils
from neutron.tests import base
class TestAsyncProcess(base.BaseTestCase):
def setUp(self):
super(TestAsyncProcess, self).setUp()
self.proc = async_process.AsyncProcess(['fake'])
    def test_constructor_raises_exception_for_negative_respawn_interval(self):
with testtools.ExpectedException(ValueError):
async_process.AsyncProcess(['fake'], respawn_interval=-1)
def test__spawn(self):
expected_process = 'Foo'
proc = self.proc
with mock.patch.object(utils, 'create_process') as mock_create_process:
mock_create_process.return_value = [expected_process, None]
with mock.patch('eventlet.spawn') as mock_spawn:
proc._spawn()
self.assertIsInstance(proc._kill_event, eventlet.event.Event)
self.assertEqual(proc._process, expected_process)
mock_spawn.assert_has_calls([
mock.call(proc._watch_process,
proc._read_stdout,
proc._kill_event),
mock.call(proc._watch_process,
proc._read_stderr,
proc._kill_event),
])
self.assertEqual(len(proc._watchers), 2)
    def test__handle_process_error_kills_without_respawn(self):
with mock.patch.object(self.proc, '_kill') as kill:
self.proc._handle_process_error()
kill.assert_has_calls([mock.call(signal.SIGKILL, respawning=False)])
    def test__handle_process_error_kills_with_respawn(self):
self.proc.respawn_interval = 1
with mock.patch.object(self.proc, '_kill') as kill:
with mock.patch.object(self.proc, '_spawn') as spawn:
with mock.patch('eventlet.sleep') as sleep:
self.proc._handle_process_error()
kill.assert_has_calls([mock.call(signal.SIGKILL, respawning=True)])
sleep.assert_has_calls([mock.call(self.proc.respawn_interval)])
spawn.assert_called_once_with()
def _test__watch_process(self, callback, kill_event):
self.proc._kill_event = kill_event
# Ensure the test times out eventually if the watcher loops endlessly
with eventlet.timeout.Timeout(5):
with mock.patch.object(self.proc,
'_handle_process_error') as func:
self.proc._watch_process(callback, kill_event)
if not kill_event.ready():
func.assert_called_once_with()
def test__watch_process_exits_on_callback_failure(self):
self._test__watch_process(lambda: None, eventlet.event.Event())
def test__watch_process_exits_on_exception(self):
def foo():
raise Exception('Error!')
self._test__watch_process(foo, eventlet.event.Event())
def test__watch_process_exits_on_sent_kill_event(self):
kill_event = eventlet.event.Event()
kill_event.send()
self._test__watch_process(None, kill_event)
def _test_read_output_queues_and_returns_result(self, output):
queue = eventlet.queue.LightQueue()
mock_stream = mock.Mock()
with mock.patch.object(mock_stream, 'readline') as mock_readline:
mock_readline.return_value = output
result = self.proc._read(mock_stream, queue)
if output:
self.assertEqual(output, result)
self.assertEqual(output, queue.get_nowait())
else:
self.assertFalse(result)
self.assertTrue(queue.empty())
def test__read_queues_and_returns_output(self):
self._test_read_output_queues_and_returns_result('foo')
def test__read_returns_none_for_missing_output(self):
self._test_read_output_queues_and_returns_result('')
def test_start_raises_exception_if_process_already_started(self):
self.proc._kill_event = True
with testtools.ExpectedException(async_process.AsyncProcessException):
self.proc.start()
def test_start_invokes__spawn(self):
with mock.patch.object(self.proc, '_spawn') as mock_start:
self.proc.start()
mock_start.assert_called_once_with()
def test__iter_queue_returns_empty_list_for_empty_queue(self):
result = list(self.proc._iter_queue(eventlet.queue.LightQueue(),
False))
self.assertEqual(result, [])
def test__iter_queue_returns_queued_data(self):
queue = eventlet.queue.LightQueue()
queue.put('foo')
result = list(self.proc._iter_queue(queue, False))
self.assertEqual(result, ['foo'])
def _test_iter_output_calls_iter_queue_on_output_queue(self, output_type):
expected_value = 'foo'
with mock.patch.object(self.proc, '_iter_queue') as mock_iter_queue:
mock_iter_queue.return_value = expected_value
target_func = getattr(self.proc, 'iter_%s' % output_type, None)
value = target_func()
self.assertEqual(value, expected_value)
queue = getattr(self.proc, '_%s_lines' % output_type, None)
mock_iter_queue.assert_called_with(queue, False)
def test_iter_stdout(self):
self._test_iter_output_calls_iter_queue_on_output_queue('stdout')
def test_iter_stderr(self):
self._test_iter_output_calls_iter_queue_on_output_queue('stderr')
def _test__kill(self, respawning, pid=None):
with mock.patch.object(self.proc, '_kill_event'
) as mock_kill_event,\
mock.patch.object(utils, 'get_root_helper_child_pid',
return_value=pid),\
mock.patch.object(self.proc, '_kill_process'
) as mock_kill_process,\
mock.patch.object(self.proc, '_process'):
self.proc._kill(signal.SIGKILL, respawning)
if respawning:
self.assertIsNotNone(self.proc._kill_event)
else:
self.assertIsNone(self.proc._kill_event)
mock_kill_event.send.assert_called_once_with()
if pid:
mock_kill_process.assert_called_once_with(pid, signal.SIGKILL)
def test__kill_when_respawning_does_not_clear_kill_event(self):
self._test__kill(True)
def test__kill_when_not_respawning_clears_kill_event(self):
self._test__kill(False)
def test__kill_targets_process_for_pid(self):
self._test__kill(False, pid='1')
def _test__kill_process(self, pid, expected, exception_message=None,
kill_signal=signal.SIGKILL):
self.proc.run_as_root = True
if exception_message:
exc = RuntimeError(exception_message)
else:
exc = None
with mock.patch.object(utils, 'execute',
side_effect=exc) as mock_execute:
actual = self.proc._kill_process(pid, kill_signal)
self.assertEqual(expected, actual)
mock_execute.assert_called_with(['kill', '-%d' % kill_signal, pid],
run_as_root=self.proc.run_as_root)
def test__kill_process_returns_true_for_valid_pid(self):
self._test__kill_process('1', True)
def test__kill_process_returns_true_for_stale_pid(self):
self._test__kill_process('1', True, 'No such process')
def test__kill_process_returns_false_for_execute_exception(self):
self._test__kill_process('1', False, 'Invalid')
def test_kill_process_with_different_signal(self):
self._test__kill_process('1', True, kill_signal=signal.SIGTERM)
def test_stop_calls_kill_with_provided_signal_number(self):
self.proc._kill_event = True
with mock.patch.object(self.proc, '_kill') as mock_kill:
self.proc.stop(kill_signal=signal.SIGTERM)
mock_kill.assert_called_once_with(signal.SIGTERM)
def test_stop_raises_exception_if_already_started(self):
with testtools.ExpectedException(async_process.AsyncProcessException):
self.proc.stop()
def test_cmd(self):
for expected, cmd in (('ls -l file', ['ls', '-l', 'file']),
('fake', ['fake'])):
proc = async_process.AsyncProcess(cmd)
self.assertEqual(expected, proc.cmd)
class TestAsyncProcessLogging(base.BaseTestCase):
def setUp(self):
super(TestAsyncProcessLogging, self).setUp()
self.log_mock = mock.patch.object(async_process, 'LOG').start()
def _test__read_stdout_logging(self, enable):
proc = async_process.AsyncProcess(['fakecmd'], log_output=enable)
with mock.patch.object(proc, '_read', return_value='fakedata'),\
mock.patch.object(proc, '_process'):
proc._read_stdout()
self.assertEqual(enable, self.log_mock.debug.called)
def _test__read_stderr_logging(self, enable):
proc = async_process.AsyncProcess(['fake'], log_output=enable)
with mock.patch.object(proc, '_read', return_value='fakedata'),\
mock.patch.object(proc, '_process'):
proc._read_stderr()
self.assertEqual(enable, self.log_mock.error.called)
def test__read_stdout_logging_enabled(self):
self._test__read_stdout_logging(enable=True)
def test__read_stdout_logging_disabled(self):
self._test__read_stdout_logging(enable=False)
def test__read_stderr_logging_enabled(self):
self._test__read_stderr_logging(enable=True)
def test__read_stderr_logging_disabled(self):
self._test__read_stderr_logging(enable=False)
class TestAsyncProcessDieOnError(base.BaseTestCase):
def test__read_stderr_returns_none_on_error(self):
proc = async_process.AsyncProcess(['fakecmd'], die_on_error=True)
with mock.patch.object(proc, '_read', return_value='fakedata'),\
mock.patch.object(proc, '_process'):
self.assertIsNone(proc._read_stderr())
| apache-2.0 | -7,487,848,878,019,811,000 | 38.97026 | 79 | 0.624442 | false |
rickerc/cinder_audit | cinder/tests/db/fakes.py | 3 | 1434 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite."""
from cinder import db
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
if key in self.values:
return self.values[key]
else:
raise NotImplementedError()
def __repr__(self):
return '<FakeModel: %s>' % self.values
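# Minimal usage sketch (the names are illustrative, not taken from a real test): a dict
# of column values behaves like a model row, so fake_vol = FakeModel({'id': 'vol-1',
# 'size': 10}) answers both fake_vol.size and fake_vol['size'] with 10, while indexing a
# key that is absent raises NotImplementedError as defined above.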
def stub_out(stubs, funcs):
"""Set the stubs in mapping in the db api."""
for func in funcs:
func_name = '_'.join(func.__name__.split('_')[1:])
stubs.Set(db, func_name, func)
| apache-2.0 | 7,077,990,607,924,352,000 | 30.173913 | 78 | 0.654114 | false |
aljim/deploymentmanager-samples | community/hierarchical_configuration/Organization_with_departments/systems/System_with_project_creation_and_helper_function/templates/simple_frontend.py | 4 | 2226 | from helper import config_merger
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
def GlobalComputeUrl(project, collection, name):
return ''.join([
COMPUTE_URL_BASE, 'projects/', project, '/global/', collection, '/',
name
])
def ZonalComputeUrl(project, zone, collection, name):
return ''.join([
COMPUTE_URL_BASE, 'projects/', project, '/zones/', zone, '/',
collection, '/', name
])
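# Illustrative sketch of the URLs these helpers build (project and zone values are made
# up): ZonalComputeUrl('my-proj', 'us-central1-f', 'machineTypes', 'f1-micro') returns
# 'https://www.googleapis.com/compute/v1/projects/my-proj/zones/us-central1-f/machineTypes/f1-micro',
# and GlobalComputeUrl('debian-cloud', 'images/family', 'debian-9') produces the global
# image-family URL of the same shape that the boot disk definition below relies on.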
def GenerateConfig(context):
"""Generate configuration."""
module = "frontend"
cc = config_merger.ConfigContext(context.properties, module)
name_prefix = cc.configs["Org_Name"] + cc.configs["ProjectAbrevation"] + context.properties["envName"] + module
i_name_prefix = cc.configs["Org_Name"] + "-" + cc.configs["ProjectAbrevation"] + "-" + context.properties["envName"] + module
instance = {
'zone':
cc.configs['zone'],
'machineType':
ZonalComputeUrl(context.env['project'], cc.configs['zone'],
'machineTypes',
cc.configs['Instance']['InstanceType']),
'disks': [{
'deviceName': 'boot',
'type': 'PERSISTENT',
'autoDelete': True,
'boot': True,
'initializeParams': {
'diskName':
name_prefix.replace(' ', '-').lower() + '-disk',
'sourceImage':
GlobalComputeUrl(cc.configs['Instance']['OSFamily'],
'images/family',
cc.configs['Instance']['OSVersion'])
},
}],
'networkInterfaces': [{
'accessConfigs': [{
'name': 'external-nat',
'type': 'ONE_TO_ONE_NAT'
}],
'network':
GlobalComputeUrl(context.env['project'], 'networks', 'default')
}]
}
# Resources to return.
resources = {
'resources': [{
'name': i_name_prefix.replace(' ', '-').lower() + '-i',
'type': 'compute.v1.instance',
'properties': instance
}]
}
return resources
| apache-2.0 | 4,018,374,107,771,386,000 | 30.735294 | 129 | 0.493711 | false |
agua/aguadev | bin/scripts/resources/starcluster/plugins/sge.py | 1 | 27762 | #!/usr/bin/env python
import os
import re
import string
import sys
import time
import posixpath
import subprocess
etchosts_template = """127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts
"""
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
from starcluster import utils
class NullDevice():
def write(self, s):
pass
class CreateCell (ClusterSetup):
"""
Configure a custom SGE cell for a StarCluster cluster
"""
def __init__(self, privatekey, publiccert, cell, execdport, qmasterport, root, slots):
log.info("Loaded plugin: sge.CreateCell")
log.debug("sge.CreateCell.__init__ Initialising CreateCell plugin.")
log.debug("sge.CreateCell.__init__ privatekey %s" % privatekey)
log.debug("sge.CreateCell.__init__ publiccert %s" % publiccert)
log.debug("sge.CreateCell.__init__ cell %s" % cell)
log.debug("sge.CreateCell.__init__ execdport %s" % execdport)
log.debug("sge.CreateCell.__init__ qmasterport %s" % qmasterport)
log.debug("sge.CreateCell.__init__ root %s" % root)
log.debug("sge.CreateCell.__init__ slots %s" % slots)
self.headgroup = "default"
self.privatekey = privatekey
self.publiccert = publiccert
self.cell = cell
self.execdport = execdport
self.qmasterport = qmasterport
self.root = root
self.slots = slots
""" SET HEAD NODE'S ROOT PATH TO SGE BINARIES """
rootpath = os.environ['ROOTPATH'];
rootpath = re.sub(r'^.', '', rootpath)
log.info("rootpath: %s", rootpath)
self.rootpath = rootpath
os.environ['SGE_ROOT'] = root
os.environ['SGE_CELL'] = cell
os.environ['SGE_QMASTER_PORT'] = qmasterport
os.environ['SGE_EXECD_PORT'] = execdport
def run(self, nodes, master, user, user_shell, volumes):
"""
        Configure the custom SGE cell on the head node, the master and all worker nodes
"""
##### OPEN NEW PORTS ON EC2 ON HEAD
self.openSgePorts()
#### CREATE NEW CELL DIRECTORY ON HEAD AND MASTER/NODES
self.copyCellOnHead()
self.copyCell(master)
#### SET MASTER HOSTNAME AS INTERNAL IP
self.setMasterHostname(master)
#### SET HEADNODE HOSTNAME AS INTERNAL IP
self.setHeadHostname()
#### SET MASTER act_qmaster AS MASTER INTERNAL IP
self.setMasterActQmaster(master)
#### SET MASTER INTERNAL IP IN /etc/hosts
self.setMasterEtcHosts(master)
#### START SGE ON MASTER
self.restartSge(master)
#### ADD ENVIRONMENT VARIABLES TO /etc/profile ON MASTER/NODES
for node in nodes:
self.addEnvarsToProfile(node)
#### SET MASTER AS SUBMIT AND ADMIN HOST
self.setMasterSubmit(master)
#### SET HEADNODE qmaster_info AS QUICK LOOKUP FOR MASTER INFO
self.setMasterInfo(master)
#### SET MASTER'S IP ADDRESS IN act_qmaster FILE ON HEAD
self.updateHeadActQmaster(master)
#### SET HEAD AS SUBMIT AND ADMIN HOST
self.setHeadSubmit(master)
#### INSTEAD OF 'master', USE MASTER INTERNAL IP IN @allhosts
self.addMasterToAllHosts(master)
##### RESTART SGE ON MASTER/NODES
for node in nodes:
self.restartSge(node)
#### SCHEDULING INFO
self.enableSchedulingInfo()
#### ADD threaded PARALLEL ENVIRONMENT ON MASTER
self.addParallelEnvironment(master)
#### ADD NODES TO @allhosts GROUP
for node in nodes:
if node.alias != "master":
self.addToAllhosts(node, master)
##### RESTART SGE ON MASTER/NODES
for node in nodes:
self.restartSge(node)
#### REMOVE DEFAULT all.q QUEUE
self.removeAllq()
log.info("Completed plugin sge")
def getRootPath(self, node):
instanceid = node.ssh.execute("curl -s http://169.254.169.254/latest/meta-data/instance-id")
command = 'grep ROOTPATH /etc/profile.d/sge.sh'
rootpath = node.ssh.execute(command)
log.debug("sge.CreateCell.getRootPath rootpath: %s", rootpath)
entry = rootpath[0]
# FORMAT: ['export ROOTPATH="$ROOTPATH:$SGE_ROOT/bin/lx-x64"']
match = re.search('SGE_ROOT\/([^"]+?)"', entry)
rootpath = match.group(1)
rootpath = "/opt/sge6/" + rootpath
return rootpath
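    # Illustrative sketch (the architecture string is an assumption): if the profile
    # script contains the line
    #   export ROOTPATH="$ROOTPATH:$SGE_ROOT/bin/lx24-amd64"
    # the regex captures 'bin/lx24-amd64' and the method returns
    # '/opt/sge6/bin/lx24-amd64', i.e. the architecture-specific SGE binary directory.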
def openSgePorts(self):
"""
Open the particular SGE qmaster and execd daemon ports for this cluster
"""
log.info("Opening SGE qmaster and execd ports")
qmasterport = self.qmasterport
execdport = self.execdport
cluster = self.cell
envars = self.exportEnvironmentVars()
log.debug("sge.CreateCell.openSgePorts qmasterport; %s", qmasterport)
log.debug("sge.CreateCell.openSgePorts execdport; %s", execdport)
log.debug("sge.CreateCell.openSgePorts envars; %s", envars)
#### SET EC2 KEY FILE ENVIRONMENT VARIABLES
ec2vars = "export EC2_PRIVATE_KEY=" + self.privatekey + "; "
ec2vars += "export EC2_CERT=" + self.publiccert + "; "
# HEAD NODE (I.E., NOT MASTER OR NODE)
commands = [
ec2vars + 'ec2-authorize @sc-' + cluster + ' -p ' + execdport + ' -P tcp',
ec2vars + 'ec2-authorize @sc-' + cluster + ' -p ' + execdport + ' -P udp',
ec2vars + 'ec2-authorize @sc-' + cluster + ' -p ' + qmasterport + ' -P tcp',
ec2vars + 'ec2-authorize @sc-' + cluster + ' -p ' + qmasterport + ' -P udp',
ec2vars + 'ec2-authorize ' + self.headgroup + ' -p ' + execdport + ' -P tcp',
ec2vars + 'ec2-authorize ' + self.headgroup + ' -p ' + execdport + ' -P udp',
ec2vars + 'ec2-authorize ' + self.headgroup + ' -p ' + qmasterport + ' -P tcp',
ec2vars + 'ec2-authorize ' + self.headgroup + ' -p ' + qmasterport + ' -P udp'
]
for command in commands:
self.runSystemCommand(command);
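    # Illustrative sketch of one generated command (port and cell values are made up):
    # with cell 'mycluster' and execdport '63232' the first entry expands to roughly
    #   export EC2_PRIVATE_KEY=...; export EC2_CERT=...; ec2-authorize @sc-mycluster -p 63232 -P tcp
    # i.e. the classic ec2-api-tools CLI opens the execd/qmaster ports both for the
    # cluster's own security group and for the head node's group.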
def runSystemCommand(self, command):
log.info(command)
os.system(command)
def setMasterActQmaster(self, master):
"""
        Write the master's internal hostname into the cell's act_qmaster file, so each
        cluster resolves its own qmaster rather than a generic 'master' alias
"""
log.info("Setting act_qmaster file contents")
hostname = self.getHostname(master)
act_qmaster = self.root + "/" + self.cell + "/common/act_qmaster"
command = "echo '" + hostname + "' > " + act_qmaster
log.debug("sge.CreateCell.setMasterActQmaster command: %s", command)
master.ssh.execute(command)
def setMasterHostname(self, master):
"""
Set master hostname as internal IP to disambiguate
from other 'master' nodes given multiple clusters
"""
log.info("Setting master hostname")
hostname = self.getHostname(master)
command = "hostname " + hostname
log.info("sge.CreateCell.setMasterHostname command: %s", command)
master.ssh.execute(command)
command = "echo '" + hostname + "' > /etc/hostname"
log.info("sge.CreateCell.setMasterHostname command: %s", command)
master.ssh.execute(command)
def setHeadHostname(self):
"""
Set master hostname as internal IP to disambiguate
from other 'master' nodes given multiple clusters
"""
log.info("Setting headnode hostname")
command = "curl -s http://169.254.169.254/latest/meta-data/local-hostname"
hostname = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).stdout.read()
log.info("sge.CreateCell.setHeadnodeHostname hostname: %s", hostname)
command = "hostname " + hostname
log.info("sge.CreateCell.setHeadnodeHostname command: %s", command)
os.system(command)
command = "echo '" + hostname + "' > /etc/hostname"
log.info("sge.CreateCell.setHeadnodeHostname command: %s", command)
os.system(command)
def getHostname(self, master):
log.info("sge.CreateCell.getHostname returning hostname: %s", master.private_dns_name)
return master.private_dns_name
def setMasterEtcHosts (self, master):
log.info("Adding master hostname to own /etc/hosts")
envars = self.exportEnvironmentVars()
command = "cat /etc/hosts"
log.debug("sge.CreateCell.setMasterEtcHosts command: %s" % command)
etchosts = etchosts_template
ip_address = master.ip_address
dns_name = master.dns_name
insert = master.private_ip_address
insert += "\t"
insert += self.getHostname(master)
insert += "\t"
insert += "localhost"
etchosts += insert + "\n"
log.debug("sge.CreateCell.setMasterEtcHosts AFTER etchosts: %s", etchosts)
etchosts_file = master.ssh.remote_file("/etc/hosts")
print >> etchosts_file, etchosts
etchosts_file.close()
# DEPRECATED:
#command = "/etc/init.d/networking restart"
command = "sh -c \"ifdown eth0 && ifup eth0\""
log.debug("sge.CreateCell.setMasterEtcHosts command: %s", command)
result = master.ssh.execute(command)
log.debug("sge.CreateCell.setMasterEtcHosts result: %s", result)
def setMasterSubmit(self, master):
hostname = self.getHostname(master)
envars = self.exportEnvironmentVars()
rootpath = self.getRootPath(master)
log.info("CreateCell.setMasterSubmit rootpath: %s", rootpath)
add_submit = envars + rootpath + '/qconf -as ' + hostname
add_admin = envars + rootpath + '/qconf -ah ' + hostname
log.debug("sge.CreateCell.setMasterSubmit add_submit: %s", add_submit)
master.ssh.execute(add_submit)
log.debug("sge.CreateCell.setMasterSubmit add_admin: %s", add_admin)
master.ssh.execute(add_admin)
def addMasterToAllHosts (self, master):
log.info("sge.CreateCell.addMasterToAllHosts Replacing 'master' with master INTERNAL IP in @allhosts")
envars = self.exportEnvironmentVars()
command = envars + self.rootpath + "/qconf -shgrp @allhosts"
log.info("sge.CreateCell.addMasterToAllHosts command: %s" % command)
allhosts_template = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).stdout.read()
log.info("sge.CreateCell.addMasterToAllHosts BEFORE allhosts_template: %s", allhosts_template)
#### GET hostname
hostname = self.getHostname(master)
#### REMOVE master AND hostname IF EXISTS
match = "master"
allhosts_template = string.replace(allhosts_template, match, '')
allhosts_template = string.replace(allhosts_template, hostname, '')
#### ADD hostname
allhosts_template = allhosts_template.strip('\s\n\r')
allhosts_template += " " + hostname
#allhosts_template = re.sub('\s+$/s', '', allhosts_template)
log.info("sge.CreateCell.addMasterToAllHosts AFTER allhosts_template: %s", allhosts_template)
filename = "/tmp/" + self.cell + "-allhosts.txt"
allhosts_file = open(filename, 'w')
print >> allhosts_file, allhosts_template
allhosts_file.close()
log.info("sge.CreateCell.addMasterToAllHosts printed filename: %s", filename)
set_command = envars + self.rootpath + "/qconf -Mhgrp " + filename
log.info("sge.CreateCell.addMasterToAllHosts set_command: %s" % set_command)
os.system(set_command)
def addToAllhosts(self, node, master):
"""
Add host to @allhosts group to enable it to be an execution host
"""
log.info("Add %s to @allhosts group", node.alias)
os.environ['SGE_ROOT'] = self.root
os.environ['SGE_CELL'] = self.cell
os.environ['SGE_QMASTER_PORT'] = self.qmasterport
os.environ['SGE_EXECD_PORT'] = self.execdport
rootpath = self.getRootPath(master)
log.info("CreateCell.addToAllhosts rootpath: %s", rootpath)
hostname = node.alias
#if node.alias == "master":
# hostname = self.getHostname(master)
command = rootpath + "/qconf -aattr hostgroup hostlist " + hostname + " @allhosts >> /tmp/allhosts.out; "
log.info("sge.addToAllhosts command: %s", command)
envars = self.exportEnvironmentVars()
original_stdout = sys.stdout
sys.stdout = NullDevice()
master.ssh.execute(envars + command)
sys.stdout = original_stdout
def setHeadSubmit(self, master):
"""
Add head node to submit hosts and admin hosts lists
"""
log.info("Adding head node to submit hosts and admin hosts lists")
#### SET HEAD NODE INTERNAL IP
self.getHeadIp();
envars = self.exportEnvironmentVars()
rootpath = self.getRootPath(master)
log.info("CreateCell.setHeadSubmit rootpath: %s", rootpath)
        add_submit = envars + rootpath + '/qconf -as ' + self.head_ip
        add_admin = envars + rootpath + '/qconf -ah ' + self.head_ip
log.debug("sge.CreateCell.setHeadSubmit %s", add_submit)
master.ssh.execute(add_submit)
log.debug("sge.CreateCell.setHeadSubmit %s", add_admin)
master.ssh.execute(add_admin)
def getHeadIp(self):
log.info("sge.CreateCell.getHeadIp Getting headnode internal IP")
p = os.popen('curl -s http://169.254.169.254/latest/meta-data/instance-id');
instanceid = p.read()
log.debug("sge.CreateCell.getHeadIp instanceid: %s" % instanceid)
command = "ec2-describe-instances -K " + self.privatekey \
+ " -C " + self.publiccert \
+ " " + instanceid
log.debug("sge.CreateCell.getHeadIp command: %s" % command)
p = os.popen(command);
reservation = p.read()
log.debug("sge.CreateCell.getHeadIp reservation: %s" % reservation)
instance = reservation.split("INSTANCE")[1];
log.debug("sge.CreateCell.getHeadIp instance: %s" % instance)
instanceRow = instance.split('\t')
self.head_ip = instanceRow[17]
log.info("sge.CreateCell.getHeadIp self.head_ip: %s" % self.head_ip)
def removeAllq (self):
"""
Delete default 'all.q' queue
"""
log.info("sge.CreateCell.removeAllq Removing the default 'all.q' queue")
envars = self.exportEnvironmentVars()
command = envars + self.rootpath + "/qconf -dq all.q"
log.debug("sge.CreateCell.removeAllq command: %s" % command)
os.system(command)
def addEnvarsToProfile(self, node):
"""
Add environment variables (SGE_CELL, ports, etc.) to /etc/profile
"""
log.info("Adding environment variables to /etc/profile")
envars = self.exportEnvironmentVars();
log.debug("sge.CreateCell.addEnvarsToProfile envars: echo '%s' >> /etc/profile", envars)
node.ssh.execute("echo '" + envars + "' >> /etc/profile")
def enableSchedulingInfo(self):
"""
Enable job scheduling info output for 'qstat -j'
"""
log.info("Enabling job scheduling info")
envars = self.exportEnvironmentVars()
log.debug(envars + self.rootpath + "/qconf -ssconf")
queue_template = subprocess.Popen(envars + self.rootpath + "/qconf -ssconf", stdout=subprocess.PIPE, shell=True).stdout.read()
log.debug("sge.CreateCell.enableSchedulingInfo BEFORE queue_template: %s", queue_template)
match = "schedd_job_info false"
insert = "schedd_job_info true"
queue_template = string.replace(queue_template, match, insert)
log.debug("sge.CreateCell.enableSchedulingInfo AFTER queue_template: %s", queue_template)
pid = os.getpid()
filename = "/tmp/queue-" + str(os.getpid()) + ".txt"
queue_file = open(filename, 'w')
print >> queue_file, queue_template
queue_file.close()
cmd = envars + self.rootpath + "/qconf -Msconf " + filename
log.debug(cmd)
os.system(cmd)
remove = "rm -fr " + filename
log.debug(remove)
os.system(remove)
def addParallelEnvironment(self, master):
"""
Add 'threaded' parallel environment
"""
log.info("Adding 'threaded' parallel environment")
sge_pe_template = """
pe_name threaded
slots %s
user_lists NONE
xuser_lists NONE
start_proc_args /bin/true
stop_proc_args /bin/true
allocation_rule $pe_slots
control_slaves TRUE
job_is_first_task FALSE
urgency_slots min
accounting_summary FALSE
"""
log.debug("addParallelEnvironment sge_pe_template: %s", sge_pe_template)
#### PRINT TEMPLATE FILE
pe_file = master.ssh.remote_file("/tmp/pe.txt")
print >> pe_file, sge_pe_template % 99999
pe_file.close()
envars = self.exportEnvironmentVars()
rootpath = self.getRootPath(master)
log.debug("CreateCell.addParallelEnvironment rootpath: %s", rootpath)
master.ssh.execute(envars + rootpath + "/qconf -Ap %s &> /tmp/pe.out" % pe_file.name)
master.ssh.execute(envars + rootpath + '/qconf -mattr queue pe_list "threaded" all.q &> /tmp/pe2q.out')
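    # Usage sketch (the job script name is hypothetical): once the 'threaded' PE is
    # registered and attached to all.q, a multi-threaded job can reserve N slots on a
    # single execution host with
    #   qsub -pe threaded 4 run_job.sh
    # because allocation_rule $pe_slots keeps every requested slot on one node.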
def setHeadSubmit(self, master):
"""
Add head node to submit and admin hosts lists on master
"""
log.info("Adding head node to submit hosts and admin hosts lists")
#### SET HEAD NODE INTERNAL IP
self.getHeadIp();
envars = self.exportEnvironmentVars()
rootpath = self.getRootPath(master)
log.info("CreateCell.setHeadSubmit rootpath: %s", rootpath)
add_submit = envars + rootpath + '/qconf -as ' + self.head_ip
add_admin = envars + rootpath + '/qconf -ah ' + self.head_ip
log.info("sge.CreateCell.setHeadSubmit %s", add_submit)
master.ssh.execute(add_submit)
log.info("sge.CreateCell.setHeadSubmit %s", add_admin)
master.ssh.execute(add_admin)
def restartSge(self, node):
"""
Restart SGE qmaster (master) and execd (master + nodes) daemons
"""
log.info("Restarting SGE qmaster and execd daemons")
rootpath = self.getRootPath(node)
log.debug("CreateCell.restartSge rootpath: %s", rootpath)
envars = self.exportEnvironmentVars()
        # NOTE: the qconf stop commands below are currently unused; the daemons
        # are terminated via the killall pipelines further down in this method.
        stop_execd = envars + rootpath + '/qconf -ke all'
        stop_qmaster = envars + rootpath + '/qconf -km'
start_qmaster = envars + rootpath + '/sge_qmaster'
start_execd = envars + rootpath + '/sge_execd'
sleep = 1
log.debug("sge.CreateCell.restartSge Doing RESTART SGE: %s (%s)", node.alias, node.private_ip_address)
#### KILL ANY LINGERING TERMINATED PROCESSES
killall = "/bin/ps aux | grep sgeadmin | cut -c9-14 | xargs -n1 -iPID /bin/kill -9 PID &> /dev/null"
log.debug(killall)
node.ssh.execute(killall, True, False, True)
killall = "/bin/ps aux | grep root | grep sge | cut -c9-14 | xargs -n1 -iPID /bin/kill -9 PID &> /dev/null"
log.debug(killall)
node.ssh.execute(killall, True, False, True)
log.debug("sge.CreateCell.restartSge node.alias: %s", node.alias)
if node.alias == "master":
time.sleep(float(sleep))
log.debug("sge.CreateCell.restartSge %s", start_qmaster)
node.ssh.execute(start_qmaster)
log.debug("sge.CreateCell.restartSge %s", start_execd)
node.ssh.execute(start_execd)
    def settingsCommand(self):
        """Build the shell command that regenerates the cell's settings.sh file"""
target = self.root + "/" + self.cell + "/common"
cmd = 'cd ' + target + '; '
cmd += self.exportEnvironmentVars()
cmd += self.root + '/util/create_settings.sh ' + target
log.debug("sge.CreateCell.createSettings cmd: %s", cmd)
return cmd
def createSettings(self, node):
"""
Generate settings.sh file containing SGE_CELL, SGE_ROOT and port info
"""
log.info("Generating settings.sh file")
log.debug("sge.CreateCell.createSettings CreateCell.createSettings(master)")
cmd = self.settingsCommand()
log.debug("sge.CreateCell.createSettings cmd: %s", cmd)
node.ssh.execute(cmd)
    def exportEnvironmentVars(self):
        """Build an 'export VAR=...;' prefix with SGE_ROOT, SGE_CELL and port settings"""
vars = 'export SGE_ROOT=' + self.root + '; '
vars += 'export SGE_CELL=' + self.cell + '; '
vars += 'export SGE_QMASTER_PORT=' + self.qmasterport + '; '
vars += 'export SGE_EXECD_PORT=' + self.execdport + '; '
return vars
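    # Illustrative example (editorial note; paths and ports below are hypothetical):
    # with root='/opt/sge6', cell='mycell', qmasterport='63231', execdport='63232',
    # exportEnvironmentVars() returns
    #   'export SGE_ROOT=/opt/sge6; export SGE_CELL=mycell; export SGE_QMASTER_PORT=63231; export SGE_EXECD_PORT=63232; '
    # which is prepended to the qconf/qstat invocations used throughout this class.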
def updateHeadIp(self):
"""
Set hostname as head_ip (in case has changed due to reboot)
"""
log.info("Updating hostname on head node")
log.debug("sge.CreateCell.updateHeadIp self.head_long_ip: %s", self.head_long_ip)
cmd = "hostname " + self.head_long_ip
log.debug("sge.CreateCell.updateHeadIp cmd: %s", cmd)
os.system(cmd)
def updateHeadActQmaster(self, master):
"""
Replace 'master' with 'ip-XXX-XXX-XXX-XXX' hostname in act_qmaster file
"""
log.info("Updating act_qmaster file")
log.debug("sge.CreateCell.updateHeadActQmaster CreateCell.updateHeadActQmaster(nodes)")
target = self.root + "/" + self.cell
act_qmaster = target + "/common/act_qmaster"
log.debug("sge.CreateCell.updateHeadActQmaster act_qmaster: %s", act_qmaster)
hostname = self.getHostname(master)
log.debug("sge.CreateCell.updateHeadActQmaster hostname: %s", hostname)
cmd = "echo '" + hostname + "' > " + act_qmaster
log.debug("sge.CreateCell.updateHeadActQmaster cmd: %s", cmd)
os.system(cmd)
def setMasterInfo(self, master):
"""
Set ip, dns name and instance ID in 'qmaster info' file
"""
target = self.root + "/" + self.cell
qmaster_info = target + "/qmaster_info"
log.info("Setting qmaster_info file: %s", qmaster_info)
instanceid = master.ssh.execute("curl -s http://169.254.169.254/latest/meta-data/instance-id")
log.info("CreateCell.setMasterInfo instanceid: %s", instanceid)
cmd = "echo '" + master.private_ip_address + "\t" \
+ master.private_dns_name + "\t" \
+ instanceid[0] + "' > " + qmaster_info
log.info("CreateCell.setMasterInfo cmd: %s", cmd)
os.system(cmd)
    def copyCellCommands(self):
        """Return the shell commands that clone the default cell dir into this cell"""
source = self.root + "/default"
target = self.root + "/" + self.cell
return (
'mkdir ' + target + ' &> /dev/null',
'rsync -a ' + source + "/* " + target + " --exclude *tar.gz",
'chown -R sgeadmin:sgeadmin ' + target
)
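    # Illustrative output (editorial note) for a hypothetical root='/opt/sge6'
    # and cell='mycell':
    #   ('mkdir /opt/sge6/mycell &> /dev/null',
    #    'rsync -a /opt/sge6/default/* /opt/sge6/mycell --exclude *tar.gz',
    #    'chown -R sgeadmin:sgeadmin /opt/sge6/mycell')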
def copyCellOnHead(self):
"""
Copy cell dir from default dir
"""
log.info("Copying cell directory on head node")
log.debug("sge.CreateCell.copyCellOnHead CreateCell.copyCellOnHead()")
commands = self.copyCellCommands()
log.debug("sge.CreateCell.copyCellOnHead commands: %s", commands)
target = self.root + "/" + self.cell
log.debug("sge.CreateCell.copyCell target: %s", target)
log.debug("sge.CreateCell.copyCellOnHead os.path.isdir(target): %s", os.path.isdir(target))
if not os.path.isdir(target):
for command in commands:
log.info(command)
os.system(command)
##### CREATE NEW settings.sh FILE
command = self.settingsCommand()
log.info(command)
os.system(command)
def copyCell(self, node):
"""
Copy cell dir from default dir
"""
log.info("Copying cell directory on %s", node.alias)
log.debug("sge.CreateCell.copyCell CreateCell.copyCell(" + node.alias + ")")
commands = self.copyCellCommands()
log.debug("sge.CreateCell.copyCell commands: %s", commands)
target = self.root + "/" + self.cell
log.debug("sge.CreateCell.copyCell target: %s", target)
log.debug("sge.CreateCell.copyCell os.path.isdir(target): %s", os.path.isdir(target))
#if not os.path.isdir(target):
for command in commands:
log.info(command)
node.ssh.execute(command, True, False, True)
#### PAUSE TO ALLOW FILE SYSTEM TO CATCH UP
time.sleep(2)
##### CREATE NEW settings.sh FILE
command = self.settingsCommand()
log.info("Creating settings.sh file")
log.info(command)
os.system(command)
def on_add_node(self, node, nodes, master, user, user_shell, volumes):
log.info("Doing 'on_add_node' for plugin: sge.CreateCell");
log.info("Adding %s", node.alias)
log.debug("sge.CreateCell.on_add_node CreateCell.on_add_node(self, node, nodes, master, user, user_shell, volumes)")
log.debug("sge.CreateCell.on_add_node node.private_dns_name: %s" % node.private_dns_name)
#### SET HEAD NODE INTERNAL IP
self.getHeadIp();
#### ADD ENVIRONMENT VARIABLES TO /etc/profile ON MASTER
self.addEnvarsToProfile(node)
##### CREATE NEW CELL DIRECTORY ON HEAD AND MASTER
self.copyCell(node);
##### RESTART SGE ON NODE
self.restartSge(node)
#### ADD NODE TO @allhosts GROUP
self.addToAllhosts(node, master)
log.info("Completed 'on_add_node' for plugin: sge.CreateCell");
def on_remove_node(self, node, nodes, master, user, user_shell, volumes):
log.info("Doing on_remove_node for plugin: sge.CreateCell")
log.info("Removing %s " % node.alias)
log.debug("sge.CreateCell.on_remove_node node.private_dns_name: %s" % node.private_dns_name)
| mit | -5,973,548,482,161,168,000 | 38.587719 | 134 | 0.57431 | false |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pandas/core/nanops.py | 2 | 21735 | import sys
import itertools
import functools
import numpy as np
try:
import bottleneck as bn
_USE_BOTTLENECK = True
except ImportError: # pragma: no cover
_USE_BOTTLENECK = False
import pandas.core.common as com
import pandas.hashtable as _hash
from pandas import compat, lib, algos, tslib
from pandas.compat import builtins
from pandas.core.common import (isnull, notnull, _values_from_object,
_maybe_upcast_putmask,
ensure_float, _ensure_float64,
_ensure_int64, _ensure_object,
is_float, is_integer, is_complex,
is_float_dtype, is_floating_dtype,
is_complex_dtype, is_integer_dtype,
is_bool_dtype, is_object_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
is_datetime_or_timedelta_dtype,
is_int_or_datetime_dtype, is_any_int_dtype)
class disallow(object):
def __init__(self, *dtypes):
super(disallow, self).__init__()
self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
def check(self, obj):
return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
self.dtypes)
def __call__(self, f):
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, compat.itervalues(kwargs))
if any(self.check(obj) for obj in obj_iter):
raise TypeError('reduction operation {0!r} not allowed for '
'this dtype'.format(f.__name__.replace('nan',
'')))
return f(*args, **kwargs)
return _f
class bottleneck_switch(object):
def __init__(self, zero_value=None, **kwargs):
self.zero_value = zero_value
self.kwargs = kwargs
def __call__(self, alt):
bn_name = alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
@functools.wraps(alt)
def f(values, axis=None, skipna=True, **kwds):
if len(self.kwargs) > 0:
for k, v in compat.iteritems(self.kwargs):
if k not in kwds:
kwds[k] = v
try:
if self.zero_value is not None and values.size == 0:
if values.ndim == 1:
# wrap the 0's if needed
if is_timedelta64_dtype(values):
return lib.Timedelta(0)
return 0
else:
result_shape = (values.shape[:axis] +
values.shape[axis + 1:])
result = np.empty(result_shape)
result.fill(0)
return result
if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype,
bn_name):
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except Exception:
result = alt(values, axis=axis, skipna=skipna, **kwds)
return result
return f
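# Editorial note: bottleneck_switch wraps a pure-numpy "alt" implementation and,
# when the bottleneck library is importable and the dtype passes _bn_ok_dtype,
# dispatches to the bottleneck function of the same name; it falls back to alt
# on any exception or when the fast result contains infs. For example
# (illustrative, assuming bottleneck is installed):
#   nanmin(np.array([1.0, np.nan, 3.0]))         # bn.nanmin path -> 1.0
#   nanmin(np.array(['a', 'b'], dtype=object))   # object dtype   -> alt path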
def _bn_ok_dtype(dt, name):
# Bottleneck chokes on datetime64
if (not is_object_dtype(dt) and
not is_datetime_or_timedelta_dtype(dt)):
# bottleneck does not properly upcast during the sum
# so can overflow
if name == 'nansum':
if dt.itemsize < 8:
return False
return True
return False
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
return lib.has_infs_f8(result.ravel())
elif result.dtype == 'f4':
return lib.has_infs_f4(result.ravel())
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError) as e:
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == '+inf':
return np.inf
else:
return -np.inf
else:
if fill_value_typ is None:
return tslib.iNaT
else:
if fill_value_typ == '+inf':
# need the max int here
return np.iinfo(np.int64).max
else:
return tslib.iNaT
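# Illustrative examples (editorial note) of the fill values chosen above:
#   _get_fill_value(np.dtype('float64'))                         -> np.nan
#   _get_fill_value(np.dtype('float64'), fill_value_typ='+inf')  -> np.inf
#   _get_fill_value(np.dtype('int64'))                           -> tslib.iNaT
#   _get_fill_value(np.dtype('int64'), fill_value_typ='+inf')    -> np.iinfo(np.int64).max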
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
isfinite=False, copy=True):
""" utility to get the values view, mask, dtype
if necessary copy and mask using the specified fill_value
copy = True will force the copy """
values = _values_from_object(values)
if isfinite:
mask = _isfinite(values)
else:
mask = isnull(values)
dtype = values.dtype
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(dtype, fill_value=fill_value,
fill_value_typ=fill_value_typ)
if skipna:
if copy:
values = values.copy()
if dtype_ok:
np.putmask(values, mask, fill_value)
# promote if needed
else:
values, changed = _maybe_upcast_putmask(values, mask, fill_value)
elif copy:
values = values.copy()
values = _view_if_needed(values)
# return a platform independent precision dtype
dtype_max = dtype
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
dtype_max = np.int64
elif is_float_dtype(dtype):
dtype_max = np.float64
return values, mask, dtype, dtype_max
def _isfinite(values):
if is_datetime_or_timedelta_dtype(values):
return isnull(values)
if (is_complex_dtype(values) or is_float_dtype(values) or
is_integer_dtype(values) or is_bool_dtype(values)):
return ~np.isfinite(values)
return ~np.isfinite(values.astype('float64'))
def _na_ok_dtype(dtype):
return not is_int_or_datetime_dtype(dtype)
def _view_if_needed(values):
if is_datetime_or_timedelta_dtype(values):
return values.view(np.int64)
return values
def _wrap_results(result, dtype):
""" wrap our results if needed """
if is_datetime64_dtype(dtype):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
result = lib.Timedelta(result)
else:
result = result.astype('i8').view(dtype)
return result
def nanany(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna)
return values.any(axis)
def nanall(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, True, copy=skipna)
return values.all(axis)
@disallow('M8')
@bottleneck_switch(zero_value=0)
def nansum(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
the_sum = values.sum(axis, dtype=dtype_max)
the_sum = _maybe_null_out(the_sum, axis, mask)
return _wrap_results(the_sum, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_max))
count = _get_counts(mask, axis)
if axis is not None and getattr(the_sum, 'ndim', False):
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return _wrap_results(the_mean, dtype)
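# Illustrative examples (editorial note) of nanmean's NaN handling:
#   nanmean(np.array([1.0, 2.0, np.nan]))                 -> 1.5  (NaN skipped)
#   nanmean(np.array([1.0, 2.0, np.nan]), skipna=False)   -> nan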
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna)
def get_median(x):
mask = notnull(x)
if not skipna and not mask.all():
return np.nan
return algos.median(_values_from_object(x[mask]))
if values.dtype != np.float64:
values = values.astype('f8')
if axis is None:
values = values.ravel()
notempty = values.size
# an array from a frame
if values.ndim > 1:
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
return _wrap_results(np.apply_along_axis(get_median, axis, values), dtype)
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
shp = np.array(values.shape)
dims = np.arange(values.ndim)
ret = np.empty(shp[dims != axis])
ret.fill(np.nan)
return _wrap_results(ret, dtype)
# otherwise return a scalar value
return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(mask, axis, ddof):
count = _get_counts(mask, axis)
    d = count - ddof
# always return NaN, never inf
if np.isscalar(count):
if count <= ddof:
count = np.nan
d = np.nan
else:
mask2 = count <= ddof
if mask2.any():
np.putmask(d, mask2, np.nan)
np.putmask(count, mask2, np.nan)
return count, d
def _nanvar(values, axis=None, skipna=True, ddof=1):
# private nanvar calculator
mask = isnull(values)
if is_any_int_dtype(values):
values = values.astype('f8')
count, d = _get_counts_nanvar(mask, axis, ddof)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
X = _ensure_numeric(values.sum(axis))
XX = _ensure_numeric((values ** 2).sum(axis))
return np.fabs((XX - X ** 2 / count) / d)
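# Editorial note: _nanvar above computes the textbook shortcut
#   Var = (sum(x**2) - sum(x)**2 / n) / (n - ddof)
# on values whose NaNs were zeroed out (when skipna is True), wrapping the
# result in np.fabs to guard against tiny negative round-off values.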
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanstd(values, axis=None, skipna=True, ddof=1):
result = np.sqrt(_nanvar(values, axis=axis, skipna=skipna, ddof=ddof))
return _wrap_results(result, values.dtype)
@disallow('M8','m8')
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
# we are going to allow timedelta64[ns] here
# but NOT going to coerce them to the Timedelta type
# as this could cause overflow
# so var cannot be computed (but std can!)
return _nanvar(values, axis=axis, skipna=skipna, ddof=ddof)
@disallow('M8','m8')
def nansem(values, axis=None, skipna=True, ddof=1):
var = nanvar(values, axis, skipna, ddof=ddof)
mask = isnull(values)
if not is_floating_dtype(values):
values = values.astype('f8')
count, _ = _get_counts_nanvar(mask, axis, ddof)
return np.sqrt(var)/np.sqrt(count)
@bottleneck_switch()
def nanmin(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna,
fill_value_typ='+inf')
# numpy 1.6.1 workaround in Python 3.x
if is_object_dtype(values) and compat.PY3:
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(builtins.min, apply_ax, values)
else:
try:
result = builtins.min(values)
except:
result = np.nan
else:
if ((axis is not None and values.shape[axis] == 0)
or values.size == 0):
try:
result = ensure_float(values.sum(axis, dtype=dtype_max))
result.fill(np.nan)
except:
result = np.nan
else:
result = values.min(axis)
result = _wrap_results(result, dtype)
return _maybe_null_out(result, axis, mask)
@bottleneck_switch()
def nanmax(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna,
fill_value_typ='-inf')
# numpy 1.6.1 workaround in Python 3.x
if is_object_dtype(values) and compat.PY3:
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(builtins.max, apply_ax, values)
else:
try:
result = builtins.max(values)
except:
result = np.nan
else:
if ((axis is not None and values.shape[axis] == 0)
or values.size == 0):
try:
result = ensure_float(values.sum(axis, dtype=dtype_max))
result.fill(np.nan)
except:
result = np.nan
else:
result = values.max(axis)
result = _wrap_results(result, dtype)
return _maybe_null_out(result, axis, mask)
def nanargmax(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf',
isfinite=True)
result = values.argmax(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
def nanargmin(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf',
isfinite=True)
result = values.argmin(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow('M8','m8')
def nanskew(values, axis=None, skipna=True):
mask = isnull(values)
if not is_floating_dtype(values):
values = values.astype('f8')
count = _get_counts(mask, axis)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** 2
C = (values ** 3).sum(axis) / count - A ** 3 - 3 * A * B
# floating point error
B = _zero_out_fperr(B)
C = _zero_out_fperr(C)
result = ((np.sqrt((count ** 2 - count)) * C) /
((count - 2) * np.sqrt(B) ** 3))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 3] = np.nan
return result
else:
result = 0 if B == 0 else result
if count < 3:
return np.nan
return result
@disallow('M8','m8')
def nankurt(values, axis=None, skipna=True):
mask = isnull(values)
if not is_floating_dtype(values):
values = values.astype('f8')
count = _get_counts(mask, axis)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** 2
C = (values ** 3).sum(axis) / count - A ** 3 - 3 * A * B
D = (values ** 4).sum(axis) / count - A ** 4 - 6 * B * A * A - 4 * C * A
B = _zero_out_fperr(B)
D = _zero_out_fperr(D)
if not isinstance(B, np.ndarray):
# if B is a scalar, check these corner cases first before doing division
if count < 4:
return np.nan
if B == 0:
return 0
result = (((count * count - 1.) * D / (B * B) - 3 * ((count - 1.) ** 2)) /
((count - 2.) * (count - 3.)))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 4] = np.nan
return result
@disallow('M8','m8')
def nanprod(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not is_any_int_dtype(values):
values = values.copy()
values[mask] = 1
result = values.prod(axis)
return _maybe_null_out(result, axis, mask)
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None or not getattr(result, 'ndim', False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(mask, axis):
if axis is None:
return float(mask.size - mask.sum())
count = mask.shape[axis] - mask.sum(axis)
try:
return count.astype(float)
except AttributeError:
return np.array(count, dtype=float)
def _maybe_null_out(result, axis, mask):
if axis is not None and getattr(result, 'ndim', False):
null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
if np.any(null_mask):
if np.iscomplexobj(result):
result = result.astype('c16')
else:
result = result.astype('f8')
result[null_mask] = np.nan
else:
null_mask = mask.size - mask.sum()
if null_mask == 0:
result = np.nan
return result
def _zero_out_fperr(arg):
if isinstance(arg, np.ndarray):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return 0 if np.abs(arg) < 1e-14 else arg
@disallow('M8','m8')
def nancorr(a, b, method='pearson', min_periods=None):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError('Operands to nancorr must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
f = get_corr_func(method)
return f(a, b)
def get_corr_func(method):
if method in ['kendall', 'spearman']:
from scipy.stats import kendalltau, spearmanr
def _pearson(a, b):
return np.corrcoef(a, b)[0, 1]
def _kendall(a, b):
rs = kendalltau(a, b)
if isinstance(rs, tuple):
return rs[0]
return rs
def _spearman(a, b):
return spearmanr(a, b)[0]
_cor_methods = {
'pearson': _pearson,
'kendall': _kendall,
'spearman': _spearman
}
return _cor_methods[method]
@disallow('M8','m8')
def nancov(a, b, min_periods=None):
if len(a) != len(b):
raise AssertionError('Operands to nancov must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
return np.cov(a, b)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except:
x = x.astype(np.float64)
else:
if not np.any(x.imag):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except Exception:
try:
x = complex(x)
except Exception:
raise TypeError('Could not convert %s to numeric' % str(x))
return x
# NA-friendly array comparisons
import operator
def make_nancomp(op):
def f(x, y):
xmask = isnull(x)
ymask = isnull(y)
mask = xmask | ymask
result = op(x, y)
if mask.any():
if is_bool_dtype(result):
result = result.astype('O')
np.putmask(result, mask, np.nan)
return result
return f
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
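# Illustrative example (editorial note): the NA-friendly comparators return an
# object-dtype result with NaN wherever either operand was null, e.g.
#   naneq(np.array([1.0, np.nan]), np.array([1.0, 2.0]))
#   -> array([True, nan], dtype=object)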
def unique1d(values):
"""
Hash table-based unique
"""
if np.issubdtype(values.dtype, np.floating):
table = _hash.Float64HashTable(len(values))
uniques = np.array(table.unique(_ensure_float64(values)),
dtype=np.float64)
elif np.issubdtype(values.dtype, np.datetime64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('M8[ns]')
elif np.issubdtype(values.dtype, np.timedelta64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('m8[ns]')
elif np.issubdtype(values.dtype, np.integer):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
else:
table = _hash.PyObjectHashTable(len(values))
uniques = table.unique(_ensure_object(values))
return uniques
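# Editorial note: unique1d picks a hash table keyed on the input dtype and
# returns the unique values (in order of first appearance), e.g.
#   unique1d(np.array([3.0, 1.0, 3.0, 2.0]))  -> array([3., 1., 2.])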
| gpl-2.0 | 7,385,500,466,185,315,000 | 28.53125 | 86 | 0.553577 | false |
cloudn/cf-php-build-pack | tests/common/base.py | 9 | 1024 | import os
from build_pack_utils import BuildPack
from common.integration import DirectoryHelper
from common.integration import OptionsHelper
class BaseCompileApp(object):
def setUp(self):
self.dh = DirectoryHelper()
(self.build_dir,
self.cache_dir,
self.temp_dir) = self.dh.create_bp_env(self.app_name)
self.bp = BuildPack({
'BUILD_DIR': self.build_dir,
'CACHE_DIR': self.cache_dir,
'TMPDIR': self.temp_dir
}, '.')
if 'BP_DEBUG' in os.environ.keys():
self.bp._ctx['BP_DEBUG'] = True
self.dh.copy_build_pack_to(self.bp.bp_dir)
self.dh.register_to_delete(self.bp.bp_dir)
self.opts = OptionsHelper(os.path.join(self.bp.bp_dir,
'defaults',
'options.json'))
self.opts.set_download_url(
'http://localhost:5000/binaries/{STACK}')
def tearDown(self):
self.dh.cleanup()
| apache-2.0 | -6,366,385,937,548,542,000 | 34.310345 | 63 | 0.552734 | false |
evidation-health/pymc3 | pymc3/examples/disaster_model_missing.py | 3 | 2197 | """
A model for coal mining disasters data with a changepoint
switchpoint ~ U(0, 110)
early_mean ~ Exp(1.)
late_mean ~ Exp(1.)
disasters[t] ~ Po(early_mean if t <= switchpoint, late_mean otherwise)
"""
from pymc3 import *
import theano.tensor as t
from numpy import arange, array, ones, concatenate
from numpy.random import randint
from numpy.ma import masked_values
__all__ = ['disasters_data', 'switchpoint', 'early_mean', 'late_mean', 'rate',
'disasters']
# Time series of recorded coal mining disasters in the UK from 1851 to 1962
# (a value of -1 marks a year with a missing count; these are masked out below)
disasters_data = array([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, -1, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, -1, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
years = len(disasters_data)
# avoid shadowing the imported masked_values helper with its result
masked_data = masked_values(disasters_data, value=-1)
with Model() as model:
# Prior for distribution of switchpoint location
switchpoint = DiscreteUniform('switchpoint', lower=0, upper=years)
# Priors for pre- and post-switch mean number of disasters
early_mean = Exponential('early_mean', lam=1.)
late_mean = Exponential('late_mean', lam=1.)
# Allocate appropriate Poisson rates to years before and after current
# switchpoint location
idx = arange(years)
rate = switch(switchpoint >= idx, early_mean, late_mean)
# Data likelihood
    disasters = Poisson('disasters', rate, observed=masked_data)
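    # Editorial note: because `observed` is a masked array, PyMC3 imputes the
    # masked entries (the -1 placeholders above) as a separate random variable
    # named 'disasters_missing', which is what the summary call in run() reports.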
def run(n=1000):
if n == "short":
n = 500
with model:
# Initial values for stochastic nodes
start = {'early_mean': 2., 'late_mean': 3.}
# Use slice sampler for means (other varibles auto-selected)
step = Slice([early_mean, late_mean])
tr = sample(n, tune=500, start=start, step=step)
summary(tr, vars=['disasters_missing'])
if __name__ == '__main__':
run()
| apache-2.0 | -7,075,218,614,544,722,000 | 32.287879 | 78 | 0.563496 | false |
citrix-openstack/build-swift | test/unit/obj/test_diskfile.py | 2 | 35609 | #-*- coding:utf-8 -*-
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.obj.diskfile"""
from __future__ import with_statement
import cPickle as pickle
import os
import errno
import mock
import unittest
import email
import tempfile
from shutil import rmtree
from time import time
from tempfile import mkdtemp
from hashlib import md5
from contextlib import closing
from gzip import GzipFile
from eventlet import tpool
from test.unit import FakeLogger, mock as unit_mock
from test.unit import _setxattr as setxattr
from swift.obj import diskfile
from swift.common import ondisk
from swift.common.utils import mkdirs
from swift.common.ondisk import hash_path, normalize_timestamp
from swift.common import ring
from swift.common.exceptions import DiskFileNotExist, DiskFileDeviceUnavailable
def _create_test_ring(path):
testgz = os.path.join(path, 'object.ring.gz')
intended_replica2part2dev_id = [
[0, 1, 2, 3, 4, 5, 6],
[1, 2, 3, 0, 5, 6, 4],
[2, 3, 0, 1, 6, 4, 5]]
intended_devs = [
{'id': 0, 'device': 'sda', 'zone': 0, 'ip': '127.0.0.0', 'port': 6000},
{'id': 1, 'device': 'sda', 'zone': 1, 'ip': '127.0.0.1', 'port': 6000},
{'id': 2, 'device': 'sda', 'zone': 2, 'ip': '127.0.0.2', 'port': 6000},
{'id': 3, 'device': 'sda', 'zone': 4, 'ip': '127.0.0.3', 'port': 6000},
{'id': 4, 'device': 'sda', 'zone': 5, 'ip': '127.0.0.4', 'port': 6000},
{'id': 5, 'device': 'sda', 'zone': 6,
'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6000},
{'id': 6, 'device': 'sda', 'zone': 7,
'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6000}]
intended_part_shift = 30
intended_reload_time = 15
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id, intended_devs,
intended_part_shift),
f)
return ring.Ring(path, ring_name='object',
reload_time=intended_reload_time)
class TestDiskFileModuleMethods(unittest.TestCase):
def setUp(self):
ondisk.HASH_PATH_SUFFIX = 'endcap'
ondisk.HASH_PATH_PREFIX = ''
# Setup a test ring (stolen from common/test_ring.py)
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'node')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
os.mkdir(self.devices)
os.mkdir(os.path.join(self.devices, 'sda'))
self.objects = os.path.join(self.devices, 'sda', 'objects')
os.mkdir(self.objects)
self.parts = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.ring = _create_test_ring(self.testdir)
self.conf = dict(
swift_dir=self.testdir, devices=self.devices, mount_check='false',
timeout='300', stats_interval='1')
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_hash_suffix_hash_dir_is_file_quarantine(self):
df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
FakeLogger())
mkdirs(os.path.dirname(df.datadir))
open(df.datadir, 'wb').close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
orig_quarantine_renamer = diskfile.quarantine_renamer
called = [False]
def wrapped(*args, **kwargs):
called[0] = True
return orig_quarantine_renamer(*args, **kwargs)
try:
diskfile.quarantine_renamer = wrapped
diskfile.hash_suffix(whole_path_from, 101)
finally:
diskfile.quarantine_renamer = orig_quarantine_renamer
self.assertTrue(called[0])
def test_hash_suffix_one_file(self):
df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
FakeLogger())
mkdirs(df.datadir)
f = open(
os.path.join(df.datadir,
normalize_timestamp(time() - 100) + '.ts'),
'wb')
f.write('1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
diskfile.hash_suffix(whole_path_from, 101)
self.assertEquals(len(os.listdir(self.parts['0'])), 1)
diskfile.hash_suffix(whole_path_from, 99)
self.assertEquals(len(os.listdir(self.parts['0'])), 0)
def test_hash_suffix_multi_file_one(self):
df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
FakeLogger())
mkdirs(df.datadir)
for tdiff in [1, 50, 100, 500]:
for suff in ['.meta', '.data', '.ts']:
f = open(
os.path.join(
df.datadir,
normalize_timestamp(int(time()) - tdiff) + suff),
'wb')
f.write('1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
hsh_path = os.listdir(whole_path_from)[0]
whole_hsh_path = os.path.join(whole_path_from, hsh_path)
diskfile.hash_suffix(whole_path_from, 99)
# only the tombstone should be left
self.assertEquals(len(os.listdir(whole_hsh_path)), 1)
def test_hash_suffix_multi_file_two(self):
df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
FakeLogger())
mkdirs(df.datadir)
for tdiff in [1, 50, 100, 500]:
suffs = ['.meta', '.data']
if tdiff > 50:
suffs.append('.ts')
for suff in suffs:
f = open(
os.path.join(
df.datadir,
normalize_timestamp(int(time()) - tdiff) + suff),
'wb')
f.write('1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
hsh_path = os.listdir(whole_path_from)[0]
whole_hsh_path = os.path.join(whole_path_from, hsh_path)
diskfile.hash_suffix(whole_path_from, 99)
# only the meta and data should be left
self.assertEquals(len(os.listdir(whole_hsh_path)), 2)
def test_invalidate_hash(self):
def assertFileData(file_path, data):
with open(file_path, 'r') as fp:
fdata = fp.read()
self.assertEquals(pickle.loads(fdata), pickle.loads(data))
df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
FakeLogger())
mkdirs(df.datadir)
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
hashes_file = os.path.join(self.objects, '0',
diskfile.HASH_FILE)
# test that non existent file except caught
self.assertEquals(diskfile.invalidate_hash(whole_path_from),
None)
# test that hashes get cleared
check_pickle_data = pickle.dumps({data_dir: None},
diskfile.PICKLE_PROTOCOL)
for data_hash in [{data_dir: None}, {data_dir: 'abcdefg'}]:
with open(hashes_file, 'wb') as fp:
pickle.dump(data_hash, fp, diskfile.PICKLE_PROTOCOL)
diskfile.invalidate_hash(whole_path_from)
assertFileData(hashes_file, check_pickle_data)
def test_get_hashes(self):
df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
FakeLogger())
mkdirs(df.datadir)
with open(
os.path.join(df.datadir,
normalize_timestamp(time()) + '.ts'),
'wb') as f:
f.write('1234567890')
part = os.path.join(self.objects, '0')
hashed, hashes = diskfile.get_hashes(part)
self.assertEquals(hashed, 1)
self.assert_('a83' in hashes)
hashed, hashes = diskfile.get_hashes(part, do_listdir=True)
self.assertEquals(hashed, 0)
self.assert_('a83' in hashes)
hashed, hashes = diskfile.get_hashes(part, recalculate=['a83'])
self.assertEquals(hashed, 1)
self.assert_('a83' in hashes)
def test_get_hashes_bad_dir(self):
df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
FakeLogger())
mkdirs(df.datadir)
with open(os.path.join(self.objects, '0', 'bad'), 'wb') as f:
f.write('1234567890')
part = os.path.join(self.objects, '0')
hashed, hashes = diskfile.get_hashes(part)
self.assertEquals(hashed, 1)
self.assert_('a83' in hashes)
self.assert_('bad' not in hashes)
def test_get_hashes_unmodified(self):
df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
FakeLogger())
mkdirs(df.datadir)
with open(
os.path.join(df.datadir,
normalize_timestamp(time()) + '.ts'),
'wb') as f:
f.write('1234567890')
part = os.path.join(self.objects, '0')
hashed, hashes = diskfile.get_hashes(part)
i = [0]
def _getmtime(filename):
i[0] += 1
return 1
with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
hashed, hashes = diskfile.get_hashes(
part, recalculate=['a83'])
self.assertEquals(i[0], 2)
def test_get_hashes_unmodified_and_zero_bytes(self):
df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
FakeLogger())
mkdirs(df.datadir)
part = os.path.join(self.objects, '0')
open(os.path.join(part, diskfile.HASH_FILE), 'w')
# Now the hash file is zero bytes.
i = [0]
def _getmtime(filename):
i[0] += 1
return 1
with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
hashed, hashes = diskfile.get_hashes(
part, recalculate=[])
# getmtime will actually not get called. Initially, the pickle.load
# will raise an exception first and later, force_rewrite will
# short-circuit the if clause to determine whether to write out a
# fresh hashes_file.
self.assertEquals(i[0], 0)
self.assertTrue('a83' in hashes)
def test_get_hashes_modified(self):
df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
FakeLogger())
mkdirs(df.datadir)
with open(
os.path.join(df.datadir,
normalize_timestamp(time()) + '.ts'),
'wb') as f:
f.write('1234567890')
part = os.path.join(self.objects, '0')
hashed, hashes = diskfile.get_hashes(part)
i = [0]
def _getmtime(filename):
if i[0] < 3:
i[0] += 1
return i[0]
with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
hashed, hashes = diskfile.get_hashes(
part, recalculate=['a83'])
self.assertEquals(i[0], 3)
def test_hash_cleanup_listdir(self):
file_list = []
def mock_listdir(path):
return list(file_list)
def mock_unlink(path):
file_list.remove(os.path.basename(path))
with unit_mock({'os.listdir': mock_listdir, 'os.unlink': mock_unlink}):
# purge .data if there's a newer .ts
file1 = normalize_timestamp(time()) + '.data'
file2 = normalize_timestamp(time() + 1) + '.ts'
file_list = [file1, file2]
self.assertEquals(diskfile.hash_cleanup_listdir('/whatever'),
[file2])
# purge .ts if there's a newer .data
file1 = normalize_timestamp(time()) + '.ts'
file2 = normalize_timestamp(time() + 1) + '.data'
file_list = [file1, file2]
self.assertEquals(diskfile.hash_cleanup_listdir('/whatever'),
[file2])
# keep .meta and .data if meta newer than data
file1 = normalize_timestamp(time()) + '.ts'
file2 = normalize_timestamp(time() + 1) + '.data'
file3 = normalize_timestamp(time() + 2) + '.meta'
file_list = [file1, file2, file3]
self.assertEquals(diskfile.hash_cleanup_listdir('/whatever'),
[file3, file2])
# keep only latest of multiple .ts files
file1 = normalize_timestamp(time()) + '.ts'
file2 = normalize_timestamp(time() + 1) + '.ts'
file3 = normalize_timestamp(time() + 2) + '.ts'
file_list = [file1, file2, file3]
self.assertEquals(diskfile.hash_cleanup_listdir('/whatever'),
[file3])
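    # Editorial summary of the precedence rules exercised above: a newer .ts
    # purges older .data files, a newer .data purges older .ts files, a .meta
    # file survives only when newer than its .data, and only the newest of
    # several .ts files is kept.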
class TestDiskFile(unittest.TestCase):
"""Test swift.obj.diskfile.DiskFile"""
def setUp(self):
"""Set up for testing swift.obj.diskfile"""
self.testdir = os.path.join(mkdtemp(), 'tmp_test_obj_server_DiskFile')
mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
def tearDown(self):
"""Tear down for testing swift.obj.diskfile"""
rmtree(os.path.dirname(self.testdir))
tpool.execute = self._orig_tpool_exc
def _create_ondisk_file(self, df, data, timestamp, ext='.data'):
mkdirs(df.datadir)
timestamp = normalize_timestamp(timestamp)
data_file = os.path.join(df.datadir, timestamp + ext)
with open(data_file, 'wb') as f:
f.write(data)
md = {'X-Timestamp': timestamp}
setxattr(f.fileno(), diskfile.METADATA_KEY,
pickle.dumps(md, diskfile.PICKLE_PROTOCOL))
def _create_test_file(self, data, timestamp=None):
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
if timestamp is None:
timestamp = time()
self._create_ondisk_file(df, data, timestamp)
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
df.open()
return df
def test_get_metadata(self):
df = self._create_test_file('1234567890', timestamp=42)
md = df.get_metadata()
self.assertEquals(md['X-Timestamp'], normalize_timestamp(42))
def test_disk_file_default_disallowed_metadata(self):
# build an object with some meta (ts 41)
orig_metadata = {'X-Object-Meta-Key1': 'Value1',
'Content-Type': 'text/garbage'}
df = self._get_disk_file(ts=41, extra_metadata=orig_metadata)
with df.open():
self.assertEquals('1024', df._metadata['Content-Length'])
# write some new metadata (fast POST, don't send orig meta, ts 42)
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
df.put_metadata({'X-Timestamp': '42', 'X-Object-Meta-Key2': 'Value2'})
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
with df.open():
# non-fast-post updateable keys are preserved
self.assertEquals('text/garbage', df._metadata['Content-Type'])
# original fast-post updateable keys are removed
self.assert_('X-Object-Meta-Key1' not in df._metadata)
# new fast-post updateable keys are added
self.assertEquals('Value2', df._metadata['X-Object-Meta-Key2'])
def test_disk_file_app_iter_corners(self):
df = self._create_test_file('1234567890')
self.assertEquals(''.join(df.app_iter_range(0, None)), '1234567890')
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
with df.open():
self.assertEqual(''.join(df.app_iter_range(5, None)), '67890')
def test_disk_file_app_iter_partial_closes(self):
df = self._create_test_file('1234567890')
with df.open():
it = df.app_iter_range(0, 5)
self.assertEqual(''.join(it), '12345')
self.assertEqual(df.fp, None)
def test_disk_file_app_iter_ranges(self):
df = self._create_test_file('012345678911234567892123456789')
it = df.app_iter_ranges([(0, 10), (10, 20), (20, 30)], 'plain/text',
'\r\n--someheader\r\n', 30)
value = ''.join(it)
self.assert_('0123456789' in value)
self.assert_('1123456789' in value)
self.assert_('2123456789' in value)
def test_disk_file_app_iter_ranges_edges(self):
df = self._create_test_file('012345678911234567892123456789')
it = df.app_iter_ranges([(3, 10), (0, 2)], 'application/whatever',
'\r\n--someheader\r\n', 30)
value = ''.join(it)
self.assert_('3456789' in value)
self.assert_('01' in value)
def test_disk_file_large_app_iter_ranges(self):
"""
This test case is to make sure that the disk file app_iter_ranges
method all the paths being tested.
"""
long_str = '01234567890' * 65536
target_strs = ['3456789', long_str[0:65590]]
df = self._create_test_file(long_str)
it = df.app_iter_ranges([(3, 10), (0, 65590)], 'plain/text',
'5e816ff8b8b8e9a5d355497e5d9e0301', 655360)
"""
the produced string actually missing the MIME headers
need to add these headers to make it as real MIME message.
The body of the message is produced by method app_iter_ranges
off of DiskFile object.
"""
header = ''.join(['Content-Type: multipart/byteranges;',
'boundary=',
'5e816ff8b8b8e9a5d355497e5d9e0301\r\n'])
value = header + ''.join(it)
parts = map(lambda p: p.get_payload(decode=True),
email.message_from_string(value).walk())[1:3]
self.assertEqual(parts, target_strs)
def test_disk_file_app_iter_ranges_empty(self):
"""
This test case tests when empty value passed into app_iter_ranges
When ranges passed into the method is either empty array or None,
this method will yield empty string
"""
df = self._create_test_file('012345678911234567892123456789')
it = df.app_iter_ranges([], 'application/whatever',
'\r\n--someheader\r\n', 100)
self.assertEqual(''.join(it), '')
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
with df.open():
it = df.app_iter_ranges(None, 'app/something',
'\r\n--someheader\r\n', 150)
self.assertEqual(''.join(it), '')
def test_disk_file_mkstemp_creates_dir(self):
tmpdir = os.path.join(self.testdir, 'sda1', 'tmp')
os.rmdir(tmpdir)
with diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c',
'o', FakeLogger()).create():
self.assert_(os.path.exists(tmpdir))
def test_iter_hook(self):
hook_call_count = [0]
def hook():
hook_call_count[0] += 1
df = self._get_disk_file(fsize=65, csize=8, iter_hook=hook)
with df.open():
for _ in df:
pass
self.assertEquals(hook_call_count[0], 9)
def test_quarantine(self):
df = self._create_test_file('') # empty
df.quarantine()
quar_dir = os.path.join(self.testdir, 'sda1', 'quarantined',
'objects', os.path.basename(os.path.dirname(
df.data_file)))
self.assert_(os.path.isdir(quar_dir))
def test_quarantine_same_file(self):
df = self._create_test_file('empty')
new_dir = df.quarantine()
quar_dir = os.path.join(self.testdir, 'sda1', 'quarantined',
'objects', os.path.basename(os.path.dirname(
df.data_file)))
self.assert_(os.path.isdir(quar_dir))
self.assertEquals(quar_dir, new_dir)
# have to remake the datadir and file
self._create_ondisk_file(df, '', time()) # still empty
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
df.open()
double_uuid_path = df.quarantine()
self.assert_(os.path.isdir(double_uuid_path))
self.assert_('-' in os.path.basename(double_uuid_path))
def _get_disk_file(self, invalid_type=None, obj_name='o',
fsize=1024, csize=8, mark_deleted=False, ts=None,
iter_hook=None, mount_check=False,
extra_metadata=None):
'''returns a DiskFile'''
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c',
obj_name, FakeLogger())
data = '0' * fsize
etag = md5()
if ts:
timestamp = ts
else:
timestamp = str(normalize_timestamp(time()))
with df.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer.fd).st_size),
}
metadata.update(extra_metadata or {})
writer.put(metadata)
if invalid_type == 'ETag':
etag = md5()
etag.update('1' + '0' * (fsize - 1))
etag = etag.hexdigest()
metadata['ETag'] = etag
diskfile.write_metadata(writer.fd, metadata)
if invalid_type == 'Content-Length':
metadata['Content-Length'] = fsize - 1
diskfile.write_metadata(writer.fd, metadata)
if mark_deleted:
metadata = {
'X-Timestamp': timestamp,
'deleted': True
}
df.put_metadata(metadata, tombstone=True)
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c',
obj_name, FakeLogger(),
disk_chunk_size=csize,
iter_hook=iter_hook, mount_check=mount_check)
df.open()
if invalid_type == 'Zero-Byte':
fp = open(df.data_file, 'w')
fp.close()
df.unit_test_len = fsize
return df
def test_quarantine_valids(self):
df = self._get_disk_file(obj_name='1')
for chunk in df:
pass
self.assertFalse(df.quarantined_dir)
df = self._get_disk_file(obj_name='2', csize=1)
for chunk in df:
pass
self.assertFalse(df.quarantined_dir)
df = self._get_disk_file(obj_name='3', csize=100000)
for chunk in df:
pass
self.assertFalse(df.quarantined_dir)
def run_quarantine_invalids(self, invalid_type):
df = self._get_disk_file(invalid_type=invalid_type, obj_name='1')
for chunk in df:
pass
self.assertTrue(df.quarantined_dir)
df = self._get_disk_file(invalid_type=invalid_type,
obj_name='2', csize=1)
for chunk in df:
pass
self.assertTrue(df.quarantined_dir)
df = self._get_disk_file(invalid_type=invalid_type,
obj_name='3', csize=100000)
for chunk in df:
pass
self.assertTrue(df.quarantined_dir)
df = self._get_disk_file(invalid_type=invalid_type, obj_name='4')
self.assertFalse(df.quarantined_dir)
df = self._get_disk_file(invalid_type=invalid_type, obj_name='5')
for chunk in df.app_iter_range(0, df.unit_test_len):
pass
self.assertTrue(df.quarantined_dir)
df = self._get_disk_file(invalid_type=invalid_type, obj_name='6')
for chunk in df.app_iter_range(0, df.unit_test_len + 100):
pass
self.assertTrue(df.quarantined_dir)
expected_quar = False
# for the following, Content-Length/Zero-Byte errors will always result
# in a quarantine, even if the whole file isn't check-summed
if invalid_type in ('Zero-Byte', 'Content-Length'):
expected_quar = True
df = self._get_disk_file(invalid_type=invalid_type, obj_name='7')
for chunk in df.app_iter_range(1, df.unit_test_len):
pass
self.assertEquals(bool(df.quarantined_dir), expected_quar)
df = self._get_disk_file(invalid_type=invalid_type, obj_name='8')
for chunk in df.app_iter_range(0, df.unit_test_len - 1):
pass
self.assertEquals(bool(df.quarantined_dir), expected_quar)
df = self._get_disk_file(invalid_type=invalid_type, obj_name='8')
for chunk in df.app_iter_range(1, df.unit_test_len + 1):
pass
self.assertEquals(bool(df.quarantined_dir), expected_quar)
def test_quarantine_invalids(self):
self.run_quarantine_invalids('ETag')
self.run_quarantine_invalids('Content-Length')
self.run_quarantine_invalids('Zero-Byte')
def test_quarantine_deleted_files(self):
df = self._get_disk_file(invalid_type='Content-Length')
df.close()
self.assertTrue(df.quarantined_dir)
df = self._get_disk_file(invalid_type='Content-Length',
mark_deleted=True)
df.close()
self.assertFalse(df.quarantined_dir)
df = self._get_disk_file(invalid_type='Content-Length',
mark_deleted=True)
self.assertRaises(DiskFileNotExist, df.get_data_file_size)
def test_put_metadata(self):
df = self._get_disk_file()
ts = time()
metadata = {'X-Timestamp': ts, 'X-Object-Meta-test': 'data'}
df.put_metadata(metadata)
exp_name = '%s.meta' % str(normalize_timestamp(ts))
dl = os.listdir(df.datadir)
self.assertEquals(len(dl), 2)
self.assertTrue(exp_name in set(dl))
def test_put_metadata_ts(self):
df = self._get_disk_file()
ts = time()
metadata = {'X-Timestamp': ts, 'X-Object-Meta-test': 'data'}
df.put_metadata(metadata, tombstone=True)
exp_name = '%s.ts' % str(normalize_timestamp(ts))
dl = os.listdir(df.datadir)
self.assertEquals(len(dl), 1)
self.assertTrue(exp_name in set(dl))
def test_delete(self):
df = self._get_disk_file()
ts = time()
df.delete(ts)
exp_name = '%s.ts' % str(normalize_timestamp(ts))
dl = os.listdir(df.datadir)
self.assertEquals(len(dl), 1)
self.assertTrue(exp_name in set(dl))
def test_close_error(self):
def err():
raise Exception("bad")
df = self._get_disk_file(fsize=1024 * 2)
df._handle_close_quarantine = err
with df.open():
for chunk in df:
pass
# close is called at the end of the iterator
self.assertEquals(df.fp, None)
self.assertEquals(len(df.logger.log_dict['error']), 1)
def test_quarantine_twice(self):
df = self._get_disk_file(invalid_type='Content-Length')
self.assert_(os.path.isfile(df.data_file))
quar_dir = df.quarantine()
self.assertFalse(os.path.isfile(df.data_file))
self.assert_(os.path.isdir(quar_dir))
self.assertEquals(df.quarantine(), None)
def test_mount_checking(self):
def _mock_ismount(*args, **kwargs):
return False
with mock.patch("os.path.ismount", _mock_ismount):
self.assertRaises(DiskFileDeviceUnavailable, self._get_disk_file,
mount_check=True)
def test_ondisk_search_loop_ts_meta_data(self):
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
self._create_ondisk_file(df, '', ext='.ts', timestamp=10)
self._create_ondisk_file(df, '', ext='.ts', timestamp=9)
self._create_ondisk_file(df, '', ext='.meta', timestamp=8)
self._create_ondisk_file(df, '', ext='.meta', timestamp=7)
self._create_ondisk_file(df, 'B', ext='.data', timestamp=6)
self._create_ondisk_file(df, 'A', ext='.data', timestamp=5)
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
with df.open():
self.assertTrue('X-Timestamp' in df._metadata)
self.assertEquals(df._metadata['X-Timestamp'],
normalize_timestamp(10))
self.assertTrue('deleted' in df._metadata)
self.assertTrue(df._metadata['deleted'])
def test_ondisk_search_loop_meta_ts_data(self):
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
self._create_ondisk_file(df, '', ext='.meta', timestamp=10)
self._create_ondisk_file(df, '', ext='.meta', timestamp=9)
self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
self._create_ondisk_file(df, 'B', ext='.data', timestamp=6)
self._create_ondisk_file(df, 'A', ext='.data', timestamp=5)
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
with df.open():
self.assertTrue('X-Timestamp' in df._metadata)
self.assertEquals(df._metadata['X-Timestamp'],
normalize_timestamp(8))
self.assertTrue('deleted' in df._metadata)
def test_ondisk_search_loop_meta_data_ts(self):
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
self._create_ondisk_file(df, '', ext='.meta', timestamp=10)
self._create_ondisk_file(df, '', ext='.meta', timestamp=9)
self._create_ondisk_file(df, 'B', ext='.data', timestamp=8)
self._create_ondisk_file(df, 'A', ext='.data', timestamp=7)
self._create_ondisk_file(df, '', ext='.ts', timestamp=6)
self._create_ondisk_file(df, '', ext='.ts', timestamp=5)
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
with df.open():
self.assertTrue('X-Timestamp' in df._metadata)
self.assertEquals(df._metadata['X-Timestamp'],
normalize_timestamp(10))
self.assertTrue('deleted' not in df._metadata)
def test_ondisk_search_loop_data_meta_ts(self):
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
self._create_ondisk_file(df, 'B', ext='.data', timestamp=10)
self._create_ondisk_file(df, 'A', ext='.data', timestamp=9)
self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
self._create_ondisk_file(df, '', ext='.meta', timestamp=6)
self._create_ondisk_file(df, '', ext='.meta', timestamp=5)
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
with df.open():
self.assertTrue('X-Timestamp' in df._metadata)
self.assertEquals(df._metadata['X-Timestamp'],
normalize_timestamp(10))
self.assertTrue('deleted' not in df._metadata)
def test_ondisk_search_loop_wayward_files_ignored(self):
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
self._create_ondisk_file(df, 'X', ext='.bar', timestamp=11)
self._create_ondisk_file(df, 'B', ext='.data', timestamp=10)
self._create_ondisk_file(df, 'A', ext='.data', timestamp=9)
self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
self._create_ondisk_file(df, '', ext='.meta', timestamp=6)
self._create_ondisk_file(df, '', ext='.meta', timestamp=5)
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
with df.open():
self.assertTrue('X-Timestamp' in df._metadata)
self.assertEquals(df._metadata['X-Timestamp'],
normalize_timestamp(10))
self.assertTrue('deleted' not in df._metadata)
def test_ondisk_search_loop_listdir_error(self):
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
def mock_listdir_exp(*args, **kwargs):
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
with mock.patch("os.listdir", mock_listdir_exp):
self._create_ondisk_file(df, 'X', ext='.bar', timestamp=11)
self._create_ondisk_file(df, 'B', ext='.data', timestamp=10)
self._create_ondisk_file(df, 'A', ext='.data', timestamp=9)
self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
self._create_ondisk_file(df, '', ext='.meta', timestamp=6)
self._create_ondisk_file(df, '', ext='.meta', timestamp=5)
df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
FakeLogger())
self.assertRaises(OSError, df.open)
def test_exception_in_handle_close_quarantine(self):
df = self._get_disk_file()
def blow_up():
raise Exception('a very special error')
df._handle_close_quarantine = blow_up
df.close()
log_lines = df.logger.get_lines_for_level('error')
self.assert_('a very special error' in log_lines[-1])
| apache-2.0 | 8,313,879,566,718,320,000 | 41.240807 | 79 | 0.54402 | false |
mitocw/edx-platform | common/test/acceptance/pages/studio/video/video.py | 4 | 25115 | """
CMS Video
"""
import os
import os.path
import time
import requests
from bok_choy.javascript import js_defined, wait_for_js
from bok_choy.promise import EmptyPromise, Promise
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from six.moves import range
from common.test.acceptance.pages.common.utils import sync_on_notification
from common.test.acceptance.pages.lms.video.video import VideoPage
from common.test.acceptance.tests.helpers import YouTubeStubConfig
CLASS_SELECTORS = {
'video_container': '.video',
'video_init': '.is-initialized',
'video_xmodule': '.xmodule_VideoBlock',
'video_spinner': '.video-wrapper .spinner',
'video_controls': '.video-controls',
'attach_asset': '.upload-dialog > input[type="file"]',
'upload_dialog': '.wrapper-modal-window-assetupload',
'xblock': '.add-xblock-component',
'slider_range': '.slider-range',
'error': '.transcripts-error-message',
'url_inputs': '.videolist-settings-item input.input',
'collapse_bar': '.videolist-extra-videos',
'status': '.transcripts-message-status',
'attach_transcript': '.file-chooser > input[type="file"]',
'basic_metadata': '.basic_metadata_edit',
}
BUTTON_SELECTORS = {
'create_video': 'button[data-category="video"]',
'handout_download': '.wrapper-handouts .btn-link',
'handout_download_editor': '.wrapper-comp-setting.file-uploader .download-action',
'upload_asset': '.upload-action',
'asset_submit': '.action-upload',
'handout_clear': '.wrapper-comp-setting.file-uploader .setting-clear',
'translations_clear': '.metadata-video-translations .setting-clear',
'translation_add': '.wrapper-translations-settings > a',
'import': '.setting-import',
'download_to_edit': '.setting-download',
'disabled_download_to_edit': '.setting-download.is-disabled',
'upload_new_timed_transcripts': '.setting-upload',
'replace': '.setting-replace',
'choose': '.setting-choose',
'use_existing': '.setting-use-existing',
'collapse_link': '.collapse-action.collapse-setting',
}
DROP_DOWN_SELECTORS = {
'transcript_language': '.wrapper-translations-settings .list-settings .list-settings-item select'
}
DISPLAY_NAME = "Component Display Name"
DEFAULT_SETTINGS = [
# basic
[DISPLAY_NAME, 'Video', False],
['Default Video URL', 'https://www.youtube.com/watch?v=3_yD_cEKoCk, , ', False],
['Video ID', '', False],
# advanced
[DISPLAY_NAME, 'Video', False],
['Download Transcript Allowed', 'False', False],
['Downloadable Transcript URL', '', False],
['Show Transcript', 'True', False],
['Transcript Languages', '', False],
['Upload Handout', '', False],
['Video Available on Web Only', 'False', False],
['Video Download Allowed', 'False', False],
['Video File URLs', '', False],
['Video ID', '', False],
['Video Start Time', '00:00:00', False],
['Video Stop Time', '00:00:00', False],
['YouTube ID', '3_yD_cEKoCk', False],
['YouTube ID for .75x speed', '', False],
['YouTube ID for 1.25x speed', '', False],
['YouTube ID for 1.5x speed', '', False]
]
# field names without clear button
FIELDS_WO_CLEAR = [
'Transcript Languages'
]
# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5
@js_defined('window.Video', 'window.jQuery', 'window.XModule', 'window.XBlock',
'window.MathJax')
class VideoComponentPage(VideoPage):
"""
CMS Video Component Page
"""
url = None
@wait_for_js
def is_browser_on_page(self):
return (
self.q(css='div{0}'.format(CLASS_SELECTORS['video_xmodule'])).present or
self.q(css='div{0}'.format(CLASS_SELECTORS['xblock'])).present
)
def get_element_selector(self, class_name, vertical=False):
return super(VideoComponentPage, self).get_element_selector(class_name, vertical=vertical)
def _wait_for(self, check_func, desc, result=False, timeout=30):
"""
        Calls the method provided as an argument until the Promise is satisfied or a BrokenPromise is raised
Arguments:
check_func (callable): Promise function to be fulfilled.
desc (str): Description of the Promise, used in log messages.
result (bool): Indicates whether we need result from Promise or not
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out.
"""
if result:
return Promise(check_func, desc, timeout=timeout).fulfill()
else:
return EmptyPromise(check_func, desc, timeout=timeout).fulfill()
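    # Illustrative call of the Promise wrapper above (the selector and the
    # description string are made-up examples, not part of the original page object):
    #   self._wait_for(lambda: self.q(css='.video-controls').visible,
    #                  'Video controls are visible', timeout=15)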
def wait_for_video_component_render(self):
"""
Wait until video component rendered completely
"""
if not YouTubeStubConfig.get_configuration().get('youtube_api_blocked'):
self._wait_for(lambda: self.q(css=CLASS_SELECTORS['video_init']).present, 'Video Player Initialized')
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['video_spinner']).visible,
'Video Buffering Completed')
self._wait_for(self.is_controls_visible, 'Player Controls are Visible')
def wait_for_message(self, message_type, expected_message):
"""
Wait until the message of the requested type is as expected.
"""
self._wait_for(lambda: self.message(message_type) == expected_message, "Waiting for message update.")
@wait_for_js
def is_controls_visible(self):
"""
        Get the current visibility state of all video controls.
Returns:
bool: True means video controls are visible for all videos, False means video controls are not visible
for one or more videos
"""
return self.q(css=CLASS_SELECTORS['video_controls']).visible
def click_button_subtitles(self):
"""
Click .setting-replace button after first hovering to it.
"""
element = self.q(css='.setting-replace')[0]
ActionChains(self.browser).move_to_element(element).click(element).perform()
def click_button(self, button_name, index=0, require_notification=False):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
index (int): query index
"""
self.scroll_to_button(button_name, index)
self.q(css=BUTTON_SELECTORS[button_name]).nth(index).click()
if require_notification:
sync_on_notification(self)
self.wait_for_ajax()
def scroll_to_button(self, button_name, index=0):
"""
Scroll to a button specified by `button_name`
Arguments:
button_name (str): button name
index (int): query index
"""
element = self.q(css=BUTTON_SELECTORS[button_name])[index]
self.browser.execute_script("arguments[0].scrollIntoView();", element)
def get_drop_down_items(self, drop_down_name, index=0):
"""
Get the items from a drop down list specified by `drop_down_name`
Arguments:
drop_down_name (str): name of the drop down list
index (int): query index
"""
drop_downs = self.q(css=DROP_DOWN_SELECTORS[drop_down_name])
return drop_downs[index].find_elements_by_tag_name("option")
def is_language_disabled(self, lang_code):
"""
        Determine whether or not a language is disabled in a drop down
Arguments:
lang_code (str): two letter language code
"""
language_options = self.get_drop_down_items('transcript_language', index=1)
language = [l for l in language_options if l.get_attribute('value') == lang_code][0]
return language.get_attribute("disabled")
@staticmethod
def file_path(filename):
"""
Construct file path to be uploaded to assets.
Arguments:
filename (str): asset filename
"""
return os.sep.join(os.path.abspath(__file__).split(os.sep)[:-5]) + '/data/uploads/' + filename
def upload_handout(self, handout_filename):
"""
Upload a handout file to assets
Arguments:
handout_filename (str): handout file name
"""
self.upload_asset(handout_filename)
def upload_asset(self, asset_filename, asset_type='handout', index=0):
"""
        Upload an asset file to assets
Arguments:
asset_filename (str): asset file name
asset_type (str): one of `handout`, `transcript`
index (int): query index
"""
asset_file_path = self.file_path(asset_filename)
self.scroll_to_button('upload_asset')
self.click_button('upload_asset', index)
self.q(css=CLASS_SELECTORS['attach_asset']).results[0].send_keys(asset_file_path)
        # Only srt format transcript files can be uploaded. If an error
        # occurs due to an incorrect transcript file, we return from here.
if asset_type == 'transcript' and self.q(css='#upload_error').present:
return
self.click_button('asset_submit')
# confirm upload completion
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['upload_dialog']).present, 'Upload Completed')
def clear_handout(self):
"""
Clear handout from settings
"""
self.click_button('handout_clear')
def _get_handout(self, url):
"""
Download handout at `url`
"""
kwargs = dict()
session_id = [{i['name']: i['value']} for i in self.browser.get_cookies() if i['name'] == u'sessionid']
if session_id:
kwargs.update({
'cookies': session_id[0]
})
response = requests.get(url, **kwargs)
return response.status_code < 400, response.headers
def download_handout(self, mime_type, is_editor=False):
"""
Download handout with mime type specified by `mime_type`
Arguments:
mime_type (str): mime type of handout file
Returns:
tuple: Handout download result.
"""
selector = BUTTON_SELECTORS['handout_download_editor'] if is_editor else BUTTON_SELECTORS['handout_download']
handout_url = self.q(css=selector).attrs('href')[0]
result, headers = self._get_handout(handout_url)
return result, headers['content-type'] == mime_type
@property
def is_handout_button_visible(self):
"""
Check if handout download button is visible
"""
return self.q(css=BUTTON_SELECTORS['handout_download']).visible
def create_video(self):
"""
Create a Video Component by clicking on Video button and wait for rendering completion.
"""
# Create video
self.click_button('create_video', require_notification=True)
self.wait_for_video_component_render()
def xblocks(self):
"""
        Returns the total number of video xblocks present on the current unit page.
Returns:
(int): total video xblocks
"""
return len(self.q(css='.xblock-header').filter(
lambda el: 'xblock-header-video' in el.get_attribute('class')).results)
def focus_caption_line(self, line_number):
"""
Focus a caption line as specified by `line_number`
Arguments:
line_number (int): caption line number
"""
caption_line_selector = u".subtitles li span[data-index='{index}']".format(index=line_number - 1)
self.q(css=caption_line_selector).results[0].send_keys(Keys.ENTER)
def is_caption_line_focused(self, line_number):
"""
Check if a caption line focused
Arguments:
line_number (int): caption line number
"""
caption_line_selector = u".subtitles li span[data-index='{index}']".format(index=line_number - 1)
caption_container = self.q(css=caption_line_selector).results[0].find_element_by_xpath('..')
return 'focused' in caption_container.get_attribute('class').split()
@property
def is_slider_range_visible(self):
"""
Return True if slider range is visible.
"""
return self.q(css=CLASS_SELECTORS['slider_range']).visible
def verify_settings(self):
"""
Verify that video component has correct default settings.
"""
def _check_settings_length():
"""Check video settings"""
query = '.wrapper-comp-setting'
settings = self.q(css=query).results
if len(DEFAULT_SETTINGS) == len(settings):
return True, settings
return (False, None)
settings = Promise(_check_settings_length, 'All video fields are present').fulfill()
for counter, setting in enumerate(settings):
is_verified = self._verify_setting_entry(
setting,
DEFAULT_SETTINGS[counter][0],
DEFAULT_SETTINGS[counter][1]
)
if not is_verified:
return is_verified
return True
@staticmethod
def _verify_setting_entry(setting, field_name, field_value):
"""
Verify a `setting` entry.
Arguments:
setting (WebElement): Selenium WebElement
field_name (str): Name of field
field_value (str): Value of field
Returns:
bool: Does `setting` have correct value.
"""
if field_name != setting.find_element_by_class_name('setting-label').get_attribute('innerHTML'):
return False
# Get class attribute values
classes = setting.get_attribute('class').split()
list_type_classes = ['metadata-list-enum', 'metadata-dict', 'metadata-video-translations']
is_list_type = any(list_type in classes for list_type in list_type_classes)
if is_list_type:
current_value = ', '.join(
ele.get_attribute('value') for ele in setting.find_elements_by_class_name('list-settings-item'))
elif 'metadata-videolist-enum' in setting.get_attribute('class'):
current_value = ', '.join(item.find_element_by_tag_name('input').get_attribute('value') for item in
setting.find_elements_by_class_name('videolist-settings-item'))
else:
current_value = setting.find_element_by_class_name('setting-input').get_attribute('value')
if field_value != current_value:
return False
# Verify if clear button is active for expected video fields
if field_name not in FIELDS_WO_CLEAR and 'metadata-videolist-enum' not in setting.get_attribute('class'):
setting_clear_button = setting.find_elements_by_class_name('setting-clear')[0]
if 'active' not in setting_clear_button.get_attribute('class'):
return False
return True
def set_field_value(self, field_name, field_value, field_type='input'):
"""
Set settings input `field` with `value`
Arguments:
field_name (str): Name of field
field_value (str): Name of value
field_type (str): `input`, `select` etc(more to be added later)
"""
query = '.wrapper-comp-setting > label:nth-child(1)'
field_id = ''
if field_type == 'input':
for index, _ in enumerate(self.q(css=query)):
if field_name in self.q(css=query).nth(index).text[0]:
field_id = self.q(css=query).nth(index).attrs('for')[0]
break
self.q(css='#{}'.format(field_id)).fill(field_value)
elif field_type == 'select':
self.q(css=u'select[name="{0}"] option[value="{1}"]'.format(field_name, field_value)).first.click()
def verify_field_value(self, field_name, field_value):
"""
Get settings value of `field_name`
Arguments:
field_name (str): Name of field
field_value (str): Name of value
Returns:
bool: If `field_name` has `field_value`
"""
_, setting = self._get_setting_entry(field_name)
return self._verify_setting_entry(setting, field_name, field_value)
def _get_setting_entry(self, field_name):
"""
Get setting entry of `field_name`
Arguments:
field_name (str): Name of field
Returns:
setting (WebElement): Selenium WebElement
"""
for index, setting in enumerate(self.q(css='.wrapper-comp-setting').results):
if setting.find_element_by_class_name('setting-label').get_attribute('innerHTML') == field_name:
return index, setting
def translations_count(self):
"""
Get count of translations.
"""
return len(self.q(css='.wrapper-translations-settings .list-settings-item').results)
def select_translation_language(self, language_code, index=0):
"""
Select translation language as specified by `language_code`
Arguments:
language_code (str):
index (int): query index
"""
translations_items = '.wrapper-translations-settings .list-settings-item'
language_selector = translations_items + u' select option[value="{}"]'.format(language_code)
self.q(css=language_selector).nth(index).click()
def upload_translation(self, transcript_name, language_code):
"""
Upload a translation file.
Arguments:
transcript_name (str):
language_code (str):
"""
self.click_button('translation_add')
translations_count = self.translations_count()
self.select_translation_language(language_code, translations_count - 1)
self.upload_asset(transcript_name, asset_type='transcript', index=translations_count - 1)
def replace_translation(self, old_lang_code, new_lang_code, transcript_name):
"""
Replace a translation.
Arguments:
old_lang_code (str):
new_lang_code (str):
transcript_name (str):
"""
language_codes = self.translations()
index = language_codes.index(old_lang_code)
self.select_translation_language(new_lang_code, index)
self.upload_asset(transcript_name, asset_type='transcript', index=index)
def translations(self):
"""
Extract translations
Returns:
list: list of translation language codes
"""
translations_selector = '.metadata-video-translations .list-settings-item'
return self.q(css=translations_selector).attrs('data-original-lang')
def download_translation(self, language_code, text_to_search):
"""
Download a translation having `language_code` and containing `text_to_search`
Arguments:
language_code (str): language code
text_to_search (str): text to search in translation
Returns:
bool: whether download was successful
"""
mime_type = 'application/x-subrip'
lang_code = '?language_code={}'.format(language_code)
link = [link for link in self.q(css='.download-action').attrs('href') if lang_code in link]
result, headers, content = self._get_transcript(link[0])
return result is True and mime_type in headers['content-type'] and text_to_search in content.decode('utf-8')
def remove_translation(self, language_code):
"""
Remove a translation having `language_code`
Arguments:
language_code (str): language code
"""
selector = '.metadata-video-translations .list-settings-item'
translation = self.q(css=selector).filter(lambda el: language_code == el.get_attribute('data-original-lang'))
translation[0].find_element_by_class_name('remove-action').click()
@property
def upload_status_message(self):
"""
Get asset upload status message
"""
return self.q(css='#upload_error').text[0]
def captions_lines(self):
"""
Extract partial caption lines.
        As all the caption lines are exactly the same, getting only partial lines will work.
"""
self.wait_for_captions()
selector = u'.subtitles li:nth-child({})'
return ' '.join([self.q(css=selector.format(i)).text[0] for i in range(1, 6)])
def set_url_field(self, url, field_number):
"""
Set video url field in basic settings tab.
Arguments:
url (str): video url
field_number (int): video url field number
"""
if self.q(css=CLASS_SELECTORS['collapse_bar']).visible is False:
self.click_button('collapse_link')
self.q(css=CLASS_SELECTORS['url_inputs']).nth(field_number - 1).fill(url)
time.sleep(DELAY)
self.wait_for_ajax()
def message(self, message_type):
"""
Get video url field status/error message.
Arguments:
message_type(str): type(status, error) of message
Returns:
str: status/error message
"""
if message_type == 'status':
self.wait_for_element_visibility(CLASS_SELECTORS[message_type],
u'{} message is Visible'.format(message_type.title()))
return self.q(css=CLASS_SELECTORS[message_type]).text[0]
def url_field_status(self, *field_numbers):
"""
Get video url field status(enable/disable).
Arguments:
url (str): video url
field_numbers (tuple or None): field numbers to check status for, None means get status for all.
tuple items will be integers and must start from 1
Returns:
dict: field numbers as keys and field status(bool) as values, False means a field is disabled
"""
if field_numbers:
index_list = [number - 1 for number in field_numbers]
else:
index_list = list(range(3)) # maximum three fields
statuses = {}
for index in index_list:
status = 'is-disabled' not in self.q(css=CLASS_SELECTORS['url_inputs']).nth(index).attrs('class')[0]
statuses[index + 1] = status
return statuses
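    # Example of the returned mapping (field numbers are 1-based; the values
    # shown here are illustrative only):
    #   self.url_field_status(1, 2)  ->  {1: True, 2: False}
    #   self.url_field_status()      ->  {1: True, 2: True, 3: False}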
def clear_field(self, index):
"""
Clear a video url field at index specified by `index`.
"""
self.q(css=CLASS_SELECTORS['url_inputs']).nth(index - 1).fill('')
# Trigger an 'input' event after filling the field with an empty value.
self.browser.execute_script(
"$('{}:eq({})').trigger('{}')".format(CLASS_SELECTORS['url_inputs'], index, 'input'))
time.sleep(DELAY)
self.wait_for_ajax()
def clear_fields(self):
"""
Clear video url fields.
"""
script = u"""
$('{selector}')
.prop('disabled', false)
.removeClass('is-disabled')
.val('')
.trigger('input');
""".format(selector=CLASS_SELECTORS['url_inputs'])
self.browser.execute_script(script)
time.sleep(DELAY)
self.wait_for_ajax()
def revert_field(self, field_name):
"""
Revert a field.
"""
_, setting = self._get_setting_entry(field_name)
setting.find_element_by_class_name('setting-clear').click()
def is_transcript_button_visible(self, button_name, index=0, button_text=None):
"""
Check if a transcript related button is visible.
Arguments:
button_name (str): name of button
index (int): query index
button_text (str or None): text to match with text on a button, if None then don't match texts
Returns:
bool: is button visible
"""
is_visible = self.q(css=BUTTON_SELECTORS[button_name]).nth(index).visible
is_text_matched = True
if button_text and button_text != self.q(css=BUTTON_SELECTORS[button_name]).nth(index).text[0]:
is_text_matched = False
return is_visible and is_text_matched
def upload_transcript(self, transcript_filename):
"""
Upload a Transcript
Arguments:
transcript_filename (str): name of transcript file
"""
# Show the Browse Button
self.browser.execute_script("$('form.file-chooser').show()")
asset_file_path = self.file_path(transcript_filename)
attach_css = CLASS_SELECTORS['attach_transcript']
self.wait_for_element_visibility(attach_css, "The file chooser's input field is visible.")
self.q(css=attach_css).results[0].send_keys(asset_file_path)
# confirm upload completion
self._wait_for(lambda: not self.q(css=attach_css).visible, 'Upload Completed')
| agpl-3.0 | -491,777,324,564,979,600 | 34.17507 | 117 | 0.605256 | false |
googleapis/python-compute | google/cloud/compute_v1/services/backend_buckets/pagers.py | 1 | 3115 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.BackendBucketList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.BackendBucketList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.BackendBucketList],
request: compute.ListBackendBucketsRequest,
response: compute.BackendBucketList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListBackendBucketsRequest):
The initial request object.
response (google.cloud.compute_v1.types.BackendBucketList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListBackendBucketsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.BackendBucketList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[compute.BackendBucket]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| apache-2.0 | -921,425,963,638,989,400 | 34 | 81 | 0.655538 | false |
Morgan-Stanley/treadmill | lib/python/treadmill/api/authz/group.py | 2 | 3537 | """Implementation of group based authorization API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import fnmatch
import grp # pylint: disable=import-error
import logging
import pwd # pylint: disable=import-error
import os
_LOGGER = logging.getLogger(__name__)
def _group(template, resource, action, proid):
"""Render group template."""
return template.format(
resource=resource,
action=action,
proid=proid
)
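# Illustrative rendering of a group template (the template string and proid
# below are made-up examples, not shipped defaults):
#
#   _group('treadmill-{resource}-{action}-{proid}',
#          resource='app', action='create', proid='proid1')
#   -> 'treadmill-app-create-proid1'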
class API:
"""Group based authorization REST api."""
def __init__(self, **kwargs):
groups = kwargs.get('groups', [])
for group in groups:
_LOGGER.info('Using authorization template: %s', group)
exclude = kwargs.get('exclude', [])
_LOGGER.info('Unrestricted whitelist: %s', exclude)
me = pwd.getpwuid(os.getuid())[0]
# TODO: add schema validation.
def authorize(user, action, resource, payload):
"""Authorize user/action/resource"""
resource_id = payload.get('pk')
_LOGGER.info(
'Authorize: %s %s %s %s', user, action, resource, resource_id
)
resource_action = '{}:{}'.format(resource, action)
for exclusion in exclude:
if fnmatch.fnmatch(resource_action, exclusion):
_LOGGER.info(
'Access allowed based on exclusion whitelist: %s, %s',
resource_action, exclusion
)
return True, []
username = user.partition('@')[0]
# Special rule - authorize self.
if username == me:
_LOGGER.info('Authorized self: %s', username)
return True, []
proid = None
if resource_id:
proid = resource_id.partition('.')[0]
why = []
for group_template in groups:
group_name = _group(
group_template,
action=action,
resource=resource,
proid=proid
)
_LOGGER.info('Check authorization group: %s', group_name)
try:
group = grp.getgrnam(group_name)
members = group.gr_mem
if username in members:
_LOGGER.info(
'Authorized: User %s is member of %s.',
username,
group_name
)
return True, why
else:
_LOGGER.debug(
'User %s not in %s', username, group_name
)
why.append(
'{} not in {}'.format(
username,
group_name
)
)
except KeyError:
_LOGGER.info(
'Group does not exist: %s', group_name
)
why.append('no such group: {}'.format(group_name))
_LOGGER.info(
'Not authorized: %s %s %s %s',
user, action, resource, resource_id
)
return False, why
self.authorize = authorize
| apache-2.0 | 6,978,785,796,970,534,000 | 30.026316 | 78 | 0.45745 | false |
a10networks/a10sdk-python | a10sdk/core/interface/interface_ethernet_ipv6_stateful_firewall.py | 2 | 1821 | from a10sdk.common.A10BaseClass import A10BaseClass
class StatefulFirewall(A10BaseClass):
"""Class Description::
Configure Stateful Firewall direction.
Class stateful-firewall supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param access_list: {"default": 0, "optional": true, "type": "number", "description": "Access-list for traffic from the outside", "format": "flag"}
:param acl_name: {"description": "Access-list Name", "format": "string", "minLength": 1, "optional": true, "maxLength": 16, "type": "string"}
:param inside: {"default": 0, "optional": true, "type": "number", "description": "Inside (private) interface for stateful firewall", "format": "flag"}
:param outside: {"default": 0, "optional": true, "type": "number", "description": "Outside (public) interface for stateful firewall", "format": "flag"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/interface/ethernet/{ifnum}/ipv6/stateful-firewall`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "stateful-firewall"
self.a10_url="/axapi/v3/interface/ethernet/{ifnum}/ipv6/stateful-firewall"
self.DeviceProxy = ""
self.access_list = ""
self.acl_name = ""
self.inside = ""
self.outside = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| apache-2.0 | 8,242,613,466,896,749,000 | 41.348837 | 168 | 0.639758 | false |
davismathew/netbot-django | bootcamp/feeds/models.py | 1 | 2224 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from bootcamp.activities.models import Activity
from django.utils.html import escape
import bleach
class Feed(models.Model):
user = models.ForeignKey(User)
date = models.DateTimeField(auto_now_add=True)
post = models.TextField(max_length=255)
parent = models.ForeignKey('Feed', null=True, blank=True)
likes = models.IntegerField(default=0)
comments = models.IntegerField(default=0)
class Meta:
verbose_name = _('Feed')
verbose_name_plural = _('Feeds')
ordering = ('-date',)
def __unicode__(self):
return self.post
@staticmethod
def get_feeds(from_feed=None):
if from_feed is not None:
feeds = Feed.objects.filter(parent=None, id__lte=from_feed)
else:
feeds = Feed.objects.filter(parent=None)
return feeds
@staticmethod
def get_feeds_after(feed):
feeds = Feed.objects.filter(parent=None, id__gt=feed)
return feeds
def get_comments(self):
return Feed.objects.filter(parent=self).order_by('date')
def calculate_likes(self):
likes = Activity.objects.filter(activity_type=Activity.LIKE,
feed=self.pk).count()
self.likes = likes
self.save()
return self.likes
def get_likes(self):
likes = Activity.objects.filter(activity_type=Activity.LIKE,
feed=self.pk)
return likes
def get_likers(self):
likes = self.get_likes()
likers = []
for like in likes:
likers.append(like.user)
return likers
def calculate_comments(self):
self.comments = Feed.objects.filter(parent=self).count()
self.save()
return self.comments
def comment(self, user, post):
feed_comment = Feed(user=user, post=post, parent=self)
feed_comment.save()
self.comments = Feed.objects.filter(parent=self).count()
self.save()
return feed_comment
def linkfy_post(self):
return bleach.linkify(escape(self.post))
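# Illustrative usage sketch (assumes an existing `user` instance; the values are
# made up and not part of the model definition):
#
#   feed = Feed.objects.create(user=user, post="Hello world")
#   reply = feed.comment(user=user, post="First!")  # also refreshes feed.comments
#   feed.calculate_likes()                          # recounts Activity.LIKE rows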
| mit | 7,168,111,471,717,333,000 | 29.465753 | 71 | 0.617806 | false |
KTanaka101/MazeShortestPath | MazeShortestPath/Components.py | 1 | 2187 | import pygame
from pygame.locals import *
COLOR_PUSH_BUTTON = (255, 140, 0)
COLOR_FREE_BUTTON = (70, 70, 200)
COLOR_TEXT = (255, 255, 255)
COLOR_PUSH_BUTTON_OUTLINE = (205, 90, 0)
COLOR_FREE_BUTTON_OUTLINE = (30, 30, 150)
COLOR_PUSH_BUTTON_MOUSE_OVER = (150, 70, 0)
COLOR_FREE_BUTTON_MOUSE_OVER = (0, 0, 100)
SIZE_BUTTON_OUTLINE = 3
class Button(object):
text = ""
push_flag = False
mouse_over_flag = False
x = 0
y = 0
size_x = 0
size_y = 0
def __init__(self, text):
self.text = text
def is_check_point(self, px, py):
if self.x < px < self.x+self.size_x and self.y < py < self.y+self.size_y:
return True
return False
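    # Illustrative hit-test (coordinates are examples): for a button at
    # x=10, y=10 with size_x=100, size_y=30, is_check_point(50, 20) is True
    # and is_check_point(5, 5) is False.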
def draw(self, screen, font):
if self.push_flag:
if self.mouse_over_flag:
pygame.draw.rect(screen, COLOR_PUSH_BUTTON_MOUSE_OVER, Rect(self.x, self.y, self.size_x, self.size_y))
else:
pygame.draw.rect(screen, COLOR_PUSH_BUTTON_OUTLINE,
Rect(self.x-SIZE_BUTTON_OUTLINE, self.y-SIZE_BUTTON_OUTLINE,
self.size_x+SIZE_BUTTON_OUTLINE*2, self.size_y+SIZE_BUTTON_OUTLINE*2))
pygame.draw.rect(screen, COLOR_PUSH_BUTTON, Rect(self.x, self.y, self.size_x, self.size_y))
else:
if self.mouse_over_flag:
pygame.draw.rect(screen, COLOR_FREE_BUTTON_MOUSE_OVER, Rect(self.x, self.y, self.size_x, self.size_y))
else:
pygame.draw.rect(screen, COLOR_FREE_BUTTON_OUTLINE,
Rect(self.x-SIZE_BUTTON_OUTLINE, self.y-SIZE_BUTTON_OUTLINE,
self.size_x+SIZE_BUTTON_OUTLINE*2, self.size_y+SIZE_BUTTON_OUTLINE*2))
pygame.draw.rect(screen, COLOR_FREE_BUTTON, Rect(self.x, self.y, self.size_x, self.size_y))
screen.blit(font.render("%s" % self.text, True, COLOR_TEXT), (self.x, self.y+self.size_y//3))
def get_point_size_x(self):
return self.size_x + self.x
def get_point_size_y(self):
return self.size_y + self.y
def __repr__(self):
return "Button name: " + self.text | lgpl-3.0 | 1,841,675,205,124,004,600 | 34.868852 | 118 | 0.572474 | false |
formiano/enigma2-4.4 | lib/python/Screens/Menu.py | 8 | 7905 | from Screens.Screen import Screen
from Components.Sources.List import List
from Components.ActionMap import NumberActionMap
from Components.Sources.StaticText import StaticText
from Components.config import configfile
from Components.PluginComponent import plugins
from Components.config import config
from Components.SystemInfo import SystemInfo
from Tools.BoundFunction import boundFunction
from Tools.Directories import resolveFilename, SCOPE_SKIN
import xml.etree.cElementTree
from Screens.Setup import Setup, getSetupTitle
mainmenu = _("Main menu")
# read the menu
file = open(resolveFilename(SCOPE_SKIN, 'menu.xml'), 'r')
mdom = xml.etree.cElementTree.parse(file)
file.close()
class MenuUpdater:
def __init__(self):
self.updatedMenuItems = {}
def addMenuItem(self, id, pos, text, module, screen, weight):
if not self.updatedMenuAvailable(id):
self.updatedMenuItems[id] = []
self.updatedMenuItems[id].append([text, pos, module, screen, weight])
def delMenuItem(self, id, pos, text, module, screen, weight):
self.updatedMenuItems[id].remove([text, pos, module, screen, weight])
def updatedMenuAvailable(self, id):
return self.updatedMenuItems.has_key(id)
def getUpdatedMenu(self, id):
return self.updatedMenuItems[id]
menuupdater = MenuUpdater()
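# Illustrative plugin-side usage of the shared updater (the menu id, position
# and module path are made-up examples):
#   menuupdater.addMenuItem("mainmenu", 5, _("My Plugin"),
#                           "Plugins.Extensions.MyPlugin.plugin", "MyScreen", 50)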
class MenuSummary(Screen):
pass
class Menu(Screen):
ALLOW_SUSPEND = True
def okbuttonClick(self):
# print "okbuttonClick"
selection = self["menu"].getCurrent()
if selection is not None:
selection[1]()
def execText(self, text):
exec text
def runScreen(self, arg):
# arg[0] is the module (as string)
# arg[1] is Screen inside this module
# plus possible arguments, as
# string (as we want to reference
# stuff which is just imported)
# FIXME. somehow
if arg[0] != "":
exec "from " + arg[0] + " import *"
self.openDialog(*eval(arg[1]))
def nothing(self): #dummy
pass
def gotoStandby(self, *res):
from Screens.Standby import Standby2
self.session.open(Standby2)
self.close(True)
def openDialog(self, *dialog): # in every layer needed
self.session.openWithCallback(self.menuClosed, *dialog)
def openSetup(self, dialog):
self.session.openWithCallback(self.menuClosed, Setup, dialog)
def addMenu(self, destList, node):
requires = node.get("requires")
if requires:
if requires[0] == '!':
if SystemInfo.get(requires[1:], False):
return
elif not SystemInfo.get(requires, False):
return
MenuTitle = _(node.get("text", "??").encode("UTF-8"))
entryID = node.get("entryID", "undefined")
weight = node.get("weight", 50)
x = node.get("flushConfigOnClose")
if x:
a = boundFunction(self.session.openWithCallback, self.menuClosedWithConfigFlush, Menu, node)
else:
a = boundFunction(self.session.openWithCallback, self.menuClosed, Menu, node)
#TODO add check if !empty(node.childNodes)
destList.append((MenuTitle, a, entryID, weight))
def menuClosedWithConfigFlush(self, *res):
configfile.save()
self.menuClosed(*res)
def menuClosed(self, *res):
if res and res[0]:
self.close(True)
def addItem(self, destList, node):
requires = node.get("requires")
if requires:
if requires[0] == '!':
if SystemInfo.get(requires[1:], False):
return
elif not SystemInfo.get(requires, False):
return
configCondition = node.get("configcondition")
if configCondition and not eval(configCondition + ".value"):
return
item_text = node.get("text", "").encode("UTF-8")
entryID = node.get("entryID", "undefined")
weight = node.get("weight", 50)
for x in node:
if x.tag == 'screen':
module = x.get("module")
screen = x.get("screen")
if screen is None:
screen = module
# print module, screen
if module:
module = "Screens." + module
else:
module = ""
# check for arguments. they will be appended to the
# openDialog call
args = x.text or ""
screen += ", " + args
destList.append((_(item_text or "??"), boundFunction(self.runScreen, (module, screen)), entryID, weight))
return
elif x.tag == 'plugin':
extensions = x.get("extensions")
system = x.get("system")
screen = x.get("screen")
if extensions:
module = extensions
elif system:
module = system
if screen is None:
screen = module
if extensions:
module = "Plugins.Extensions." + extensions + '.plugin'
elif system:
module = "Plugins.SystemPlugins." + system + '.plugin'
else:
module = ""
# check for arguments. they will be appended to the
# openDialog call
args = x.text or ""
screen += ", " + args
destList.append((_(item_text or "??"), boundFunction(self.runScreen, (module, screen)), entryID, weight))
return
elif x.tag == 'code':
destList.append((_(item_text or "??"), boundFunction(self.execText, x.text), entryID, weight))
return
elif x.tag == 'setup':
id = x.get("id")
if item_text == "":
item_text = _(getSetupTitle(id))
else:
item_text = _(item_text)
destList.append((item_text, boundFunction(self.openSetup, id), entryID, weight))
return
destList.append((item_text, self.nothing, entryID, weight))
def __init__(self, session, parent):
Screen.__init__(self, session)
list = []
menuID = None
for x in parent: #walk through the actual nodelist
if not x.tag:
continue
if x.tag == 'item':
item_level = int(x.get("level", 0))
if item_level <= config.usage.setup_level.index:
self.addItem(list, x)
count += 1
elif x.tag == 'menu':
self.addMenu(list, x)
count += 1
elif x.tag == "id":
menuID = x.get("val")
count = 0
if menuID is not None:
# menuupdater?
if menuupdater.updatedMenuAvailable(menuID):
for x in menuupdater.getUpdatedMenu(menuID):
if x[1] == count:
list.append((x[0], boundFunction(self.runScreen, (x[2], x[3] + ", ")), x[4]))
count += 1
if menuID is not None:
# plugins
for l in plugins.getPluginsForMenu(menuID):
# check if a plugin overrides an existing menu
plugin_menuid = l[2]
for x in list:
if x[2] == plugin_menuid:
list.remove(x)
break
if len(l) > 4 and l[4]:
list.append((l[0], boundFunction(l[1], self.session, self.close), l[2], l[3] or 50))
else:
list.append((l[0], boundFunction(l[1], self.session), l[2], l[3] or 50))
# for the skin: first try a menu_<menuID>, then Menu
self.skinName = [ ]
if menuID is not None:
self.skinName.append("menu_" + menuID)
self.skinName.append("Menu")
# Sort by Weight
if config.usage.sort_menus.value:
list.sort()
else:
list.sort(key=lambda x: int(x[3]))
self["menu"] = List(list)
self["actions"] = NumberActionMap(["OkCancelActions", "MenuActions", "NumberActions"],
{
"ok": self.okbuttonClick,
"cancel": self.closeNonRecursive,
"menu": self.closeRecursive,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal
})
a = parent.get("title", "").encode("UTF-8") or None
a = a and _(a)
if a is None:
a = _(parent.get("text", "").encode("UTF-8"))
self["title"] = StaticText(a)
Screen.setTitle(self, a)
self.menu_title = a
def keyNumberGlobal(self, number):
# print "menu keyNumber:", number
# Calculate index
number -= 1
if len(self["menu"].list) > number:
self["menu"].setIndex(number)
self.okbuttonClick()
def closeNonRecursive(self):
self.close(False)
def closeRecursive(self):
self.close(True)
def createSummary(self):
return MenuSummary
class MainMenu(Menu):
#add file load functions for the xml-file
def __init__(self, *x):
self.skinName = "Menu"
Menu.__init__(self, *x)
| gpl-2.0 | -4,069,299,481,772,822,500 | 26.071918 | 109 | 0.658444 | false |
carlosrojas/halfedge_mesh | tests/halfedge_mesh_test.py | 1 | 11956 | import halfedge_mesh
import pytest
import math
import numpy as np
class TestHalfedgeMesh:
@pytest.fixture(scope="module")
def cube_off_mesh(self):
return halfedge_mesh.HalfedgeMesh("tests/data/cube.off")
@pytest.fixture(scope="module")
def cube_large_off_mesh(self):
return halfedge_mesh.HalfedgeMesh("tests/data/cube_large.off")
@pytest.fixture(scope="module")
def cube_negative_off_mesh(self):
return halfedge_mesh.HalfedgeMesh("tests/data/cube4.off")
#------------------------------------------------------------------------------
def test_eq_halfedge_mesh_cube(self, cube_off_mesh, cube_large_off_mesh):
assert cube_off_mesh != cube_large_off_mesh
assert cube_off_mesh == cube_off_mesh
def test_hash_halfedge_mesh_cube(self, cube_off_mesh):
constant_value = 10111970
test_dic = dict()
test_dic[cube_off_mesh] = constant_value
assert test_dic[cube_off_mesh] == constant_value
def test_read_file(self, cube_off_mesh):
assert cube_off_mesh.read_file("tests/data/cube.off") != None
assert cube_off_mesh.read_file("") == None
assert cube_off_mesh.read_file("tests/data/cube.ply") == None
def test_read_off_vertices(self, cube_off_mesh):
with open("tests/data/vertices_test.off") as vertices:
v = cube_off_mesh.read_off_vertices(vertices, 2)
assert np.allclose([v[0].x, v[0].y, v[0].z], [10.3, 42., 20.])
assert np.allclose([v[1].x, v[1].y, v[1].z], [33, 21.3, 94.1])
def test_parse_build_halfedge_off(self, cube_off_mesh):
with open("tests/data/faces_test.off") as faces:
vertices = [halfedge_mesh.Vertex(-1,-1,-1,i) for i in range(3)]
f, e = cube_off_mesh.parse_build_halfedge_off(faces, 1, vertices)
assert len(f) == 1
assert f[0].a == 0 and f[0].b == 1 and f[0].c == 2
assert f[0].index == 0
assert len(e) == 3
#------------------------------------------------------------------------------
def test_halfedge_loop_around_facet(self, cube_off_mesh):
halfedge = cube_off_mesh.facets[0].halfedge
assert halfedge.next.next.next.vertex == halfedge.vertex
def test_vertices_in_facet(self, cube_off_mesh):
halfedge = cube_off_mesh.facets[0].halfedge
vertices = set([halfedge_mesh.Vertex(1.0, -1.0, 1.0, 1),
halfedge_mesh.Vertex(1.0, -1.0, -1.0, 0),
halfedge_mesh.Vertex(-1.0, -1.0, 1.0, 2)])
# make sure all vertices are in the facet described by halfedge
assert halfedge.vertex in vertices
vertices.remove( halfedge.vertex )
assert halfedge.next.vertex in vertices
vertices.discard( halfedge.next.vertex)
assert halfedge.next.next.vertex in vertices
vertices.discard( halfedge.next.next.vertex)
def test_facet_eq_correct_for_same_object_and_diff_objects(self,
cube_off_mesh):
assert cube_off_mesh.facets[0] == cube_off_mesh.facets[0]
assert cube_off_mesh.facets[1] != cube_off_mesh.facets[0]
assert cube_off_mesh.facets[3] == cube_off_mesh.facets[3]
assert cube_off_mesh.facets[0] != cube_off_mesh.facets[3]
def test_halfedgemesh_vertices_are_in_order_with_cubeoff(self,
cube_off_mesh):
# Tests parse_off since Vertex is just a basic class
vertices = cube_off_mesh.vertices
# cube vertices in order
pts = [1, -1, -1,
1, -1, 1,
-1, -1, 1,
-1, -1, -1,
1, 1, -0.999999,
0.999999, 1, 1.000001]
count = 0
for index in range(0, len(vertices), 3):
# Vertex(a,b,c, index)
assert vertices[count] == halfedge_mesh.Vertex(pts[index],
pts[index + 1],
pts[index + 2], count)
count += 1
def test_halfedgemesh_vertices_in_facet_exists_with_cubeoff(self,
cube_off_mesh):
# Tests parse_off since Vertex is just a basic class
facets = cube_off_mesh.facets
vertices = cube_off_mesh.vertices
for index in range(len(facets)):
# check that it's within the range of the number of vertices
assert facets[index].a < len(vertices)
assert (facets[index].a >= 0)
def test_halfedgemesh_get_halfedge_returns_correct_vertices_with_cubeoff(
self, cube_off_mesh):
five_seven = cube_off_mesh.get_halfedge(5, 7)
assert five_seven.vertex.index == 7
assert five_seven.prev.vertex.index == 5
five_six = cube_off_mesh.get_halfedge(5, 6)
assert five_six.vertex.index == 6
assert five_six.prev.vertex.index == 5
one_two = cube_off_mesh.get_halfedge(1, 2)
assert one_two.vertex.index == 2
assert one_two.prev.vertex.index == 1
def test_halfedge_opposite_correct_vertices_with_cubeoff(self,
cube_off_mesh):
zero_two = cube_off_mesh.get_halfedge(0, 2)
assert zero_two.opposite.vertex.index == 0
assert zero_two.opposite.prev.vertex.index == 2
zero_one = cube_off_mesh.get_halfedge(0, 1)
assert zero_one.opposite.vertex.index == 0
assert zero_one.opposite.prev.vertex.index == 1
four_one = cube_off_mesh.get_halfedge(4, 1)
assert four_one.opposite.vertex.index == 4
assert four_one.opposite.prev.vertex.index == 1
def test_halfedge_eq_correct_for_same_and_object_and_diff_objects(self,
cube_off_mesh):
zero_two = cube_off_mesh.get_halfedge(0, 2)
assert zero_two == zero_two
four_one = cube_off_mesh.get_halfedge(4, 1)
assert zero_two != four_one
# test negative angles
def test_get_angle_normal(self, cube_off_mesh, cube_negative_off_mesh):
assert cube_off_mesh.facets[0].halfedge.vertex.index == 1
assert cube_off_mesh.facets[0].halfedge.prev.vertex.index == 0
assert halfedge_mesh.allclose(
cube_off_mesh.facets[0].halfedge.get_angle_normal(),
math.pi/2.0)
assert cube_off_mesh.facets[1].halfedge.vertex.index == 7
assert cube_off_mesh.facets[1].halfedge.prev.vertex.index == 4
assert halfedge_mesh.allclose(
cube_off_mesh.facets[1].halfedge.get_angle_normal(),
math.pi/2.0)
assert cube_off_mesh.facets[3].halfedge.next.vertex.index == 2
assert cube_off_mesh.facets[3].halfedge.next.prev.vertex.index == 5
assert halfedge_mesh.allclose(
cube_off_mesh.facets[3].halfedge.next.get_angle_normal(), 0.0)
assert halfedge_mesh.allclose(cube_negative_off_mesh.get_halfedge(5,7).get_angle_normal(), -0.67967381890824385)
def test_get_vertex(self, cube_off_mesh):
mesh_vertex = cube_off_mesh.vertices[0].get_vertex()
test_vertex = halfedge_mesh.Vertex(1,-1,-1,0).get_vertex()
assert halfedge_mesh.allclose(mesh_vertex,test_vertex)
def test_update_vertices(self, cube_off_mesh, cube_large_off_mesh):
tmp = halfedge_mesh.HalfedgeMesh()
tmp.vertices= cube_off_mesh.vertices[:]
tmp.halfedges = cube_off_mesh.halfedges[:]
tmp.facets= cube_off_mesh.facets[:]
v = []
for vertex in cube_large_off_mesh.vertices:
v.append([ vertex.x, vertex.y, vertex.z ])
tmp.update_vertices(v)
for i in range(len(cube_large_off_mesh.halfedges)):
assert tmp.halfedges[i].get_angle_normal() == cube_large_off_mesh.halfedges[i].get_angle_normal()
def test_internal_norm():
assert halfedge_mesh.norm([0, -1, 0]) == 1.0
assert halfedge_mesh.norm([0, 1, 0]) == 1.0
assert halfedge_mesh.norm([1, 0, 0]) == 1.0
assert halfedge_mesh.norm([0, 0, 1]) == 1.0
assert halfedge_mesh.norm([-1, 0, 0]) == 1.0
assert halfedge_mesh.norm([0, 0, -1]) == 1.0
assert halfedge_mesh.norm([0, -1, 0]) == 1.0
assert halfedge_mesh.norm([1, 0, 0]) == 1.0
assert halfedge_mesh.norm([0, 0, -1]) == 1.0
assert halfedge_mesh.norm([0, 1, 0]) == 1.0
assert halfedge_mesh.norm([-1, 0, 0]) == 1.0
assert halfedge_mesh.norm([0, 0, 1]) == 1.0
assert halfedge_mesh.norm([1, 1, 1]) == math.sqrt(3)
def test_internal_cross_product():
v_i = [1, 0, 0]
v_j = [0, 1, 0]
v_k = [0, 0, 1]
assert halfedge_mesh.cross_product(v_i, v_i) == [0, 0, 0]
assert halfedge_mesh.cross_product(v_i, v_j) == v_k
assert halfedge_mesh.cross_product(v_j, v_k) == v_i
assert halfedge_mesh.cross_product(v_k, v_i) == v_j
assert halfedge_mesh.cross_product(v_j, v_i) == map(lambda x: -x, v_k)
assert halfedge_mesh.cross_product(v_i, v_k) == map(lambda x: -x, v_j)
assert halfedge_mesh.cross_product(v_k, v_j) == map(lambda x: -x, v_i)
def test_allclose_list_int_float():
assert halfedge_mesh.allclose(1, 1)
assert halfedge_mesh.allclose(0, 0)
assert halfedge_mesh.allclose(-1, -1)
assert halfedge_mesh.allclose([1.34, 1.4, 5688.66], [1.34, 1.4, 5688.66])
assert halfedge_mesh.allclose(
[-1.34, -1.4, -5688.66], [-1.34, -1.4, -5688.66])
assert halfedge_mesh.allclose([1.33], [1.33])
assert halfedge_mesh.allclose(1.33, 1.33)
assert halfedge_mesh.allclose([1, 2, 3, 4], [1, 2, 3, 4])
assert halfedge_mesh.allclose([-1, -2, -3, -4], [-1, -2, -3, -4])
def test_dot():
assert halfedge_mesh.dot([1, 2, 3], [1, 2, 3]) == 14
assert halfedge_mesh.dot([-1, -2, -3], [1, 2, 3]) == -14
assert halfedge_mesh.dot([1, 2, 3], [-1, -2, -3]) == -14
assert halfedge_mesh.dot([0, 1, 0], [1, 0, 0]) == 0
assert halfedge_mesh.dot([0, -1, 0], [-1, 0, 0]) == 0
assert halfedge_mesh.dot([1, 0, 0], [0, 0, 1]) == 0
assert halfedge_mesh.dot([1], [2]) == 2
assert halfedge_mesh.dot([3, 4], [10, 8]) == 62
assert halfedge_mesh.allclose((halfedge_mesh.dot([1.23, 4.5, 0.0],
[1.3865, 4.56, 81.3865])),
22.225394999999999)
def test_make_iterable():
assert halfedge_mesh.make_iterable([1]) == [1]
assert halfedge_mesh.make_iterable([-1]) == [-1]
assert halfedge_mesh.make_iterable(2) == [2]
assert halfedge_mesh.make_iterable(-2) == [-2]
assert halfedge_mesh.make_iterable((3)) == [3]
assert halfedge_mesh.make_iterable((-3)) == [-3]
assert halfedge_mesh.make_iterable((3, 2, 1)) == (3, 2, 1)
assert halfedge_mesh.make_iterable((-3, -2, -1)) == (-3, -2, -1)
assert halfedge_mesh.make_iterable(1.2345) == [1.2345]
assert halfedge_mesh.make_iterable(-1.2345) == [-1.2345]
assert halfedge_mesh.make_iterable([1.234, 344.33]) == [1.234, 344.33]
def test_normalize_vectors():
assert halfedge_mesh.allclose(halfedge_mesh.normalize([1, 2, 3]),
[0.26726124, 0.53452248, 0.80178373])
assert halfedge_mesh.allclose(halfedge_mesh.normalize([3.43, 566.7, 9.6]),
[0.00605161, 0.99983824, 0.01693744])
assert halfedge_mesh.allclose(
halfedge_mesh.normalize([100000., 1222222., 30000000]),
[0.00333055, 0.04070674, 0.99916559])
assert halfedge_mesh.allclose(halfedge_mesh.normalize([0,0,0]), [0,0,0])
def test_create_vector():
p1 = [0,0,0]
p2 = [-1,-1,-1]
v = halfedge_mesh.create_vector(p1, p2)
assert halfedge_mesh.allclose(v, [-1,-1,-1])
p3 = [4,4,4]
v = halfedge_mesh.create_vector(p2, p3)
assert halfedge_mesh.allclose(v, [5,5,5])
| mit | -364,929,722,410,441,540 | 40.227586 | 120 | 0.571763 | false |
linickx/crassh | crassh.py | 1 | 20578 | #!/usr/bin/env python
# coding=utf-8
"""Python script to automate running commands on switches.
Cisco Remote Automation via Secure Shell... or C.R.A.SSH for short!
.. currentmodule:: crassh
.. moduleauthor:: Nick Bettison - www.linickx.com
"""
# Import libs
import getpass # Hide Password Entry
import socket # TCP/Network/Socket
import time # Time
import datetime # Date
import sys #
import getopt # Command line options
import os #
import stat # File system
import re # Regex
import paramiko # SSH
# I don't care about long line, deal with it ;)
# pylint: disable=C0301
# Global variables
crassh_version = "2.8" # Version Control in a Variable
remote_conn = "" # Paramiko Remote Connection
remote_conn_pre = "" # Paramiko Remote Connection Settings (pre-connect)
# Python 2 & 3 input compatibility
# pylint: disable=W0622
try:
input = raw_input
except NameError:
pass
"""
Functions
"""
def send_command(command="show ver", hostname="Switch", bail_timeout=60):
"""Sending commands to a switch, router, device, whatever!
Args:
command (str): The Command you wish to run on the device.
hostname (str): The hostname of the device (*expected in the* ``prompt``).
bail_timeout (int): How long to wait for ``command`` to finish before giving up.
Returns:
str. A text blob from the device, including line breaks.
REF: http://blog.timmattison.com/archives/2014/06/25/automating-cisco-switch-interactions/
"""
global remote_conn, remote_conn_pre
# Start with empty var & loop
output = ""
keeplooping = True
# Regex for either config or enable
    regex = '^' + hostname[:20] + r'(.*)(\ )?#'
theprompt = re.compile(regex)
# Time when the command started, prepare for timeout.
now = int(time.time())
timeout = now + bail_timeout
# Send the command
remote_conn.send(command + "\n")
# loop the output
while keeplooping:
# Setup bail timer
now = int(time.time())
if now == timeout:
print("\n Command %s took %s secs to run, bailing!" % (command, str(bail_timeout)))
output += "crassh bailed on command: " + command
keeplooping = False
break
# update receive buffer whilst waiting for the prompt to come back
if remote_conn.recv_ready():
output += remote_conn.recv(2048).decode('utf-8')
# Search the output for our prompt
theoutput = output.splitlines()
for lines in theoutput:
myregmatch = theprompt.search(lines)
if myregmatch:
keeplooping = False
return output
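# Illustrative use of send_command() (device address, credentials and command
# are placeholders; connect() must have been called first because
# send_command() relies on the global paramiko channel):
#
#   hostname = connect("192.0.2.1", "admin", "secret")
#   output = send_command("show ip interface brief", hostname, bail_timeout=30)
#   print(output)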
def do_no_harm(command):
"""Check Commands for dangerous things
Args:
command (str): The Command you wish to run on the device.
Returns:
Nothing
This function will ``sys.exit()`` if an *evil* command is found
>>> crassh.do_no_harm("show ver")
>>>
    So, good commands just pass through with no response... maybe I should one day make it a True/False kind of thing.
"""
# Innocent until proven guilty
harmful = False
# Regex match each "command"
if re.match("rel", command):
harmful = True
error = "reload"
if re.match("wr(.*)\ e", command):
harmful = True
error = "write erase"
if re.match("del", command):
harmful = True
error = "delete"
if harmful:
print("")
print("Harmful Command found - Aborting!")
print(" \"%s\" tripped the do no harm sensor => %s" % (command, error))
print("\n To force the use of dangerous things, use -X")
print_help()
# Simple help print and exit
def print_help(exitcode=0):
"""Prints the Help for the CLI tool
Args:
exit (int): Exit Code
Returns:
None
When called this function will ``sys.exit()``
"""
global crassh_version
print("\n Usage: %s -s switches.txt -c commands.txt -p -w -t 45 -e" % sys.argv[0])
print(" -s supply a text file of switch hostnames or IP addresses [optional]")
print(" -c supply a text file of commands to run on switches [optional]")
print(" -w write the output to a file [optional | Default: True]")
print(" -p print the output to the screen [optional | Default: False]")
print(" -pw is supported, print to both file & screen [optional]")
print(" -t set a command timeout in seconds [optional | Default: 60]")
print(" -T set a connection timeout in seconds [optional | Default: 10]")
print(" -X disable \"do no harm\" [optional]")
print(" -Q disable \"quit on failure\" [optional]")
print(" -e set an enable password [optional]")
print(" -d set a delay (in seconds) between commands [optional]")
print(" -A set an Authentication file for SSH credentials [optional]")
print(" -U set a Username for SSH Authentication [optional]")
print(" -P set a Password for SSH Authentication [optional]")
print(" -B set a BACKUP Username for SSH Authentication [optional]")
print(" -b set a BACKUP Password for SSH Authentication [optional]")
print(" -E set a BACKUP ENABLE Password [optional]")
print(" ")
print("Version: %s" % crassh_version)
print(" ")
sys.exit(exitcode)
def isgroupreadable(filepath):
"""Checks if a file is *Group* readable
Args:
filepath (str): Full path to file
Returns:
bool. True/False
Example:
>>> print(str(isgroupreadable("file.txt")))
True
REF: http://stackoverflow.com/questions/1861836/checking-file-permissions-in-linux-with-python
"""
st = os.stat(filepath)
return bool(st.st_mode & stat.S_IRGRP)
def isotherreadable(filepath):
"""Checks if a file is *Other* readable
Args:
filepath (str): Full path to file
Returns:
bool. True/False
Example:
>>> print(str(isotherreadable("file.txt")))
True
"""
st = os.stat(filepath)
return bool(st.st_mode & stat.S_IROTH)
def readtxtfile(filepath):
"""Read lines of a text file into an array
Each line is stripped of whitepace.
Args:
filepath (str): Full path to file
Returns:
array. Contents of file
Example:
>>> print(readtxtfile("./routers.txt"))
1.1.1.1
1.1.1.2
1.1.1.3
"""
# Check if file exists
if os.path.isfile(filepath) is False:
print("Cannot find %s" % filepath)
sys.exit()
# setup return array
txtarray = []
# open our file
f = open(filepath, 'r')
# Loop thru the array
for line in f:
# Append each line to array
txtarray.append(line.strip())
# Return results
return txtarray
# Read a Crassh Authentication File
def readauthfile(filepath):
"""Read C.R.A.SSH Authentication File
The file format is a simple, one entry per line, colon separated affair::
username: nick
password: cisco
Args:
filepath (str): Full path to file
Returns:
tuple. ``username`` and ``password``
Example:
>>> username, password = readauthfile("~/.crasshrc")
>>> print(username)
nick
>>> print(password)
cisco
"""
# Check if file exists
if os.path.isfile(filepath) is False:
print("Cannot find %s" % filepath)
sys.exit()
# Open file
f = open(filepath, 'r')
# Loop thru the array
for fline in f:
thisline = fline.strip().split(":")
if thisline[0].strip() == "username":
username = thisline[1].strip()
if thisline[0].strip() == "password":
if isgroupreadable(filepath):
print("** Password not read from %s - file is GROUP readable ** " % filepath)
else:
if isotherreadable(filepath):
print("** Password not read from %s - file is WORLD readable **"% filepath)
else:
password = thisline[1].strip()
return username, password
def connect(device="127.0.0.1", username="cisco", password="cisco", enable=False, enable_password="cisco", sysexit=False, timeout=10):
"""Connect and get Hostname of Cisco Device
This function wraps up ``paramiko`` and returns the hostname of the **Cisco** device.
The function creates two global variables ``remote_conn_pre`` and ``remote_conn`` which
are the paramiko objects for direct manipulation if necessary.
Args:
device (str): IP Address or Fully Qualifed Domain Name of Device
username (str): Username for SSH Authentication
password (str): Password for SSH Authentication
enable (bool): Is enable going to be needed?
enable_password (str): The enable password
sysexit (bool): Should the connecton exit the script on failure?
Returns:
str. The hostname of the device
Example:
>>> hostname = connect("10.10.10.10", "nick", "cisco")
>>> print(hostname)
r1
REF:
* https://pynet.twb-tech.com/blog/python/paramiko-ssh-part1.html
* http://yenonn.blogspot.co.uk/2013/10/python-in-action-paramiko-handling-ssh.html
"""
# Global variables - Paramiko Stuff.
global remote_conn_pre, remote_conn
hostname = False
"""
"""
# Create paramiko object
remote_conn_pre = paramiko.SSHClient()
# Change default paramiko object settings
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print("Connecting to %s ... " % device)
try:
remote_conn_pre.connect(
device, username=username, password=password, allow_agent=False, look_for_keys=False, timeout=timeout)
except paramiko.AuthenticationException as e:
print("Authentication Error: %s" % e)
if sysexit:
sys.exit()
return False
except paramiko.SSHException as e:
print("SSH Error: %s" % e)
if sysexit:
sys.exit()
return False
except socket.error as e:
print("Connection Failed: %s" % e)
if sysexit:
sys.exit()
return False
except:
print("Unexpected error:", sys.exc_info()[0])
if sysexit:
sys.exit()
return False
# Connected! (invoke_shell)
remote_conn = remote_conn_pre.invoke_shell()
# Flush buffer.
output = remote_conn.recv(1000).decode('utf-8')
del output
output = ""
# If we have enable password, send it.
if enable:
remote_conn.send("enable\n")
time.sleep(0.5)
remote_conn.send(enable_password + "\n")
# Disable <-- More --> on Output
remote_conn.sendall("terminal length 0\n")
time.sleep(0.5)
while "#" not in output:
# update receive buffer
if remote_conn.recv_ready():
output += remote_conn.recv(1024).decode('utf-8')
# Clear the Var.
del output
output = ""
# Ok, let's find the device hostname
remote_conn.sendall("show run | inc hostname \n")
time.sleep(0.5)
keeplooping = True
while keeplooping:
if remote_conn.recv_ready():
output += remote_conn.recv(1024).decode('utf-8')
for subline in output.splitlines():
if re.match("^hostname", subline):
#print("Match %s" % subline)
thisrow = subline.split()
try:
gotdata = thisrow[1]
if thisrow[0] == "hostname":
hostname = thisrow[1]
#prompt = hostname + "#"
except IndexError:
gotdata = 'null'
keeplooping = False
# Catch looping failures.
if hostname is False:
print("Hostname Lookup Failed: \n %s \n" % output)
if sysexit:
sys.exit()
# Found it! Return it!
return hostname
def disconnect():
"""Disconnect an SSH Session
Crassh wrapper for paramiko disconnect
    No arguments; disconnects the current global variable ``remote_conn_pre``
"""
global remote_conn_pre
remote_conn_pre.close()
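# Illustrative library-style flow (addresses and credentials are placeholders):
#
#   import crassh
#   host = crassh.connect("192.0.2.10", "admin", "secret", sysexit=False)
#   if host:
#       print(crassh.send_command("show clock", host))
#       crassh.disconnect()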
def main():
"""Main Code Block
This is the main script that Network Administrators will run.
    No arguments. Input is used for missing CLI switches.
"""
# import Global Vars
global input
# Main Vars (local scope)
switches = [] # Switches, devices, routers, whatever!
commands = []
filenames = []
sfile = '' # Switch File
cfile = '' # Command File
# Default variables (values)
play_safe = True
enable = False
delay_command = False
writeo = True
printo = False
bail_timeout = 60
connect_timeout = 10
sysexit = True
backup_credz = False
backup_enable = False
# Default Authentication File Path
crasshrc = os.path.expanduser("~") + "/.crasshrc"
# Get script options - http://www.cyberciti.biz/faq/python-command-line-arguments-argv-example/
try:
myopts, args = getopt.getopt(sys.argv[1:], "c:s:t:T:d:A:U:P:B:b:E:hpwXeQ")
except getopt.GetoptError as e:
print("\n ERROR: %s" % str(e))
print_help(2)
for o, a in myopts:
if o == '-s':
sfile = a
switches = readtxtfile(sfile)
if o == '-c':
cfile = a
commands = readtxtfile(cfile)
if o == '-t':
bail_timeout = int(a)
if o == '-T':
connect_timeout = int(a)
if o == '-h':
print("\n Nick\'s Cisco Remote Automation via Secure Shell- Script, or C.R.A.SSH for short!")
print_help()
if o == '-p':
writeo = False
printo = True
if o == '-w':
writeo = True
if o == '-X':
play_safe = False
if o == '-Q':
sysexit = False
if o == '-e':
enable = True
if o == '-d':
delay_command = True
delay_command_time = int(a)
if o == '-A':
crasshrc = str(a)
if o == '-U':
username = str(a)
if o == '-P':
password = str(a)
if o == '-B':
backup_credz = True
backup_username = str(a)
if o == '-b':
backup_credz = True
backup_password = str(a)
if o == '-E':
backup_enable = True
backup_enable_password = str(a)
# See if we have an Authentication File
if os.path.isfile(crasshrc) is True:
try:
username, password = readauthfile(crasshrc)
except:
pass
# Do we have any switches?
if sfile == "":
try:
iswitch = input("Enter the switch to connect to: ")
switches.append(iswitch)
except:
sys.exit()
# Do we have any commands?
if cfile == "":
try:
icommand = input("The switch command you want to run: ")
commands.append(icommand)
except:
sys.exit()
"""
Check the commands are safe
"""
if play_safe:
for command in commands:
do_no_harm(command)
else:
print("\n--\n Do no Harm checking DISABLED! \n--\n")
"""
Capture Switch log in credentials...
"""
try:
username
except:
try:
username = input("Enter your username: ")
except:
sys.exit()
try:
password
except:
try:
password = getpass.getpass("Enter your password:")
except:
sys.exit()
if enable:
try:
enable_password = getpass.getpass("Enable password:")
except:
sys.exit()
if backup_credz:
try:
backup_password
except:
try:
backup_password = getpass.getpass("Enter your backup SSH password:")
except:
sys.exit()
"""
Time estimations for those delaying commands
"""
if delay_command:
time_estimate = datetime.timedelta(0, (len(commands) * (len(switches) * 2) * delay_command_time)) + datetime.datetime.now()
print(" Start Time: %s" % datetime.datetime.now().strftime("%H:%M:%S (%y-%m-%d)"))
print(" Estimatated Completion Time: %s" % time_estimate.strftime("%H:%M:%S (%y-%m-%d)"))
"""
Progress calculations - for big jobs only
"""
if (len(commands) * len(switches)) > 100:
counter = 0
"""
Ready to loop thru switches
"""
for switch in switches:
if backup_credz:
            tmp_sysexit = sysexit  # re-assign so we don't bail on authentication failure
sysexit = False
if enable:
hostname = connect(switch, username, password, enable, enable_password, sysexit, connect_timeout)
else:
hostname = connect(switch, username, password, False, "", sysexit, connect_timeout)
if isinstance(hostname, bool): # Connection failed, function returned False
if backup_credz:
                sysexit = tmp_sysexit  # restore it so fail-or-not (-Q) behaves as expected with backup credentials
print("Trying backup credentials")
if backup_enable:
hostname = connect(switch, backup_username, backup_password, enable, backup_enable_password, sysexit, connect_timeout)
else:
hostname = connect(switch, backup_username, backup_password, False, "", sysexit, connect_timeout)
if isinstance(hostname, bool): # Connection failed, function returned False
continue
else:
continue
# Write the output to a file (optional) - prepare file + filename before CMD loop
if writeo:
filetime = datetime.datetime.now().strftime("%y%m%d-%H%M%S")
filename = hostname + "-" + filetime + ".txt"
filenames.append(filename)
f = open(filename, 'a')
# Command Loop
for cmd in commands:
# Send the Command
print("%s: Running: %s" % (hostname, cmd))
output = send_command(cmd, hostname, bail_timeout)
# Print the output (optional)
if printo:
print(output)
if writeo:
f.write(output)
# delay next command (optional)
if delay_command:
time.sleep(delay_command_time)
# Print progress
try:
counter
# Random calculation to find 10 percent
if (counter % 10) == 0:
completion = ((float(counter) / (float(len(commands)) * float(len(switches)))) * 100)
if int(completion) > 9:
print("\n %s%% Complete" % int(completion))
if delay_command:
time_left = datetime.timedelta(0, (((int(len(commands)) * int(len(switches))) + (len(switches) * 0.5)) - counter)) + datetime.datetime.now()
print(" Estimatated Completion Time: %s" % time_left.strftime("%H:%M:%S (%y-%m-%d)"))
print(" ")
counter += 1
except:
pass
# /end Command Loop
if writeo:
# Close the File
f.close()
# Disconnect from SSH
disconnect()
if writeo:
print("Switch %s done, output: %s" % (switch, filename))
else:
print("Switch %s done" % switch)
# Sleep between SSH connections
time.sleep(1)
print("\n") # Random line break
print(" ********************************** ")
if writeo:
print(" Output files: ")
for ofile in filenames:
print(" - %s" % ofile)
print(" ---------------------------------- ")
print(" Script FINISHED ! ")
if delay_command:
print(" Finish Time: %s" % datetime.datetime.now().strftime("%H:%M:%S (%y-%m-%d)"))
print(" ********************************** ")
# If run from interpreter, run main code function.
if __name__ == "__main__":
main()
| gpl-2.0 | 1,161,227,159,513,577,200 | 27.337466 | 168 | 0.556555 | false |
localprojects/Change-By-Us | tests/unittests/controller_tests.py | 4 | 1275 | """
:copyright: (c) 2011 Local Projects, all rights reserved
:license: Affero GNU GPL v3, see LICENSE for more details.
"""
from unittest2 import TestCase
from paste.fixture import TestApp
from lib import web
import main
from framework.controller import Controller
class I18nTests (TestCase):
def setUp(self):
web.ctx.method = 'GET'
web.ctx.path = '/'
#
# # Set the debug flag to true, despite what is in the config file
# web.config.debug = False
# web.config.session_parameters['cookie_name'] = 'gam'
#
# # Set up the routes
# app = web.application(main.ROUTES, globals())
#
# # Grab a database connection
# db = main.sessionDB()
#
# # Initialize the session holder (I don't know what that is yet)
# #main.SessionHolder.set(web.session.Session(app, web.session.DBStore(db, 'web_session')))
#
# # Finally, create a test app
# self.app = TestApp(app.wsgifunc())
#
#
def test_SupportedLanguagesIsAsExpected(self):
# Yeah, I'm hitting the file system. Deal with it.
controller = Controller()
langs = controller.get_supported_languages()
self.assertEqual(langs, {'en_TEST':'L33t'})
| agpl-3.0 | -8,502,794,825,892,325,000 | 29.357143 | 98 | 0.618039 | false |
AnyBody-Research-Group/AnyPyTools | tests/test_hdf5.py | 1 | 1551 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 10 13:38:15 2015
@author: kpr
"""
import os
import shutil
import pytest
from anypytools import AnyPyProcess
demo_model_path = os.path.join(os.path.dirname(__file__), "Demo.Arm2D.any")
def setup_simple_model(tmpdir):
shutil.copyfile(demo_model_path, str(tmpdir.join("model.main.any")))
@pytest.yield_fixture()
def init_simple_model(tmpdir):
setup_simple_model(tmpdir)
with tmpdir.as_cwd():
yield tmpdir
@pytest.fixture()
def default_macro():
macro = [
[
'load "model.main.any"',
"operation Main.ArmModelStudy.InverseDynamics",
"run",
'classoperation Main.ArmModelStudy.Output.MaxMuscleActivity "Dump"',
'classoperation Main.ArmModel.GlobalRef.t "Dump"',
]
] * 5
return macro
def test_hdf5save(tmpdir, default_macro):
setup_simple_model(tmpdir)
tmpdir.chdir()
app = AnyPyProcess(keep_logfiles=True)
app.start_macro(default_macro)
app.save_to_hdf5("test.hdf5", "LungeTLEM1")
assert os.path.exists("test.hdf5")
if __name__ == "__main__":
macro = [
[
'load "Demo.Arm2D.any"',
"operation Main.ArmModelStudy.InverseDynamics",
"run",
'classoperation Main.ArmModelStudy.Output.MaxMuscleActivity "Dump"',
'classoperation Main.ArmModel.GlobalRef.t "Dump"',
]
] * 5
app = AnyPyProcess(keep_logfiles=True)
app.start_macro(macro)
app.save_to_hdf5("test2.hdf5", "Lunge11dec")
| mit | -2,937,127,854,847,000,600 | 23.234375 | 80 | 0.622824 | false |
3dconv/keras | keras/utils/test_utils.py | 85 | 1091 | import numpy as np
def get_test_data(nb_train=1000, nb_test=500, input_shape=(10,), output_shape=(2,),
classification=True, nb_class=2):
'''
classification=True overrides output_shape
(i.e. output_shape is set to (1,)) and the output
consists in integers in [0, nb_class-1].
Otherwise: float output with shape output_shape.
'''
nb_sample = nb_train + nb_test
if classification:
y = np.random.randint(0, nb_class, size=(nb_sample, 1))
X = np.zeros((nb_sample,) + input_shape)
for i in range(nb_sample):
X[i] = np.random.normal(loc=y[i], scale=1.0, size=input_shape)
else:
y_loc = np.random.random((nb_sample,))
X = np.zeros((nb_sample,) + input_shape)
y = np.zeros((nb_sample,) + output_shape)
for i in range(nb_sample):
X[i] = np.random.normal(loc=y_loc[i], scale=1.0, size=input_shape)
y[i] = np.random.normal(loc=y_loc[i], scale=1.0, size=output_shape)
return (X[:nb_train], y[:nb_train]), (X[nb_train:], y[nb_train:])
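# Usage sketch (editor's illustration): a small 3-class classification split.
# Shapes follow from the code above: X_train is (200, 10) and y_train is (200, 1).
#
#   (X_train, y_train), (X_test, y_test) = get_test_data(
#       nb_train=200, nb_test=100, input_shape=(10,), nb_class=3)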
| mit | 8,835,985,632,777,276,000 | 39.407407 | 83 | 0.578368 | false |
DeBortoliWines/Bika-LIMS | bika/lims/upgrade/to3013.py | 2 | 1067 | from Acquisition import aq_inner
from Acquisition import aq_parent
from bika.lims.permissions import *
from Products.Archetypes.BaseContent import BaseContent
from bika.lims.upgrade import stub
def upgrade(tool):
portal = aq_parent(aq_inner(tool))
setup = portal.portal_setup
setup.runImportStepFromProfile('profile-bika.lims:default', 'typeinfo')
stub('bika.lims.content.pricelistlineitem', 'PricelistLineItem',
BaseContent)
for pl in portal['pricelists'].objectValues():
pl.pricelist_lineitems = []
for pli in pl.objectValues():
item = dict(
title=pli.title,
ItemDescription=pli.ItemDescription,
Accredited=pli.Accredited,
Subtotal="%d.%d" % (pli.Subtotal[0], pli.Subtotal[1]),
VATAmount="%d.%d" % (pli.VATAmount[0], pli.VATAmount[1]),
Total="%d.%d" % (pli.Total[0], pli.Total[1]),
CategoryTitle=pli.CategoryTitle,
)
pl.pricelist_lineitems.append(item)
return True
| agpl-3.0 | -3,494,125,199,020,480,000 | 35.793103 | 75 | 0.626054 | false |
pchaigno/grreat | parsers/binplist.py | 7 | 30023 | #!/usr/bin/env python
"""Parser for Apple's binary plist (Property List) files.
Based on the specification at:
http://opensource.apple.com/source/CF/CF-550/CFBinaryPList.c
To parse a plist from a file you can use the readPlist(fileOrObj) function
with a file name or a file-like object. It returns the top level object,
which will usually be a dictionary.
Forensic use
If you are using this module, though, chances are you are a forensicator and
you want to be able to get all the information in the plist even when it is
corrupt. For this use case, it's better to create an instance of the
BinaryPlist class with a file-like object and then call its Parse() method.
with open("myfile.plist") as fd:
  bplist = BinaryPlist(fd)
  top_level_object = bplist.Parse()
The Parse method returns the top level object, just as readPlist.
Once parsed, you can check BinaryPlist.is_corrupt to find out whether the plist
had corrupt data. You will find a dictionary with all objects within the bplist
at BinaryPlist.objects. The dictionary keys are the indexes of each object, and
the values are its parsed contents.
So if a binary plist contains this dictionary:
{"a": True}
The binary plist will have 3 objects. A dictionary, a string "a" and a True
value. BinaryPlist.objects will contain 3 entries as well, one for each object.
You have a mapping of object index to offset at BinaryPlist.object_offsets.
The index of the top level object is available at BinaryPlist.top_level_index.
Happy bplisting!
"""
import cStringIO
import datetime
import logging
import math
import os
import plistlib
import pprint
import struct
import sys
import xml.parsers.expat
import pytz
class NullValue(object):
"""Identifies the Null object."""
class RawValue(object):
"""Used when objects are corrupt and no sensible value can be extracted."""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return repr(self.value)
def __eq__(self, other):
return self.value == other
class CorruptReference(object):
"""Marks references to objects that are corrupt."""
class UnknownObject(object):
"""Marks objects that we don't know how to parse."""
class Error(Exception):
"""Base exception."""
class FormatError(Error):
"""Error while parsing the bplist format."""
BIG_ENDIAN = 0
LITTLE_ENDIAN = 1
class BinaryPlist(object):
"""Represents a binary plist."""
# Maps markers to human readable object types and their parsing function
KNOWN_MARKERS = {
0x0: ("BOOLFILL", "_ParseBoolFill"),
0x1: ("INT", "_ParseInt"),
0x2: ("REAL", "_ParseReal"),
0x3: ("DATE", "_ParseDate"),
0x4: ("DATA", "_ParseData"),
0x5: ("STRING", "_ParseString"),
0x6: ("UTF16", "_ParseUtf16"),
0x8: ("UID", "_ParseUid"),
0xA: ("ARRAY", "_ParseArray"),
0xC: ("SET", "_ParseSet"),
0xD: ("DICT", "_ParseDict"),
}
# Several data structures in binary plists explicitly define the size of an
# integer that's stored next to the declared size.
# We maintain a mapping of byte sizes to python struct format characters to
# use them in these cases.
bytesize_to_uchar = {1: "B", 2: "H", 4: "L", 8: "Q"}
# Timestamps in binary plists are relative to 2001-01-01T00:00:00.000000Z
plist_epoch = datetime.datetime(2001, 1, 1, 0, 0, 0, tzinfo=pytz.utc)
def __init__(self, file_obj=None, discovery_mode=False):
"""Constructor.
Args:
file_obj: File-like object to read from.
discovery_mode: When activated, it will inform the user when one of the
uncommon objects has been found. It's expected to be used while we finish
validating the parser against real binary plists. Disabled by default.
"""
self.discovery_mode = discovery_mode
self._Initialize()
self.fd = None
if file_obj:
self.Open(file_obj)
def _Initialize(self):
"""Resets all the parsing state information."""
# A list of all the offsets. You can find an object's offset by indexing
# this list with the object index
self.object_offsets = []
# A dictionary of object indexes and parsed object values
self.objects = {}
self.is_corrupt = False
# Header attributes
self.version = ""
# Trailer attributes
self.sort_version = 0
self.offset_int_size = 0
self.object_ref_size = 0
self.object_count = 0
self.top_level_index = None
self.offtable_offset = 0
# List of traversed object indexes to detect circular references
self.objects_traversed = set()
@property
def top_level_object(self):
if self.top_level_index is None:
return None
return self._ParseObjectByIndex(self.top_level_index, self.object_offsets)
def Open(self, file_obj):
data = file_obj.read()
self.fd = cStringIO.StringIO(data)
self._file_size = len(data)
logging.debug("File size is: %d.", self._file_size)
def Parse(self):
"""Parses the file descriptor at file_obj."""
self._Initialize()
if not self.fd:
raise IOError("No data available to parse. Did you call Open() ?")
# Each of these functions will raise if an unrecoverable error is found
self._ReadHeader()
self._ReadTrailer()
self._ReadOffsetTable()
self._ParseObjects()
return self._ParseObjectByIndex(self.top_level_index, self.object_offsets)
def Close(self):
self.fd = None
def _ReadHeader(self):
"""Parses the bplist header.
The binary plist header contains the following structure:
typedef struct {
uint8_t _magic[6];
uint8_t _version[2];
} CFBinaryPlistHeader;
Raises:
FormatError: When the header is too short or the magic value is invalid.
"""
header_struct = struct.Struct(">6s2s")
self.fd.seek(0)
data = self.fd.read(header_struct.size)
if len(data) != header_struct.size:
raise FormatError("Wrong header length (got %d, expected %ld)." %
(len(data), header_struct.size))
magic, self.version = header_struct.unpack(data)
if magic != "bplist":
raise FormatError("Wrong magic '%s', expecting 'bplist'." % magic)
logging.debug("MAGIC = %s", magic)
logging.debug("VERSION = %s", self.version)
if self.version[0] != "0":
logging.warn("Unknown version. Proceeding anyway...")
def _ReadTrailer(self):
"""Parses the trailer.
The binary plist trailer consists of the following structure at the end of
the file.
typedef struct {
uint8_t _unused[5];
uint8_t _sortVersion;
uint8_t _offsetIntSize;
uint8_t _objectRefSize;
uint64_t _numObjects;
uint64_t _topObject;
uint64_t _offsetTableOffset;
} CFBinaryPlistTrailer;
Raises:
IOError: When there is not enough data for the trailer.
"""
trailer_struct = struct.Struct(">5xBBBQQQ") # YUMMY!
trailer_size = trailer_struct.size
self.fd.seek(-trailer_size, os.SEEK_END)
data = self.fd.read(trailer_size)
if len(data) != trailer_size:
raise IOError("Wrong trailer length (got %d, expected %ld." %
(len(data), trailer_struct.size))
(self.sort_version,
self.offset_int_size,
self.object_ref_size,
self.object_count,
self.top_level_index,
self.offtable_offset) = trailer_struct.unpack(data)
logging.debug("Sort: %d", self.sort_version)
logging.debug("int size: %d", self.offset_int_size)
logging.debug("ref size: %d", self.object_ref_size)
logging.debug("obects available: %d", self.object_count)
logging.debug("top object: %d", self.top_level_index)
logging.debug("Offset table: %d", self.offtable_offset)
def _ReadOffsetTable(self):
"""Parses the bplist offset table.
The offset table is a list of integers of size between 1 and 8 bytes. Each
points to the file offset where the relevant object is located.
The integer size comes from the trailer.
Raises:
FormatError: When the offset to the offset table is invalid or the
offset table overflows the file contents.
"""
self.object_offsets = []
if self.offtable_offset >= self._file_size:
raise FormatError("Offset table offset past the file end.")
self.fd.seek(self.offtable_offset)
# Offsets table declared length
data_size = self.object_count * self.offset_int_size
# SANITY CHECK: See if the offset table is contained in the file
if data_size > (self._file_size - self.offtable_offset):
raise FormatError("Length of offsets table larger than the data available"
"in the file (%d vs %ld)." %
(data_size, self._file_size))
for object_index in range(self.object_count):
# We can have offsets of sizes 1 to 8 bytes so we can't just use struct
offset = self._ReadArbitraryLengthInteger(self.offset_int_size)
logging.debug("Object %d offset = %ld.", object_index, offset)
self.object_offsets.append(offset)
def _ReadArbitraryLengthInteger(self, length=0, endianness=BIG_ENDIAN):
"""Returns an integer from self.fd of the given length and endianness."""
logging.debug("read arbitrary length integer length %d", length)
data = self.fd.read(length)
if len(data) < length:
length = len(data)
logging.debug("Not enough data, reading %d instead.", length)
integer = 0
if endianness is BIG_ENDIAN:
for data_index in range(0, length, 1):
integer <<= 8
integer |= ord(data[data_index])
elif endianness is LITTLE_ENDIAN:
for data_index in range(length-1, -1, -1):
integer <<= 8
integer |= ord(data[data_index])
else:
raise ValueError("Unknown endianness requested: %d" % endianness)
return integer
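  # Worked example (editor's note, not in the original source): for the two
  # bytes "\x01\x02", a big-endian read gives (0x01 << 8) | 0x02 == 258, while
  # a little-endian read of the same bytes gives (0x02 << 8) | 0x01 == 513.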
def _ParseObjects(self):
"""Parses the objects at file offsets contained in object_offsets."""
self.objects = {}
for object_index, offset in enumerate(self.object_offsets):
logging.debug(">>> PARSING OBJECT %d AT OFFSET %ld",
object_index, offset)
self._ParseObjectByIndex(object_index, self.object_offsets)
def _ParseObjectByIndex(self, index, offset_list):
"""Returns an object by its index.
If the object has already been parsed, it's not parsed again, but served
from the cached version instead.
Args:
index: The 0-based index of the object in the offset_list.
offset_list: A list of offsets for each of the available objects in the
binary plist file.
Returns:
The object.
Raises:
IndexError: If the index is invalid.
"""
# Add the object to the list of traversed objects
self.objects_traversed.add(index)
offset = offset_list[index]
logging.debug("Getting object at index %d", index)
try:
obj = self.objects[index]
logging.debug("CACHE HIT")
except KeyError:
logging.debug("CACHE MISS")
if offset > self._file_size:
# This only happens when the offset in the offset table is wrong
obj = CorruptReference
else:
self.fd.seek(offset)
obj = self._ParseObject()
self.objects[index] = obj
finally:
# Remove the index from the list of traversed objects
self.objects_traversed.remove(index)
logging.debug("Object %d = %s", index, obj)
return obj
def _ParseObject(self):
"""Parses the binary plist object available in self.fd.
Every object in a plist starts with a marker. The markers is a single byte
where the high nibble indicates the type. The lower nibble meaning depends
on the object type.
Returns:
A python object representing the plist object.
Raises:
IOError: When there's not enough data in self.fd to read a new object.
"""
logging.debug("At offset %d", self.fd.tell())
marker_string = self.fd.read(1)
if len(marker_string) < 1:
raise IOError("Not enough data available to read a new object.")
marker = ord(marker_string[0])
logging.debug(">> MARKER: 0x%02lx", marker)
marker_lo = (marker & 0x0F)
marker_hi = (marker & 0xF0) >> 4
logging.debug(">> MARKER HI: %lx", marker_hi)
logging.debug(">> MARKER LO: %lx", marker_lo)
try:
(marker_name, parsing_function_name) = self.KNOWN_MARKERS[marker_hi]
logging.debug("DATA TYPE: %s", marker_name)
return getattr(self, parsing_function_name)(marker_lo)
except KeyError:
logging.warn("UNKNOWN MARKER %lx", marker)
return UnknownObject
def _ParseBoolFill(self, marker_lo):
"""Parses a null, boolean or fill object.
Args:
marker_lo: The lower nibble of the marker.
Returns:
NullValue, True, False or None when it's a fill byte. If the object type
is unknown, UnknownObject is returned instead.
"""
ret_values = {0x00: NullValue, 0x08: False, 0x09: True, 0xF: None}
# SANITY CHECK: No values outside these are known
try:
return ret_values[marker_lo]
except KeyError:
logging.warn("Simple value type %d unknown.", marker_lo)
return UnknownObject
def _ParseInt(self, marker_lo):
"""Parses an integer object.
Args:
marker_lo: The lower nibble of the marker.
Returns:
The integer value or RawValue when it's corrupt.
"""
int_bytes = 1 << marker_lo
logging.debug("Integer size %d", int_bytes)
# SANITY CHECK: The only allowed integer lengths by OSX seem to be 1, 2, 4,
# 8 or 16 bytes.
# XXX: Revisit this and decide if we should instead accept any length.
if marker_lo not in [0, 1, 2, 3, 4]:
logging.warn("Non-standard integer length (%d).", marker_lo)
data = self.fd.read(int_bytes)
return RawValue(data)
if int_bytes == 8 and self.version == "00":
# 8-byte integers in version 00 are always signed
logging.debug("Signed integer")
int_struct = struct.Struct(">q")
elif int_bytes == 16:
if self.version == "00":
        # 16-byte signed integer
logging.debug("Signed integer")
int_struct = struct.Struct(">qq")
else:
        # 16-byte unsigned integer? That's what the documentation seems to hint.
# Sadly, I haven't been able to reproduce this yet as neither plutil nor
# XCode allow me to give integers bigger than the maximum representable
# 8-byte integer.
int_struct = struct.Struct(">QQ")
data = self.fd.read(int_struct.size)
(high, low) = int_struct.unpack(data)
logging.debug("High 8byte: %lx", high)
logging.debug("Low 8byte: %lx", low)
return (high << 64) | low
else:
# All other sizes are unsigned
int_struct = struct.Struct(">%c" % self.bytesize_to_uchar[int_bytes])
data = self.fd.read(int_struct.size)
if len(data) < int_struct.size:
return RawValue(data)
logging.debug("Raw integer: %r", data)
(value,) = int_struct.unpack(data)
return value
def _ParseReal(self, marker_lo):
"""Parses a real object.
Reals are stored as a 4byte float or 8byte double per IEE754's format.
The on-disk length is given by marker_lo.
Args:
marker_lo: The lower nibble of the marker.
Returns:
A float or double object representing the object.
"""
logging.debug("Real size %d", marker_lo)
# SANITY CHECK: Real size must be 4 or 8 bytes on disk
if marker_lo not in [2, 3]:
real_length = 1 << marker_lo
logging.warn("Non-standard real number length (%d).", real_length)
data = self.fd.read(real_length)
return RawValue(data)
if marker_lo == 2:
# Read an IEE754 float
float_struct = struct.Struct(">f")
else:
# Read an IEE754 double precision float
float_struct = struct.Struct(">d")
data = self.fd.read(float_struct.size)
(value,) = float_struct.unpack(data)
return value
def _ParseDate(self, marker_lo):
"""Parses a date object.
Dates are stored as a double precision floating point representing the
seconds since 2001-01-01T01:00:00Z. Interesting enough, negative dates
not only are allowed, but used by the system to represent earlier dates.
Again, the binary format differs from the XML format in that OSX doesn't
seem to allow for microsecond precision in the XML, while the binary
format does.
The following code handles these cases gracefully.
Args:
marker_lo: The lower nibble of the marker.
Returns:
A datetime object representing the stored date or RawValue if the
datetime data was corrupt.
"""
# SANITY CHECK: OSX only writes 8 byte dates. We just warn if the size
# is wrong, but will read and decode 8 bytes anyway hoping only the marker
# was corrupt.
if marker_lo != 3:
logging.warn("Non-standard (8) date length (%d).", 1 << marker_lo)
self.is_corrupt = True
# Read an IEE754 double precision float
date_struct = struct.Struct(">d")
data = self.fd.read(date_struct.size)
if len(data) < date_struct.size:
return RawValue(data)
logging.debug("Raw date: %r.", data)
(float_date,) = date_struct.unpack(data)
logging.debug("Date decoded as: %s.", float_date)
fraction, integer = math.modf(float_date)
try:
date_offset = datetime.timedelta(seconds=int(integer),
microseconds=int(fraction*1000000))
except OverflowError:
return RawValue(data)
return self.plist_epoch + date_offset
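  # Worked example (editor's note): a stored double of 86400.0 decodes to
  # 2001-01-02 00:00:00 UTC (one day after the plist epoch), while -86400.0
  # decodes to 2000-12-31 00:00:00 UTC.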
def _ParseData(self, marker_lo):
"""Parses a data object.
Data objects are stored as plain byte dumps. As in python 2.7 there's no
distiction between bytes and strings, the same function is used.
Args:
marker_lo: The lower nibble of the marker.
Returns:
A byte string containing the data.
"""
return self._ParseString(marker_lo)
def _ParseString(self, marker_lo, char_size=1):
"""Parses a binary object stored like a string/unicode/data object.
These objects are stored as a bunch of bytes representing the object, the
length of which is defined by the lower nibble of the marker. If the length
is 0xF, however, the first data byte marks the power of 2 bytes used to
store the length of the string.
So the following object is a string of more than 0xE bytes (\x5F), the
length of which is contained in 2^0 (\x00) byte, which is 15 (\x0F).
"\x5F\x00\x0FThis is a string"
Args:
marker_lo: The lower nibble of the marker.
char_size: The amount of bytes to read per each declared length unit.
Returns:
A byte string with the data contained in the object.
"""
strlen = self._GetSizedIntFromFd(marker_lo)
logging.debug("String of size %d", strlen)
return self.fd.read(strlen*char_size)
def _ReadStructFromFd(self, file_obj, structure):
"""Reads the given structucture from file_obj and returns the unpacked data.
Raises:
      IOError: When there wasn't enough data in file_obj to accommodate the
requested structure.
Args:
file_obj: A file_like object.
structure: An instance of struct.Struct to read from file_obj.
Returns:
The unpacked structure elements.
"""
logging.debug(">>> Reading %d bytes", structure.size)
data = file_obj.read(structure.size)
if len(data) < structure.size:
raise IOError
return structure.unpack(data)
def _GetSizedIntFromFd(self, marker_lo):
"""Reads a sized integer from self.fd.
Apple tries to use the minimum amount of storage to serialize its data
types. To this end, several object types specify their length through sized
integers. Objects of variable size store its size in the lowest nibble of
their marker (this is expected to be passed as marker_lo). When the length
of an object exceeds 14 (0xE), the lowest nibble of the marker is set to
0xF. Then, the next byte indicates how many bytes the length of the object
    occupies. These length bytes must be read and interpreted as an integer to
decode how many elements long the object is.
Example: A dictionary with 20 elements is hex encoded like this:
    DF 10 14 [...]
    0xDF is the marker and it means it's a dictionary (0xD0) with more than 14
    elements (0x0F). The next byte (0x10) says the element count fits in a
    single following byte, and that byte gives the count: 20 (0x14).
Args:
marker_lo: The lower nibble of the marker.
Returns:
The integer value of the sized integer at self.fd.
"""
if marker_lo == 0xF:
logging.debug("marker_lo is 0xF, fetching real size")
# First comes the byte count
size_len_struct = struct.Struct(">B")
(size,) = self._ReadStructFromFd(self.fd, size_len_struct)
size_byte_count = 1 << (size & 0xF)
try:
struct_char = self.bytesize_to_uchar[size_byte_count]
except KeyError:
# TODO(user): Improve this, this is awful
# CORRUPTION
# If the value is not there, we'll default to 2
logging.warn("unknown size found %d, defaulting to 2", size_byte_count)
struct_char = self.bytesize_to_uchar.get(2)
self.is_corrupt = True
strlen_struct = struct.Struct(">%c" % struct_char)
(strlen,) = self._ReadStructFromFd(self.fd, strlen_struct)
return strlen
logging.debug("Found size %s", marker_lo)
return marker_lo
def _ParseUtf16(self, marker_lo):
"""Parses a Unicode object.
Unicode objects are stored with the same format as strings, only that the
specified string length doesn't match the stored length, which has to be
multiplied by 2 due to how characters are encoded.
Args:
marker_lo: The lower nibble of the marker.
Returns:
A unicode string that contains the unicode object data or RawValue
if the data was not a valid UTF_16 string. Note that the RawValue
returned will not have a unicode string but raw bytes.
"""
utf16 = self._ParseString(marker_lo, char_size=2)
logging.debug("RAW UTF16 = %s...", utf16[:min(len(utf16), 10)])
try:
return utf16.decode("utf-16-be")
except UnicodeDecodeError:
return RawValue(utf16)
def _ParseUid(self, marker_lo):
"""Parses a UID object.
UID objects are a rare breed. They do not seem to have an XML
representation and only appear on Keyed archives. OSX only seems to
write them as 1, 2, 4 or 8 bytes long. However, they are defined as
32 bits long on OSX, so they'll hardly be written as 64 bits :/
The low part of the marker is the UID length - 1.
Args:
marker_lo: The lower nibble of the marker.
Returns:
An integer representing the UID.
See:
http://developer.apple.com/library/mac/#documentation/
cocoa/Reference/Foundation/Classes/NSKeyedArchiver_Class/Reference/
Reference.html#//apple_ref/occ/cl/NSKeyedArchiver
"""
# SANITY CHECK: Size
uid_size = marker_lo + 1
if uid_size not in [1, 2, 4, 8]:
logging.warn("Uncommon UID size %d (expected 1, 2, 4 or 8)", uid_size)
self._LogDiscovery("FOUND A UID!")
return self._ReadArbitraryLengthInteger(uid_size)
def _ParseArray(self, marker_lo):
"""Parses an array object.
Arrays are stored on disk as list of references. This function obtains the
references and resolves the objects inside the array, thus presenting a
fully resolved array. Corrupt references are replaced by CorruptReference
objects.
Note that calling this function directly might not protect you against
circular references. Call _ParseObject instead.
Args:
marker_lo: The lower nibble of the marker, indicating the number of
references in the array
Returns:
A list of objects.
"""
array = []
arraylen = self._GetSizedIntFromFd(marker_lo)
references = self._GetObjectReferences(arraylen)
logging.debug(references)
for reference in references:
# We need to avoid circular references...
if reference is CorruptReference:
array.append(CorruptReference)
continue
elif reference in self.objects_traversed:
logging.warn("Circular reference detected at array object.")
self.is_corrupt = True
array.append(CorruptReference)
continue
elif reference >= self.object_count:
logging.warn("Reference %d out of bounds, skipping...", reference)
self.is_corrupt = True
array.append(CorruptReference)
continue
array.append(self._ParseObjectByIndex(reference, self.object_offsets))
logging.debug(array)
return array
def _GetObjectReferences(self, length):
"""Obtains a list of references from the file descriptor fd.
Objects that use object references are dicts, arrays and sets.
An object reference is the index of the object in the offset table.
Args:
length: The amount of object references.
Returns:
A list of references.
"""
references = []
logging.debug("object_ref_size is %d", self.object_ref_size)
struct_char = self.bytesize_to_uchar[self.object_ref_size]
objref_struct = struct.Struct(">%c" % struct_char)
for _ in range(length):
try:
(ref,) = self._ReadStructFromFd(self.fd, objref_struct)
except IOError:
ref = CorruptReference
references.append(ref)
return references
def _ParseSet(self, marker_lo):
"""Parses a set object.
Sets are unordered arrays. They look exactly the same on disk as arrays.
Args:
marker_lo: The lower nibble of the marker.
Returns:
A list representing the stored set object at self.fd.
"""
self._LogDiscovery("FOUND A SET!!!")
return self._ParseArray(marker_lo)
def _ParseDict(self, marker_lo):
"""Parses a dict object.
Dictionaries are stored as a list of key-value pairs. These are pairs of
references. The amount of entries in a dictionary is determined by
marker_lo. If marker_lo is 0xf, then a sized int is used instead.
The list of references contains first all the keys and then all the values.
Note that calling this function directly might not protect you against
circular references. Call _ParseObject instead.
Args:
marker_lo: The lower nibble of the marker.
Returns:
A dictionary representing the stored dictionary object at self.fd.
"""
the_dict = {}
dictlen = self._GetSizedIntFromFd(marker_lo)
logging.debug("Fetching key references.")
keys = self._GetObjectReferences(dictlen)
logging.debug("Fetching value references.")
values = self._GetObjectReferences(dictlen)
logging.debug(zip(keys, values))
for k_ref, v_ref in zip(keys, values):
if k_ref in self.objects_traversed or k_ref >= self.object_count:
# Circular reference at the key or key pointing to a nonexisting object
logging.warn("Circular reference key or invalid object key.")
key = "corrupt:%d" % k_ref
self.is_corrupt = True
else:
key = self._ParseObjectByIndex(k_ref, self.object_offsets)
if v_ref in self.objects_traversed or v_ref >= self.object_count:
# Circular reference at value or value pointing to a nonexisting object
logging.warn("Circular reference value or invalid object value.")
value = CorruptReference
self.is_corrupt = True
else:
value = self._ParseObjectByIndex(v_ref, self.object_offsets)
try:
the_dict[key] = value
except TypeError:
# key is not hashable, so we adjust...
logging.debug("Key %s not hashable... marking as corrupt.", k_ref)
the_dict["corrupt:%d" % k_ref] = value
self.is_corrupt = True
return the_dict
def _LogDiscovery(self, msg, *args, **kwargs):
"""Informs the user that something that requires research was found."""
if self.discovery_mode:
logging.info("DISCOVERY FOUND: %s\nPlease inform %s.",
msg, __author__, *args, **kwargs)
# Named readPlist so that binplist resembles the plistlib standard python module
# pylint: disable=g-bad-name
def readPlist(pathOrFile):
"""Returns the top level object of the plist at pathOrFile.
Args:
pathOrFile: A path or a file-like object to the plist.
Returns:
The top level object of the plist.
Raises:
FormatError: When the given file is not a binary plist or its version
is unknown.
"""
try:
# See if it's a file-like object.
bplist_start_offset = pathOrFile.tell()
file_obj = pathOrFile
except AttributeError:
# Must be a path then
file_obj = open(pathOrFile)
bplist_start_offset = file_obj.tell()
magicversion = file_obj.read(8)
if magicversion.startswith("bplist15"):
logging.info("Binary plist version 1.5 found. Please, inform %s.",
__author__)
raise FormatError("Binary plist version 1.5 found. Not supported yet.")
try:
file_obj.seek(bplist_start_offset)
bplist = BinaryPlist(file_obj)
return bplist.Parse()
except FormatError:
try:
file_obj.seek(bplist_start_offset)
return plistlib.readPlist(file_obj)
except xml.parsers.expat.ExpatError:
raise FormatError("Invalid plist file.")
if __name__ == "__main__":
if len(sys.argv) <= 1:
print "Usage: %s <file>" % sys.argv[0]
sys.exit(-1)
logging.basicConfig(level=logging.WARN)
with open(sys.argv[1]) as fd:
plist = BinaryPlist(file_obj=fd)
try:
res = plist.Parse()
if plist.is_corrupt:
logging.warn("%s LOOKS CORRUPTED. You might not obtain all data!\n",
sys.argv[1])
pprint.PrettyPrinter().pprint(res)
except FormatError, e:
sys.stderr.write("ERROR PARSING %s as a binary plist: %s\n"
% (sys.argv[1], e))
pprint.PrettyPrinter().pprint(plistlib.readPlist(sys.argv[1]))
| apache-2.0 | 1,525,739,950,228,577 | 33.272831 | 80 | 0.664357 | false |
barbour-em/osf.io | website/project/views/tag.py | 4 | 1517 | import httplib as http
from modularodm.exceptions import ValidationError
from framework.auth.decorators import collect_auth
from website.util.sanitize import clean_tag
from website.project.model import Tag
from website.project.decorators import (
must_be_valid_project, must_have_permission, must_not_be_registration
)
# Disabled for now. Should implement pagination, or at least cap the number of
# nodes serialized, before re-enabling.
@collect_auth
def project_tag(tag, auth, **kwargs):
tag_obj = Tag.load(tag)
nodes = tag_obj.node__tagged if tag_obj else []
visible_nodes = [obj for obj in nodes if obj.can_view(auth)]
return {
'nodes': [
{
'title': node.title,
'url': node.url,
}
for node in visible_nodes
],
'tag': tag,
}
@must_be_valid_project # injects project
@must_have_permission('write')
@must_not_be_registration
def project_addtag(auth, node, **kwargs):
tag = clean_tag(kwargs['tag'])
if tag:
try:
node.add_tag(tag=tag, auth=auth)
return {'status': 'success'}, http.CREATED
except ValidationError:
return {'status': 'error'}, http.BAD_REQUEST
@must_be_valid_project # injects project
@must_have_permission('write')
@must_not_be_registration
def project_removetag(auth, node, **kwargs):
tag = clean_tag(kwargs['tag'])
if tag:
node.remove_tag(tag=tag, auth=auth)
return {'status': 'success'}
| apache-2.0 | 786,518,497,924,917,900 | 26.581818 | 78 | 0.641397 | false |
WadeBarnes/TheOrgBook | tob-api/api_v2/serializers/search.py | 2 | 12005 | # TODO: migrate most of these serializers to a UI specific serializer module
import logging
from collections import OrderedDict
from datetime import datetime, timedelta
from api_v2 import utils
from api_v2.models.Address import Address
from api_v2.models.Attribute import Attribute
from api_v2.models.Credential import Credential
from api_v2.models.CredentialType import CredentialType
from api_v2.models.Issuer import Issuer
from api_v2.models.Name import Name
from api_v2.search_indexes import CredentialIndex
from api_v2.serializers.rest import (AddressSerializer, AttributeSerializer,
CredentialAddressSerializer,
CredentialAttributeSerializer,
CredentialNamedTopicSerializer,
CredentialNameSerializer,
CredentialSerializer,
CredentialSetSerializer,
CredentialTopicExtSerializer,
CredentialTypeSerializer,
IssuerSerializer, NameSerializer,
TopicRelationshipSerializer,
TopicSerializer)
from django.db.models.manager import Manager
from drf_haystack.serializers import (FacetFieldSerializer,
HaystackFacetSerializer,
HaystackSerializerMixin)
from rest_framework.serializers import (ListSerializer, ModelSerializer,
SerializerMethodField)
from rest_framework.utils.serializer_helpers import ReturnDict
logger = logging.getLogger(__name__)
class SearchResultsListSerializer(ListSerializer):
@staticmethod
def __camelCase(s):
return s[:1].lower() + s[1:] if s else ""
def __get_keyName(self, instance):
search_index = instance.searchindex
model = search_index.get_model()
return self.__camelCase(model.__name__) + "s"
@property
def data(self):
ret = super(ListSerializer, self).data
return ReturnDict(ret, serializer=self)
def to_representation(self, data):
results = OrderedDict()
iterable = data.all() if isinstance(data, Manager) else data
for item in iterable:
search_index_name = self.__get_keyName(item)
results.setdefault(search_index_name, []).append(
self.child.to_representation(item)
)
return results
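# Illustrative note (editor's addition): to_representation() above groups hits
# by the camel-cased, pluralised model name of their search index. For example,
# a result set containing only Credential hits serializes as:
#
#   {"credentials": [{...}, {...}]}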
class CustomIssuerSerializer(IssuerSerializer):
class Meta(IssuerSerializer.Meta):
fields = ("id", "did", "name", "abbreviation", "email", "url", "has_logo")
exclude = None
class CustomAddressSerializer(AddressSerializer):
last_updated = SerializerMethodField()
inactive = SerializerMethodField()
class Meta(AddressSerializer.Meta):
fields = tuple(AddressSerializer.Meta.fields) + (
"credential_id", "last_updated", "inactive")
def get_last_updated(self, obj):
return obj.credential.effective_date
def get_inactive(self, obj):
return obj.credential.inactive
class CustomAttributeSerializer(AttributeSerializer):
credential_type_id = SerializerMethodField()
last_updated = SerializerMethodField()
inactive = SerializerMethodField()
class Meta(AttributeSerializer.Meta):
fields = (
"id", "credential_id", "credential_type_id",
"last_updated", "inactive",
"type", "format", "value",
)
def get_credential_type_id(self, obj):
return obj.credential.credential_type_id
def get_last_updated(self, obj):
return obj.credential.effective_date
def get_inactive(self, obj):
return obj.credential.inactive
class CustomNameSerializer(NameSerializer):
last_updated = SerializerMethodField()
inactive = SerializerMethodField()
issuer = SerializerMethodField()
class Meta(NameSerializer.Meta):
fields = (
"id", "credential_id", "last_updated", "inactive",
"text", "language", "issuer", "type",
)
def get_last_updated(self, obj):
return obj.credential.effective_date
def get_inactive(self, obj):
return obj.credential.inactive
def get_issuer(self, obj):
serializer = CustomIssuerSerializer(
instance=obj.credential.credential_type.issuer
)
return serializer.data
class CustomTopicSerializer(TopicSerializer):
names = SerializerMethodField()
addresses = SerializerMethodField()
attributes = SerializerMethodField()
class Meta(TopicSerializer.Meta):
depth = 1
fields = (
"id",
"create_timestamp",
"source_id",
"type",
"names",
"addresses",
"attributes",
)
def get_names(self, obj):
names = Name.objects.filter(
credential__topic=obj,
credential__latest=True,
credential__revoked=False,
).order_by('credential__inactive')
serializer = CustomNameSerializer(instance=names, many=True)
return serializer.data
def get_addresses(self, obj):
addresses = Address.objects.filter(
credential__topic=obj,
credential__latest=True,
credential__revoked=False,
).order_by('credential__inactive')
serializer = CustomAddressSerializer(instance=addresses, many=True)
return serializer.data
def get_attributes(self, obj):
attributes = Attribute.objects.filter(
credential__topic=obj,
credential__latest=True,
credential__revoked=False,
).order_by('credential__inactive')
serializer = CustomAttributeSerializer(instance=attributes, many=True)
return serializer.data
class CustomTopicRelationshipSerializer(TopicRelationshipSerializer):
attributes = CredentialAttributeSerializer(source="credential_attributes", many=True)
topic = CustomTopicSerializer()
related_topic = CustomTopicSerializer()
relation_id = SerializerMethodField()
topic_id = SerializerMethodField()
def __init__(self, *args, **kwargs):
super(CustomTopicRelationshipSerializer, self).__init__(*args)
self.relationship_type = kwargs.get('relationship_type', 'to' )
class Meta(TopicRelationshipSerializer.Meta):
depth = 1
fields = (
"topic_id",
"relation_id",
"credential",
"topic",
"related_topic",
"attributes",
)
def get_relation_id(self, obj):
return obj.id
def get_topic_id(self, obj):
if self.relationship_type == 'to':
return obj.topic.id
else:
return obj.related_topic.id
class CredentialSearchSerializer(HaystackSerializerMixin, CredentialSerializer):
addresses = CredentialAddressSerializer(many=True)
attributes = CredentialAttributeSerializer(many=True)
credential_set = CredentialSetSerializer()
credential_type = CredentialTypeSerializer()
names = CredentialNameSerializer(many=True)
topic = CredentialNamedTopicSerializer()
related_topics = CredentialNamedTopicSerializer(many=True)
class Meta(CredentialSerializer.Meta):
fields = (
"id", "create_timestamp", "update_timestamp",
"effective_date",
"inactive", "latest", "revoked", "revoked_date",
"wallet_id",
"credential_set", "credential_type",
"addresses", "attributes", "names",
"topic",
"related_topics",
)
# used by ExactFilter
exact_fields = (
"credential_set_id",
"credential_type_id",
"issuer_id",
"schema_name",
"schema_version",
"topic_id",
"topic_type",
"wallet_id",
)
# used by HaystackFilter
search_fields = (
"location",
"effective_date",
"revoked_date",
"score",
)
# used by StatusFilter
status_fields = {
"inactive": "false",
"latest": "true",
"revoked": "false",
}
class CredentialAutocompleteSerializer(HaystackSerializerMixin, CredentialSerializer):
names = CredentialNameSerializer(many=True)
class Meta(CredentialSerializer.Meta):
fields = (
"id", "names", "inactive",
)
status_fields = {
"inactive": None,
"latest": "true",
"revoked": "false",
}
class CredentialTopicSearchSerializer(CredentialSearchSerializer):
"""
Return credentials with addresses and attributes removed, but
added for the related topic instead
"""
topic = CredentialTopicExtSerializer()
class Meta(CredentialSearchSerializer.Meta):
fields = (
"id", "create_timestamp", "update_timestamp",
"effective_date",
"inactive", "latest", "revoked", "revoked_date",
"wallet_id",
"credential_set",
"credential_type",
"attributes",
"names",
"topic",
"related_topics",
)
class CredentialFacetSerializer(HaystackFacetSerializer):
serialize_objects = True
class Meta:
index_classes = [CredentialIndex]
fields = [
"category",
"credential_type_id",
"issuer_id",
#"inactive",
#"topic_type",
]
field_options = {
"category": {},
"credential_type_id": {},
"issuer_id": {},
#"inactive": {},
#"topic_type": {},
# date faceting isn't working, needs to use Solr range faceting
# https://github.com/django-haystack/django-haystack/issues/1572
# "effective_date": {
# "start_date": datetime.now() - timedelta(days=50000),
# "end_date": datetime.now(),
# "gap_by": "month",
# "gap_amount": 3
# },
}
def get_fields(self):
field_mapping = OrderedDict()
field_mapping["facets"] = SerializerMethodField()
if self.serialize_objects is True:
field_mapping["objects"] = SerializerMethodField()
return field_mapping
def get_facets(self, instance):
result = OrderedDict()
for facet_type, facet_data in instance.items():
serial_data = {}
for field, facets in facet_data.items():
serial_data[field] = self.format_facets(field, facets)
result[facet_type] = serial_data
return result
def format_facets(self, field_name, facets):
result = []
for facet in facets:
row = {'value': facet[0], 'count': facet[1]}
# naive method - can be optimized
if field_name == "issuer_id":
row['text'] = Issuer.objects.get(pk=row['value']).name
elif field_name == "credential_type_id":
row['text'] = CredentialType.objects.get(pk=row['value']).description
result.append(row)
return result
def get_objects(self, instance):
"""
Overriding default behaviour to use more standard pagination info
"""
view = self.context["view"]
queryset = self.context["objects"]
page = view.paginate_queryset(queryset)
if page is not None:
serializer = view.get_facet_objects_serializer(page, many=True)
response = view.paginator.get_paginated_response(serializer.data)
return response.data # unwrap value
return super(CredentialFacetSerializer, self).get_objects()
| apache-2.0 | 1,242,521,563,465,730,800 | 32.347222 | 89 | 0.592003 | false |
adambrenecki/django | django/db/models/sql/compiler.py | 2 | 49329 | import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.util import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
# When ordering a queryset with distinct on a column not part of the
# select set, the ordering column needs to be added to the select
# clause. This information is needed both in SQL construction and
# masking away the ordering selects from the returned row.
self.ordering_aliases = []
self.ordering_params = []
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
# TODO: after the query has been executed, the altered state should be
# cleaned. We are not using a clone() of the query here.
"""
if not self.query.tables:
self.query.join((None, self.query.get_meta().db_table, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
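    # Illustrative example (editor's note): with a backend whose quote_name()
    # double-quotes identifiers, quote_name_unless_alias('auth_user') returns
    # '"auth_user"', while a generated alias such as 'T3' (present in
    # query.alias_map but not in query.table_map) is returned unchanged.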
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.refcounts_before = self.query.alias_refcount.copy()
out_cols, s_params = self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering' and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
having_group_by = self.query.having.get_cols()
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
params.extend(o_params)
result.append(', '.join(out_cols + self.ordering_aliases))
params.extend(s_params)
params.extend(self.ordering_params)
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
# If we've been asked for a NOWAIT query but the backend does not support it,
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(self.refcounts_before)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement, as well as
        a list of any extra parameters that need to be included. If no columns
have been specified, returns all columns relating to fields in the
model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col, _ in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = col.as_sql(qn, self.connection)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
for (table, col), _ in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
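# For illustration only (assumed field names): a distinct_fields value such as
# ['author__name'] would contribute a quoted entry of the form
# "author_table"."name" to the returned list.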
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.get_meta().ordering
or [])
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
params = []
ordering_params = []
for pos, field in enumerate(ordering):
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, cols, order in self.find_ordering_name(field,
self.query.get_meta(), default_order=asc):
for col in cols:
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if col not in self.query.extra_select:
sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
ordering_aliases.append(sql)
ordering_params.extend(self.query.extra[col][1])
else:
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
ordering_params.extend(params)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra[col])
self.ordering_aliases = ordering_aliases
self.ordering_params = ordering_params
return result, params, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and path and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(self.query.alias_map[j].table_name for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(alias, [t.column for t in targets], order)]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_ordering and get_distinct. This method will
call query.setup_joins, handle refcounts and then promote the joins.
Note that get_ordering and get_distinct must produce the same target
columns on the same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
# We will later on need to promote those joins that were added to the
# query afresh above.
joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
alias = joins[-1]
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
# Ordering or distinct must not affect the returned set, and INNER
# JOINS for nullable fields could do this.
self.query.promote_joins(joins_to_promote)
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
from_params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = '' if alias == name else (' %s' % alias)
if join_type and not first:
extra_cond = join_field.get_extra_restriction(
self.query.where_class, alias, lhs)
if extra_cond:
extra_sql, extra_params = extra_cond.as_sql(
qn, self.connection)
extra_sql = 'AND (%s)' % extra_sql
from_params.extend(extra_params)
else:
extra_sql = ""
result.append('%s %s%s ON ('
% (join_type, qn(name), alias_str))
for index, (lhs_col, rhs_col) in enumerate(join_cols):
if index != 0:
result.append(' AND ')
result.append('%s.%s = %s.%s' %
(qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
result.append('%s)' % extra_sql)
else:
connector = '' if first else ', '
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = '' if first else ', '
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, from_params
def get_grouping(self, having_group_by, ordering_group_by):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
# Just the column, not the fields.
select_cols = [s[0] for s in select_cols]
if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.get_meta().db_table, self.query.get_meta().pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + having_group_by + select_cols
for col in cols:
col_params = ()
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
sql, col_params = col.as_sql(qn, self.connection)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
params.extend(col_params)
seen.add(sql)
# We still need to add everything in the ordering (unless the backend
# can group by just the PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so, we add same SQL in "has params" case.
if order not in seen or params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None, nullable=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
only_load = self.query.get_loaded_field_names()
# Set up for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
# get_fields_with_model() returns None for fields that live on
# the field's local model. So, for those fields we want to use
# f.model - that is, the field's local model.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
promote = nullable or f.null
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias, outer_if_first=promote)
alias = joins[-1]
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.concrete_fields))
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
next, restricted, new_nullable)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.related_query_name()], opts, root_alias, outer_if_first=True)
alias = joins[-1]
from_parent = (opts.model if issubclass(model, opts.model)
else None)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, from_parent=from_parent)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field
in zip(columns, model._meta.concrete_fields))
next = requested.get(f.related_query_name(), {})
# Use True here because we are looking at the _reverse_ side of
# the relation, which is always nullable.
new_nullable = True
self.fill_related_selections(model._meta, alias, cur_depth + 1,
next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to a mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_cols isn't populated until
# execute_sql() has been called.
# We also include types of fields of related models that
# will be included via select_related() for the benefit
# of MySQL/MySQLdb when boolean fields are involved
# (#15040).
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select:
fields = [f.field for f in self.query.select]
else:
fields = self.query.get_meta().concrete_fields
fields = fields + [f.field for f in self.query.related_select_cols]
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.get_meta().db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
aggregate_start = len(self.query.extra_select) + len(loaded_fields)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple(
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
) + tuple(row[aggregate_end:])
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.ordering_aliases:
return cursor.fetchone()[:-len(self.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.ordering_aliases:
result = order_modified_iter(cursor, len(self.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
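# A minimal usage sketch of execute_sql (the query/connection objects below are
# assumptions, not defined in this module):
#
#   compiler = some_query.get_compiler(connection=some_connection)
#   for rows in compiler.execute_sql(MULTI):   # blocks of rows via fetchmany()
#       for row in rows:
#           handle(row)
#   one_row = compiler.execute_sql(SINGLE)     # a single row, or None
#   cursor = compiler.execute_sql(None)        # the raw cursor, if a query ran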
def as_subquery_condition(self, alias, columns, qn):
inner_qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs = '%s.%s' % (inner_qn(select_col.col[0]), qn2(select_col.col[1]))
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple(v for val in values for v in val))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
cursor = self.connection.cursor()
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
# Recheck the count - it is possible that fiddling with the select
# fields above removes tables from the query. Refs #18304.
count = query.count_active_tables()
if not self.query.related_updates and count == 1:
return
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql, params = [], []
for aggregate in self.query.aggregate_select.values():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
sql.append(agg_sql)
params.extend(agg_params)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateField
fields = [DateField()]
else:
from django.db.backends.util import typecast_date
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_date(str(date))
if isinstance(date, datetime.datetime):
date = date.date()
yield date
class SQLDateTimeCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
datetime = row[offset]
if resolve_columns:
datetime = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
datetime = typecast_timestamp(str(datetime))
# Datetimes are artificially returned in UTC on databases that
# don't support time zone. Restore the zone used in the query.
if settings.USE_TZ:
if datetime is None:
raise ValueError("Database returned an invalid value "
"in QuerySet.dates(). Are time zone "
"definitions and pytz installed?")
datetime = datetime.replace(tzinfo=None)
datetime = timezone.make_aware(datetime, self.query.tzinfo)
yield datetime
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
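# Illustrative sketch (assumed data): with trim=2, a fetched block such as
# [(1, 'a', 'x', 'y'), (2, 'b', 'x', 'z')] is yielded as
# [(1, 'a'), (2, 'b')] -- the trailing ordering-only columns are dropped.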
| bsd-3-clause | -8,292,894,387,023,872,000 | 43.440541 | 136 | 0.559772 | false |
aashish24/VTK-old | Examples/VisualizationAlgorithms/Python/SubsampleGrid.py | 27 | 2314 | #!/usr/bin/env python
# This example demonstrates the subsampling of a structured grid.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Read some structured data.
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
pl3d_output = pl3d.GetOutput().GetBlock(0)
# Here we subsample the grid. The SetVOI method requires six values
# specifying (imin,imax, jmin,jmax, kmin,kmax) extents. In this
# example we are extracting a plane. Note that the VOI is clamped to zero
# (min) and the maximum i-j-k value; that way we can use the
# -1000,1000 specification and be sure the values are clamped. The
# SampleRate specifies that we take every point in the i-direction;
# every other point in the j-direction; and every third point in the
# k-direction. IncludeBoundaryOn makes sure that we get the boundary
# points even if the SampleRate does not coincide with the boundary.
extract = vtk.vtkExtractGrid()
extract.SetInputData(pl3d_output)
extract.SetVOI(30, 30, -1000, 1000, -1000, 1000)
extract.SetSampleRate(1, 2, 3)
extract.IncludeBoundaryOn()
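# As an aside (an assumed alternative configuration, not used below), a coarse
# sub-volume rather than a single plane could be requested with, e.g.:
# extract.SetVOI(-1000, 1000, -1000, 1000, -1000, 1000)
# extract.SetSampleRate(2, 2, 2)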
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(extract.GetOutputPort())
mapper.SetScalarRange(.18, .7)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(pl3d_output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
# Add the usual rendering stuff.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(outlineActor)
ren.AddActor(actor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(300, 180)
cam1 = ren.GetActiveCamera()
cam1.SetClippingRange(2.64586, 47.905)
cam1.SetFocalPoint(8.931, 0.358127, 31.3526)
cam1.SetPosition(29.7111, -0.688615, 37.1495)
cam1.SetViewUp(-0.268328, 0.00801595, 0.963294)
iren.Initialize()
renWin.Render()
iren.Start()
| bsd-3-clause | -8,708,941,193,275,408,000 | 33.029412 | 70 | 0.782195 | false |
plotly/plotly.py | packages/python/plotly/plotly/validators/scatter/marker/colorbar/title/_font.py | 2 | 1542 | import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="font", parent_name="scatter.marker.colorbar.title", **kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
| mit | -3,696,497,924,585,237,500 | 38.538462 | 87 | 0.538911 | false |
cgimenop/Excel2Testlink | ExcelParser/lib/openpyxl/chart/error_bar.py | 8 | 1796 | from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
Float,
Set,
Alias
)
from openpyxl.descriptors.excel import ExtensionList
from openpyxl.descriptors.nested import (
NestedNoneSet,
NestedSet,
NestedBool,
NestedFloat,
)
from .data_source import NumDataSource
from .shapes import GraphicalProperties
class ErrorBars(Serialisable):
tagname = "errBars"
errDir = NestedNoneSet(values=(['x', 'y']))
direction = Alias("errDir")
errBarType = NestedSet(values=(['both', 'minus', 'plus']))
style = Alias("errBarType")
errValType = NestedSet(values=(['cust', 'fixedVal', 'percentage', 'stdDev', 'stdErr']))
size = Alias("errValType")
noEndCap = NestedBool(nested=True, allow_none=True)
plus = Typed(expected_type=NumDataSource, allow_none=True)
minus = Typed(expected_type=NumDataSource, allow_none=True)
val = NestedFloat(allow_none=True)
spPr = Typed(expected_type=GraphicalProperties, allow_none=True)
graphicalProperties = Alias("spPr")
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('errDir','errBarType', 'errValType', 'noEndCap','minus', 'plus', 'val', 'spPr')
def __init__(self,
errDir=None,
errBarType="both",
errValType="fixedVal",
noEndCap=None,
plus=None,
minus=None,
val=None,
spPr=None,
extLst=None,
):
self.errDir = errDir
self.errBarType = errBarType
self.errValType = errValType
self.noEndCap = noEndCap
self.plus = plus
self.minus = minus
self.val = val
self.spPr = spPr
| mit | -7,321,727,335,167,226,000 | 28.933333 | 99 | 0.615813 | false |
kdani3/searx | searx/engines/openstreetmap.py | 7 | 3113 | """
OpenStreetMap (Map)
@website https://openstreetmap.org/
@provide-api yes (http://wiki.openstreetmap.org/wiki/Nominatim)
@using-api yes
@results JSON
@stable yes
@parse url, title
"""
from json import loads
from searx.utils import searx_useragent
# engine dependent config
categories = ['map']
paging = False
# search-url
base_url = 'https://nominatim.openstreetmap.org/'
search_string = 'search/{query}?format=json&polygon_geojson=1&addressdetails=1'
result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
# do search-request
def request(query, params):
params['url'] = base_url + search_string.format(query=query)
# using searx User-Agent
params['headers']['User-Agent'] = searx_useragent()
return params
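# For example (illustrative only), a query of "berlin" produces a request URL of
# https://nominatim.openstreetmap.org/search/berlin?format=json&polygon_geojson=1&addressdetails=1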
# get response from search-request
def response(resp):
results = []
json = loads(resp.text)
# parse results
for r in json:
if 'display_name' not in r:
continue
title = r['display_name']
osm_type = r.get('osm_type', r.get('type'))
url = result_base_url.format(osm_type=osm_type,
osm_id=r['osm_id'])
osm = {'type': osm_type,
'id': r['osm_id']}
geojson = r.get('geojson')
# if no geojson is found and osm_type is a node, add geojson Point
if not geojson and osm_type == 'node':
geojson = {u'type': u'Point', u'coordinates': [r['lon'], r['lat']]}
address_raw = r.get('address')
address = {}
# get name
if r['class'] == 'amenity' or\
r['class'] == 'shop' or\
r['class'] == 'tourism' or\
r['class'] == 'leisure':
if address_raw.get('address29'):
address = {'name': address_raw.get('address29')}
else:
address = {'name': address_raw.get(r['type'])}
# add the rest of the address data, if something was already found
if address.get('name'):
address.update({'house_number': address_raw.get('house_number'),
'road': address_raw.get('road'),
'locality': address_raw.get('city',
address_raw.get('town', # noqa
address_raw.get('village'))), # noqa
'postcode': address_raw.get('postcode'),
'country': address_raw.get('country'),
'country_code': address_raw.get('country_code')})
else:
address = None
# append result
results.append({'template': 'map.html',
'title': title,
'content': '',
'longitude': r['lon'],
'latitude': r['lat'],
'boundingbox': r['boundingbox'],
'geojson': geojson,
'address': address,
'osm': osm,
'url': url})
# return results
return results
| agpl-3.0 | 5,360,120,528,442,398,000 | 30.444444 | 79 | 0.499839 | false |
Mj258/weiboapi | srapyDemo/envs/Lib/site-packages/twisted/conch/manhole_tap.py | 8 | 4221 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
TAP plugin for creating telnet- and ssh-accessible manhole servers.
@author: Jp Calderone
"""
from zope.interface import implementer
from twisted.internet import protocol
from twisted.application import service, strports
from twisted.cred import portal, checkers
from twisted.python import usage
from twisted.conch.insults import insults
from twisted.conch import manhole, manhole_ssh, telnet
class makeTelnetProtocol:
def __init__(self, portal):
self.portal = portal
def __call__(self):
auth = telnet.AuthenticatingTelnetProtocol
args = (self.portal,)
return telnet.TelnetTransport(auth, *args)
class chainedProtocolFactory:
def __init__(self, namespace):
self.namespace = namespace
def __call__(self):
return insults.ServerProtocol(manhole.ColoredManhole, self.namespace)
@implementer(portal.IRealm)
class _StupidRealm:
def __init__(self, proto, *a, **kw):
self.protocolFactory = proto
self.protocolArgs = a
self.protocolKwArgs = kw
def requestAvatar(self, avatarId, *interfaces):
if telnet.ITelnetProtocol in interfaces:
return (telnet.ITelnetProtocol,
self.protocolFactory(*self.protocolArgs, **self.protocolKwArgs),
lambda: None)
raise NotImplementedError()
class Options(usage.Options):
optParameters = [
["telnetPort", "t", None, "strports description of the address on which to listen for telnet connections"],
["sshPort", "s", None, "strports description of the address on which to listen for ssh connections"],
["passwd", "p", "/etc/passwd", "name of a passwd(5)-format username/password file"]]
def __init__(self):
usage.Options.__init__(self)
self['namespace'] = None
def postOptions(self):
if self['telnetPort'] is None and self['sshPort'] is None:
raise usage.UsageError("At least one of --telnetPort and --sshPort must be specified")
def makeService(options):
"""Create a manhole server service.
@type options: C{dict}
@param options: A mapping describing the configuration of
the desired service. Recognized key/value pairs are::
"telnetPort": strports description of the address on which
to listen for telnet connections. If None,
no telnet service will be started.
"sshPort": strports description of the address on which to
listen for ssh connections. If None, no ssh
service will be started.
"namespace": dictionary containing desired initial locals
for manhole connections. If None, an empty
dictionary will be used.
"passwd": Name of a passwd(5)-format username/password file.
@rtype: L{twisted.application.service.IService}
@return: A manhole service.
"""
svc = service.MultiService()
namespace = options['namespace']
if namespace is None:
namespace = {}
checker = checkers.FilePasswordDB(options['passwd'])
if options['telnetPort']:
telnetRealm = _StupidRealm(telnet.TelnetBootstrapProtocol,
insults.ServerProtocol,
manhole.ColoredManhole,
namespace)
telnetPortal = portal.Portal(telnetRealm, [checker])
telnetFactory = protocol.ServerFactory()
telnetFactory.protocol = makeTelnetProtocol(telnetPortal)
telnetService = strports.service(options['telnetPort'],
telnetFactory)
telnetService.setServiceParent(svc)
if options['sshPort']:
sshRealm = manhole_ssh.TerminalRealm()
sshRealm.chainedProtocolFactory = chainedProtocolFactory(namespace)
sshPortal = portal.Portal(sshRealm, [checker])
sshFactory = manhole_ssh.ConchFactory(sshPortal)
sshService = strports.service(options['sshPort'],
sshFactory)
sshService.setServiceParent(svc)
return svc
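# A minimal usage sketch (the port strings, namespace and 'application' object
# are assumptions, not defaults of this module):
#
#   svc = makeService({'telnetPort': 'tcp:4040',
#                      'sshPort': None,
#                      'namespace': {'answer': 42},
#                      'passwd': '/etc/passwd'})
#   svc.setServiceParent(application)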
| mit | -176,357,809,138,955,780 | 33.317073 | 115 | 0.641317 | false |
ai-ku/langvis | dependencies/jython-2.1/Lib/xdrlib.py | 4 | 7540 | """Implements (a subset of) Sun XDR -- eXternal Data Representation.
See: RFC 1014
"""
import struct
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error:
"""Exception class for this module. Use:
except xdrlib.Error, var:
# var has the Error instance for the exception
Public ivars:
msg -- contains the message
"""
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return repr(self.msg)
def __str__(self):
return str(self.msg)
class ConversionError(Error):
pass
class Packer:
"""Pack various data representations into a buffer."""
def __init__(self):
self.reset()
def reset(self):
self.__buf = ''
def get_buffer(self):
return self.__buf
# backwards compatibility
get_buf = get_buffer
def pack_uint(self, x):
self.__buf = self.__buf + struct.pack('>L', x)
pack_int = pack_uint
pack_enum = pack_int
def pack_bool(self, x):
if x: self.__buf = self.__buf + '\0\0\0\1'
else: self.__buf = self.__buf + '\0\0\0\0'
def pack_uhyper(self, x):
self.pack_uint(x>>32 & 0xffffffffL)
self.pack_uint(x & 0xffffffffL)
pack_hyper = pack_uhyper
def pack_float(self, x):
try: self.__buf = self.__buf + struct.pack('>f', x)
except struct.error, msg:
raise ConversionError, msg
def pack_double(self, x):
try: self.__buf = self.__buf + struct.pack('>d', x)
except struct.error, msg:
raise ConversionError, msg
def pack_fstring(self, n, s):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
n = ((n+3)/4)*4
data = s[:n]
data = data + (n - len(data)) * '\0'
self.__buf = self.__buf + data
pack_fopaque = pack_fstring
def pack_string(self, s):
n = len(s)
self.pack_uint(n)
self.pack_fstring(n, s)
pack_opaque = pack_string
pack_bytes = pack_string
def pack_list(self, list, pack_item):
for item in list:
self.pack_uint(1)
pack_item(item)
self.pack_uint(0)
def pack_farray(self, n, list, pack_item):
if len(list) != n:
raise ValueError, 'wrong array size'
for item in list:
pack_item(item)
def pack_array(self, list, pack_item):
n = len(list)
self.pack_uint(n)
self.pack_farray(n, list, pack_item)
class Unpacker:
"""Unpacks various data representations from the given buffer."""
def __init__(self, data):
self.reset(data)
def reset(self, data):
self.__buf = data
self.__pos = 0
def get_position(self):
return self.__pos
def set_position(self, position):
self.__pos = position
def get_buffer(self):
return self.__buf
def done(self):
if self.__pos < len(self.__buf):
raise Error('unextracted data remains')
def unpack_uint(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
x = struct.unpack('>L', data)[0]
try:
return int(x)
except OverflowError:
return x
def unpack_int(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>l', data)[0]
unpack_enum = unpack_int
unpack_bool = unpack_int
def unpack_uhyper(self):
hi = self.unpack_uint()
lo = self.unpack_uint()
return long(hi)<<32 | lo
def unpack_hyper(self):
x = self.unpack_uhyper()
if x >= 0x8000000000000000L:
x = x - 0x10000000000000000L
return x
def unpack_float(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>f', data)[0]
def unpack_double(self):
i = self.__pos
self.__pos = j = i+8
data = self.__buf[i:j]
if len(data) < 8:
raise EOFError
return struct.unpack('>d', data)[0]
def unpack_fstring(self, n):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
i = self.__pos
j = i + (n+3)/4*4
if j > len(self.__buf):
raise EOFError
self.__pos = j
return self.__buf[i:i+n]
unpack_fopaque = unpack_fstring
def unpack_string(self):
n = self.unpack_uint()
return self.unpack_fstring(n)
unpack_opaque = unpack_string
unpack_bytes = unpack_string
def unpack_list(self, unpack_item):
list = []
while 1:
x = self.unpack_uint()
if x == 0: break
if x != 1:
raise ConversionError, '0 or 1 expected, got ' + `x`
item = unpack_item()
list.append(item)
return list
def unpack_farray(self, n, unpack_item):
list = []
for i in range(n):
list.append(unpack_item())
return list
def unpack_array(self, unpack_item):
n = self.unpack_uint()
return self.unpack_farray(n, unpack_item)
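# A minimal round-trip sketch (values are illustrative):
#
# p = Packer()
# p.pack_uint(7)
# p.pack_string('hello')
# u = Unpacker(p.get_buffer())
# assert u.unpack_uint() == 7
# assert u.unpack_string() == 'hello'
# u.done()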
# test suite
def _test():
p = Packer()
packtest = [
(p.pack_uint, (9,)),
(p.pack_bool, (None,)),
(p.pack_bool, ('hello',)),
(p.pack_uhyper, (45L,)),
(p.pack_float, (1.9,)),
(p.pack_double, (1.9,)),
(p.pack_string, ('hello world',)),
(p.pack_list, (range(5), p.pack_uint)),
(p.pack_array, (['what', 'is', 'hapnin', 'doctor'], p.pack_string)),
]
succeedlist = [1] * len(packtest)
count = 0
for method, args in packtest:
print 'pack test', count,
try:
apply(method, args)
print 'succeeded'
except ConversionError, var:
print 'ConversionError:', var.msg
succeedlist[count] = 0
count = count + 1
data = p.get_buffer()
# now verify
up = Unpacker(data)
unpacktest = [
(up.unpack_uint, (), lambda x: x == 9),
(up.unpack_bool, (), lambda x: not x),
(up.unpack_bool, (), lambda x: x),
(up.unpack_uhyper, (), lambda x: x == 45L),
(up.unpack_float, (), lambda x: 1.89 < x < 1.91),
(up.unpack_double, (), lambda x: 1.89 < x < 1.91),
(up.unpack_string, (), lambda x: x == 'hello world'),
(up.unpack_list, (up.unpack_uint,), lambda x: x == range(5)),
(up.unpack_array, (up.unpack_string,),
lambda x: x == ['what', 'is', 'hapnin', 'doctor']),
]
count = 0
for method, args, pred in unpacktest:
print 'unpack test', count,
try:
if succeedlist[count]:
x = apply(method, args)
print pred(x) and 'succeeded' or 'failed', ':', x
else:
print 'skipping'
except ConversionError, var:
print 'ConversionError:', var.msg
count = count + 1
if __name__ == '__main__':
_test()
| mit | -8,896,721,062,047,478,000 | 24.83274 | 78 | 0.495491 | false |
back-to/streamlink | tests/plugins/test_tvrby.py | 8 | 1397 | import unittest
from streamlink.plugins.tvrby import TVRBy
class TestPluginTVRBy(unittest.TestCase):
def test_can_handle_url(self):
# should match
self.assertTrue(TVRBy.can_handle_url("http://www.tvr.by/televidenie/belarus-1/"))
self.assertTrue(TVRBy.can_handle_url("http://www.tvr.by/televidenie/belarus-1"))
self.assertTrue(TVRBy.can_handle_url("http://www.tvr.by/televidenie/belarus-24/"))
self.assertTrue(TVRBy.can_handle_url("http://www.tvr.by/televidenie/belarus-24"))
# shouldn't match
self.assertFalse(TVRBy.can_handle_url("http://www.tv8.cat/algo/"))
self.assertFalse(TVRBy.can_handle_url("http://www.tvcatchup.com/"))
self.assertFalse(TVRBy.can_handle_url("http://www.youtube.com/"))
def test_url_fix(self):
self.assertTrue(
"http://www.tvr.by/televidenie/belarus-1/",
TVRBy("http://www.tvr.by/televidenie/belarus-1/").url)
self.assertTrue(
"http://www.tvr.by/televidenie/belarus-1/",
TVRBy("http://www.tvr.by/televidenie/belarus-1").url)
self.assertTrue(
"http://www.tvr.by/televidenie/belarus-24/",
TVRBy("http://www.tvr.by/televidenie/belarus-24/").url)
self.assertTrue(
"http://www.tvr.by/televidenie/belarus-24/",
TVRBy("http://www.tvr.by/televidenie/belarus-24").url)
| bsd-2-clause | -8,068,501,491,426,433,000 | 44.064516 | 90 | 0.637079 | false |
sparkslabs/kamaelia_ | Sketches/RJL/Util/Chunkifier.py | 3 | 5392 | #!/usr/bin/env python2.3
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=========================
Chunkifier
=========================
A component that fixes the message size of an input stream to a given value,
outputting blocks of that size when sufficient input has accumulated. This
component's input is stream orientated - all messages received are
concatenated to the internal buffer without divisions.
Example Usage
-------------
Chunkifying a console reader::
pipeline(
ConsoleReader(eol=""),
Chunkifier(20),
ConsoleEchoer()
).run()
How does it work?
-----------------
Messages received on the "inbox" are buffered until at least N bytes have
been collected. A message containing those first N bytes is sent out of
"outbox". A CharacterFIFO object is used to do this in linear time.
The usual method of sending a producerFinished/shutdown to the "control"
inbox is used to shut it down.
This component does not terminate.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdown
import string
class CharacterFIFO(object):
def __init__(self):
self.queuearray = []
self.length = 0
self.startboundary = 0
def push(self, text):
self.queuearray.append(text)
self.length += len(text)
def __len__(self):
return self.length
def poplength(self, length):
if len(self) < length:
raise IndexError
else:
thischunk = []
sizeneeded = length
while 1:
chunk = self.queuearray[0]
sizeneeded -= len(chunk) - self.startboundary
if sizeneeded < 0: # new start boundary in the middle of this chunk
thischunk.append(chunk[self.startboundary:len(chunk) + sizeneeded])
self.startboundary = len(chunk) + sizeneeded
else: # this chunk is completely within the requested string
if self.startboundary > 0:
thischunk.append(chunk[self.startboundary:])
else:
thischunk.append(chunk)
self.queuearray.pop(0)
self.startboundary = 0
if sizeneeded <= 0:
break
self.length -= length
return string.join(thischunk, "")
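# A small illustrative sketch of CharacterFIFO (values are assumptions):
#
# fifo = CharacterFIFO()
# fifo.push("abc"); fifo.push("defg")
# fifo.poplength(5) # -> "abcde", leaving "fg" buffered (len(fifo) == 2)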
class Chunkifier(component):
"""\
Chunkifier([chunksize]) -> new Chunkifier component.
Flow controller - collects incoming data and outputs it only as quanta of
a given length in bytes (chunksize), unless the input stream ends (producerFinished).
Keyword arguments:
- chunksize -- Chunk size in bytes
"""
Inboxes = { "inbox" : "Data stream to be split into chunks",
"control": "UNUSED" }
Outboxes = { "outbox" : "Each message is a chunk",
"signal": "UNUSED" }
def __init__(self, chunksize = 1048576, nodelay = False):
super(Chunkifier, self).__init__()
self.forwardqueue = CharacterFIFO()
self.chunksize = chunksize
self.nodelay = nodelay
def sendPartialChunk(self):
if len(self.forwardqueue) > 0:
self.send(self.forwardqueue.poplength(len(self.forwardqueue)), "outbox")
def sendChunk(self):
self.send(self.forwardqueue.poplength(self.chunksize), "outbox")
def main(self):
while 1:
yield 1
while self.dataReady("inbox"):
msg = self.recv("inbox")
self.forwardqueue.push(msg)
while len(self.forwardqueue) >= self.chunksize:
self.sendChunk()
if self.nodelay:
self.sendPartialChunk()
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished):
self.sendPartialChunk()
self.send(msg, "signal")
return
elif isinstance(msg, shutdown):
self.send(msg, "signal")
return
self.pause()
if __name__ == '__main__':
from Kamaelia.Util.PipelineComponent import pipeline
from Kamaelia.Util.Console import ConsoleEchoer, ConsoleReader
pipeline(
ConsoleReader(eol=""),
Chunkifier(20),
ConsoleEchoer()
).run()
| apache-2.0 | -7,018,564,507,208,332,000 | 32.079755 | 90 | 0.580304 | false |
addition-it-solutions/project-all | addons/hw_escpos/__openerp__.py | 8 | 1713 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'ESC/POS Hardware Driver',
'version': '1.0',
'category': 'Hardware Drivers',
'sequence': 6,
'website': 'https://www.odoo.com/page/point-of-sale',
'summary': 'Hardware Driver for ESC/POS Printers and Cashdrawers',
'description': """
ESC/POS Hardware Driver
=======================
This module allows openerp to print with ESC/POS compatible printers and
to open ESC/POS controlled cashdrawers in the point of sale and other modules
that would need such functionality.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'external_dependencies': {
'python' : ['usb.core','serial','qrcode'],
},
'test': [
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 | 3,878,338,141,428,329,500 | 34.6875 | 78 | 0.603619 | false |
apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/controllers/xml_configuration/xml_controller_results.py | 1 | 11688 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from lxml.etree import Element
import time
from PyQt4.QtGui import QMenu, QCursor, QDialog
from opus_gui.main.controllers.dialogs.message_box import MessageBox
from opus_gui.results_manager.controllers.dialogs.add_indicator_batch import AddIndicatorBatch
from opus_gui.results_manager.controllers.dialogs.get_run_info import GetRunInfo
from opus_gui.abstract_manager.controllers.xml_configuration.xml_controller import XmlController
from opus_gui.results_manager.controllers.dialogs.configure_existing_batch_indicator_visualization import ConfigureExistingBatchIndicatorVisualization
from opus_gui.results_manager.controllers.dialogs.configure_new_batch_indicator_visualization import ConfigureNewBatchIndicatorVisualization
from opus_gui.results_manager.controllers.dialogs.indicator_batch_run_form import IndicatorBatchRunForm
from opus_gui.results_manager.results_manager_functions import get_available_run_nodes, get_run_manager
from opus_gui.results_manager.controllers.dialogs.import_run_dialog import ImportRunDialog
from opus_gui.util.icon_library import IconLibrary
from opus_gui.util import common_dialogs
from opus_gui.main.controllers.instance_handlers import get_manager_instance
from opus_gui.main.controllers.instance_handlers import get_mainwindow_instance
from opus_gui.scenarios_manager.run.run_simulation import OpusModel
from opus_gui.scenarios_manager.controllers.tabs.simulation_gui_element_restart import InputRestartYearsDialog
from opus_gui.scenarios_manager.controllers.tabs.simulation_gui_element_restart import SimulationGuiElementRestart
class XmlController_Results(XmlController):
''' XmlController for the Results Manager '''
def __init__(self, manager):
XmlController.__init__(self, manager)
p = ('add', "Add new indicator batch...", self._addIndicatorBatch)
self.actAddNewIndicatorBatch = self.create_action(*p)
p = ('add', 'Add new indicator visualization...', self._configureNewBatchIndicatorVisualization)
self.actAddVisualizationToBatch = self.create_action(*p)
p = ('configure', "Configure visualization", self._configureExistingBatchIndicatorVisualization)
self.actConfigureExistingBatchIndicatorVis = self.create_action(*p)
p = ('info_small', "Show details", self._getInfoSimulationRuns)
self.actGetInfoSimulationRuns = self.create_action(*p)
p = ('import', "Import run from disk", self._importRun)
self.actImportRun = self.create_action(*p)
p = ('delete', "Remove run and delete from hard drive...", self._delete_selected_run)
self.actDeleteRun = self.create_action(*p)
p = ('restart', "Restart run...", self._restart_selected_run)
self.actRestartRun = self.create_action(*p)
def _restart_selected_run(self):
assert self.has_selected_item()
run_node = self.selected_item().node
# ask for start_year, end_year
# start_year default to the last year of years run
# need to avoid insert auto generate run directory again
# original_start_year = int(run_node.find('start_year').text)
# original_end_year = int(run_node.find('end_year').text)
# restart_year = original_end_year + 1
# end_year = restart_year + 1
run_name = run_node.get('name')
scenario_name = run_node.find('scenario_name').text
run_id = run_node.get('run_id')
try:
run_id = int(run_id)
except ValueError:
raise ValueError, "run_id for run %s is invalid: %s; the run cannot be restarted." % \
(run_name, run_id)
run_manager = get_run_manager()
config = run_manager.get_resources_for_run_id_from_history(run_id)
xml_config = self.project.xml_config
opusgui = get_mainwindow_instance()
scenario_manager = get_manager_instance('scenario_manager')
opusmodel = OpusModel(scenario_manager, xml_config, scenario_name)
tab_widget = SimulationGuiElementRestart(mainwindow=opusgui,
runManager=scenario_manager,
model=opusmodel,
xml_config=xml_config,
run_id=run_id,
run_name=run_name)
tab_widget.config = config
# tab_widget.start_year = start_year
# tab_widget.end_year = end_year
scenario_manager._attach_tab(tab_widget)
opusgui.update()
#time.sleep(1)
#tab_widget._init_run(run_name)
#tab_widget.runThread.restart_run(run_id, config, restart_year,
# run_name=run_name)
def _addIndicatorBatch(self):#, viz = None):
def _add_indicator_batch_callback(batch_name):
# Create a new node with the given name and insert it into the model
node = Element('indicator_batch', {'name': batch_name})
batches_node = self.project.find('results_manager/indicator_batches')
self.model.insert_node(node, batches_node)
assert self.has_selected_item()
window = AddIndicatorBatch(callback = _add_indicator_batch_callback,
parent_widget = self.view)
window.show()
def _configureNewBatchIndicatorVisualization(self):#, viz = None):
assert self.has_selected_item()
batch_node = self.selected_item().node
window = ConfigureNewBatchIndicatorVisualization(self.project, batch_node, self.view)
window.show()
def _configureExistingBatchIndicatorVisualization(self):
assert self.has_selected_item()
viz_node = self.selected_item().node
window = ConfigureExistingBatchIndicatorVisualization(self.project, viz_node, self.view)
window.show()
def _delete_selected_run(self):
assert self.has_selected_item()
run_node = self.selected_item().node
question = 'Do you want to delete %s from cache and services database?' % \
run_node.get('name')
user_answer = common_dialogs.yes_or_cancel(question)
if user_answer == common_dialogs.YES:
self.delete_run(run_node)
def delete_run(self, run_node, force=False):
'''
Remove a run both from the services database and from the model.
@param run_node (Element): the node to remove.
'''
# Prevent the user from removing base years
cache_directory = run_node.find('cache_directory').text
if cache_directory.endswith('base_year_data') and not force:
msg = ('Removing the base year data directory is restricted from '
'within OpusGUI since doing so will make it impossible to '
'run any simulations or estimations.')
MessageBox.warning(mainwindow = self.view,
text = 'Cannot remove base year data',
detailed_text = msg)
return
try:
run_manager = get_run_manager()
run_id = run_node.get('run_id')
try:
run_id = int(run_id)
except:
run_id = -1
run_manager.delete_everything_for_this_run(run_id, cache_directory)
run_manager.close()
self.model.remove_node(run_node)
#was self.project.delete_node(run_node), but it doesn't update the GUI
#self.project.dirty = True
#update_mainwindow_savestate()
# self.model.remove_node(run_node)
except Exception, ex: # TODO catch more specific error?
MessageBox.warning(self.view, 'Could not remove run', str(ex))
def _importRun(self):
dlg = ImportRunDialog(self, self.manager.base_widget)
dlg.show()
def _createBatchRunMenu(self, attach_to_menu):
'''
Create and populate a 'Run indicator batch' menu for all available runs
@param attach_to_menu (QMenu) menu to attach actions to
'''
        # TODO Verify that this is correct -- it was based on looking in an xml
# file for 'source_data' and simulation_runs was the only section I
# could find it in.
run_nodes = get_available_run_nodes(self.project)
if not run_nodes:
attach_to_menu.setEnabled(False)
return
#T: the following loop causes a problem where
# only "baseyear_data" will be passed to the _indicatorBatchRun method.
# I don't know why.
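        # Note: binding the name as a default argument (x = run_node.get('name'))
        # captures each run's name at lambda-definition time, the usual guard
        # against Python's late-binding closures inside a loop.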
for run_node in run_nodes:
cb = lambda x = run_node.get('name'): self._indicatorBatchRun(run_name = x)
action = self.create_action(IconLibrary.icon('add'), run_node.get('name'), cb)
attach_to_menu.addAction(action)
# #T: The following loop works, but its kind of a nasty method.
# for i in range(len(run_nodes)):
# exec 'cb%i=lambda x = run_nodes[%i]: self._indicatorBatchRun(run_name = run_nodes[%i].tag)'%(i,i,i) in locals()
# exec 'action%i=self.create_action(self.model.acceptIcon, run_nodes[%i].tag, cb%i)'%(i,i,i) in locals()
# exec 'attach_to_menu.addAction(action%i)'%i in locals()
def _indicatorBatchRun(self, run_name):
        ''' Open an IndicatorBatchRunForm to run the selected indicator batch on the named simulation run. '''
assert self.has_selected_item()
node = self.selected_item().node
window = IndicatorBatchRunForm(mainwindow = self.view, resultsManagerBase = self.manager,
batch_name = node.get('name'), run_name = run_name)
window.show()
def _getInfoSimulationRuns(self):
        ''' Show the GetRunInfo details dialog for the selected simulation run node. '''
assert self.has_selected_item()
node = self.selected_item().node
window = GetRunInfo(node, self.view)
window.exec_()
def _viewDocumentation(self):
pass
def add_custom_menu_items_for_node(self, node, menu):
# Populate menu
if node.tag == 'run':
menu.addAction(self.actRestartRun) #TODO:disable restart run for base_year_data
menu.addAction(self.actDeleteRun)
menu.addAction(self.actGetInfoSimulationRuns)
elif node.tag == 'indicator_batches':
menu.addAction(self.actAddNewIndicatorBatch)
elif node.tag == 'simulation_runs':
menu.addAction(self.actImportRun)
elif node.tag == 'indicator_batch':
menu.addAction(self.actAddVisualizationToBatch)
run_batch_on_menu = QMenu('Run indicator batch on...', menu)
self._createBatchRunMenu(run_batch_on_menu)
menu.addMenu(run_batch_on_menu)
elif node.tag == 'batch_visualization': # get('type') == 'batch_visualization':
# check to see if the batch_visualization in question is an inherited node,
# if so, don't display the ability to edit it. This essentially forces
# the user to make it local first.
if node.get('inherited'):
pass
else:
menu.addAction(self.actConfigureExistingBatchIndicatorVis)
# In this menu, the first custom action is always the default action
if not menu.isEmpty():
menu.setDefaultAction(menu.actions()[0])
return node.tag == 'run' # default menu items aren't meaningful for simulation run nodes
| agpl-3.0 | -8,745,276,135,714,334,000 | 48.73617 | 150 | 0.633641 | false |
CarlosCorreiaM16e/bismarck | bismarck_cli/servers/srv_factory.py | 1 | 1254 | '''
Created on 16/10/2014
@author: carlos
'''
import ast
import os
from bismarck_cli.servers.server_context import ServerContext
from bismarck_cli.utils import term
from bismarck_cli.utils.storage import storagize
#------------------------------------------------------------------
def get_server_def( server_name ):
sf = ServerFactory( server_name )
return sf.get_srv_def()
#------------------------------------------------------------------
class ServerFactory( object ):
#------------------------------------------------------------------
def __init__( self, server_name ):
self.server_name = server_name
#------------------------------------------------------------------
def get_srv_def( self ):
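		# Server definitions are stored as plain Python dict literals under
		# ~/.m16e/servers/<server_name>.py, so ast.literal_eval() parses them safely.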
srv_file = os.path.expanduser( '~/.m16e/servers/%s.py'
% self.server_name )
f = open( srv_file )
text = f.read()
f.close()
data = ast.literal_eval( text )
# term.printDebug( 'data:\n%s' % repr( data ) )
srv_def = ServerContext( data=storagize( data ) )
return srv_def
#------------------------------------------------------------------
#------------------------------------------------------------------
| gpl-3.0 | 8,485,425,901,192,145,000 | 29.585366 | 71 | 0.398724 | false |
nikhilprathapani/python-for-android | python3-alpha/extra_modules/gdata/photos/service.py | 44 | 24305 | #!/usr/bin/env python
# -*-*- encoding: utf-8 -*-*-
#
# This is the service file for the Google Photo python client.
# It is used for higher level operations.
#
# $Id: service.py 144 2007-10-25 21:03:34Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google PhotoService provides a human-friendly interface to
Google Photo (a.k.a Picasa Web) services[1].
It extends gdata.service.GDataService and as such hides all the
nasty details about authenticating, parsing and communicating with
Google Photos.
[1]: http://code.google.com/apis/picasaweb/gdata.html
Example:
import gdata.photos, gdata.photos.service
pws = gdata.photos.service.PhotosService()
pws.ClientLogin(username, password)
#Get all albums
albums = pws.GetUserFeed().entry
# Get all photos in second album
photos = pws.GetFeed(albums[1].GetPhotosUri()).entry
# Get all tags for photos in second album and print them
tags = pws.GetFeed(albums[1].GetTagsUri()).entry
  print([ tag.summary.text for tag in tags ])
# Get all comments for the first photos in list and print them
comments = pws.GetCommentFeed(photos[0].GetCommentsUri()).entry
  print([ c.summary.text for c in comments ])
# Get a photo to work with
photo = photos[0]
# Update metadata
# Attributes from the <gphoto:*> namespace
photo.summary.text = u'A nice view from my veranda'
photo.title.text = u'Verandaview.jpg'
# Attributes from the <media:*> namespace
photo.media.keywords.text = u'Home, Long-exposure, Sunset' # Comma-separated
# Adding attributes to media object
# Rotate 90 degrees clockwise
photo.rotation = gdata.photos.Rotation(text='90')
# Submit modified photo object
photo = pws.UpdatePhotoMetadata(photo)
# Make sure you only modify the newly returned object, else you'll get
# versioning errors. See Optimistic-concurrency
# Add comment to a picture
comment = pws.InsertComment(photo, u'I wish the water always was this warm')
# Remove comment because it was silly
  print("*blush*")
pws.Delete(comment.GetEditLink().href)
"""
__author__ = '[email protected]'  # (Håvard Gulldahl) #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
__version__ = '$Revision: 176 $'[11:-2]
import sys, os.path, io
import time
import gdata.service
import gdata
import atom.service
import atom
import gdata.photos
SUPPORTED_UPLOAD_TYPES = ('bmp', 'jpeg', 'jpg', 'gif', 'png')
UNKOWN_ERROR=1000
GPHOTOS_BAD_REQUEST=400
GPHOTOS_CONFLICT=409
GPHOTOS_INTERNAL_SERVER_ERROR=500
GPHOTOS_INVALID_ARGUMENT=601
GPHOTOS_INVALID_CONTENT_TYPE=602
GPHOTOS_NOT_AN_IMAGE=603
GPHOTOS_INVALID_KIND=604
class GooglePhotosException(Exception):
def __init__(self, response):
self.error_code = response['status']
self.reason = response['reason'].strip()
if '<html>' in str(response['body']): #general html message, discard it
response['body'] = ""
self.body = response['body'].strip()
self.message = "(%(status)s) %(body)s -- %(reason)s" % response
#return explicit error codes
error_map = { '(12) Not an image':GPHOTOS_NOT_AN_IMAGE,
'kind: That is not one of the acceptable values':
GPHOTOS_INVALID_KIND,
}
for msg, code in error_map.items():
if self.body == msg:
self.error_code = code
break
self.args = [self.error_code, self.reason, self.body]
class PhotosService(gdata.service.GDataService):
ssl = True
userUri = '/data/feed/api/user/%s'
def __init__(self, email=None, password=None, source=None,
server='picasaweb.google.com', additional_headers=None,
**kwargs):
"""Creates a client for the Google Photos service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'picasaweb.google.com'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
self.email = email
self.client = source
gdata.service.GDataService.__init__(
self, email=email, password=password, service='lh2', source=source,
server=server, additional_headers=additional_headers, **kwargs)
def GetFeed(self, uri, limit=None, start_index=None):
"""Get a feed.
The results are ordered by the values of their `updated' elements,
with the most recently updated entry appearing first in the feed.
Arguments:
uri: the uri to fetch
limit (optional): the maximum number of entries to return. Defaults to what
the server returns.
Returns:
one of gdata.photos.AlbumFeed,
gdata.photos.UserFeed,
gdata.photos.PhotoFeed,
gdata.photos.CommentFeed,
gdata.photos.TagFeed,
depending on the results of the query.
Raises:
GooglePhotosException
See:
http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
"""
if limit is not None:
uri += '&max-results=%s' % limit
if start_index is not None:
uri += '&start-index=%s' % start_index
try:
return self.Get(uri, converter=gdata.photos.AnyFeedFromString)
except gdata.service.RequestError as e:
raise GooglePhotosException(e.args[0])
def GetEntry(self, uri, limit=None, start_index=None):
"""Get an Entry.
Arguments:
uri: the uri to the entry
limit (optional): the maximum number of entries to return. Defaults to what
the server returns.
Returns:
one of gdata.photos.AlbumEntry,
gdata.photos.UserEntry,
gdata.photos.PhotoEntry,
gdata.photos.CommentEntry,
gdata.photos.TagEntry,
depending on the results of the query.
Raises:
GooglePhotosException
"""
if limit is not None:
uri += '&max-results=%s' % limit
if start_index is not None:
uri += '&start-index=%s' % start_index
try:
return self.Get(uri, converter=gdata.photos.AnyEntryFromString)
except gdata.service.RequestError as e:
raise GooglePhotosException(e.args[0])
def GetUserFeed(self, kind='album', user='default', limit=None):
"""Get user-based feed, containing albums, photos, comments or tags;
defaults to albums.
The entries are ordered by the values of their `updated' elements,
with the most recently updated entry appearing first in the feed.
Arguments:
kind: the kind of entries to get, either `album', `photo',
`comment' or `tag', or a python list of these. Defaults to `album'.
user (optional): whose albums we're querying. Defaults to current user.
limit (optional): the maximum number of entries to return.
Defaults to everything the server returns.
Returns:
gdata.photos.UserFeed, containing appropriate Entry elements
See:
http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
http://googledataapis.blogspot.com/2007/07/picasa-web-albums-adds-new-api-features.html
"""
if isinstance(kind, (list, tuple) ):
kind = ",".join(kind)
uri = '/data/feed/api/user/%s?kind=%s' % (user, kind)
return self.GetFeed(uri, limit=limit)
def GetTaggedPhotos(self, tag, user='default', limit=None):
"""Get all photos belonging to a specific user, tagged by the given keyword
Arguments:
tag: The tag you're looking for, e.g. `dog'
user (optional): Whose images/videos you want to search, defaults
to current user
limit (optional): the maximum number of entries to return.
Defaults to everything the server returns.
Returns:
gdata.photos.UserFeed containing PhotoEntry elements
"""
# Lower-casing because of
# http://code.google.com/p/gdata-issues/issues/detail?id=194
uri = '/data/feed/api/user/%s?kind=photo&tag=%s' % (user, tag.lower())
return self.GetFeed(uri, limit)
def SearchUserPhotos(self, query, user='default', limit=100):
"""Search through all photos for a specific user and return a feed.
This will look for matches in file names and image tags (a.k.a. keywords)
Arguments:
query: The string you're looking for, e.g. `vacation'
user (optional): The username of whose photos you want to search, defaults
to current user.
limit (optional): Don't return more than `limit' hits, defaults to 100
Only public photos are searched, unless you are authenticated and
searching through your own photos.
Returns:
gdata.photos.UserFeed with PhotoEntry elements
"""
uri = '/data/feed/api/user/%s?kind=photo&q=%s' % (user, query)
return self.GetFeed(uri, limit=limit)
def SearchCommunityPhotos(self, query, limit=100):
"""Search through all public photos and return a feed.
This will look for matches in file names and image tags (a.k.a. keywords)
Arguments:
query: The string you're looking for, e.g. `vacation'
limit (optional): Don't return more than `limit' hits, defaults to 100
Returns:
gdata.GDataFeed with PhotoEntry elements
"""
uri='/data/feed/api/all?q=%s' % query
return self.GetFeed(uri, limit=limit)
def GetContacts(self, user='default', limit=None):
"""Retrieve a feed that contains a list of your contacts
Arguments:
user: Username of the user whose contacts you want
Returns
gdata.photos.UserFeed, with UserEntry entries
See:
http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
"""
uri = '/data/feed/api/user/%s/contacts?kind=user' % user
return self.GetFeed(uri, limit=limit)
def SearchContactsPhotos(self, user='default', search=None, limit=None):
"""Search over your contacts' photos and return a feed
Arguments:
user: Username of the user whose contacts you want
search (optional): What to search for (photo title, description and keywords)
Returns
gdata.photos.UserFeed, with PhotoEntry elements
See:
http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
"""
uri = '/data/feed/api/user/%s/contacts?kind=photo&q=%s' % (user, search)
return self.GetFeed(uri, limit=limit)
def InsertAlbum(self, title, summary, location=None, access='public',
commenting_enabled='true', timestamp=None):
"""Add an album.
Needs authentication, see self.ClientLogin()
Arguments:
title: Album title
summary: Album summary / description
access (optional): `private' or `public'. Public albums are searchable
by everyone on the internet. Defaults to `public'
commenting_enabled (optional): `true' or `false'. Defaults to `true'.
timestamp (optional): A date and time for the album, in milliseconds since
Unix epoch[1] UTC. Defaults to now.
Returns:
The newly created gdata.photos.AlbumEntry
See:
http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed
[1]: http://en.wikipedia.org/wiki/Unix_epoch
"""
album = gdata.photos.AlbumEntry()
album.title = atom.Title(text=title, title_type='text')
album.summary = atom.Summary(text=summary, summary_type='text')
if location is not None:
album.location = gdata.photos.Location(text=location)
album.access = gdata.photos.Access(text=access)
if commenting_enabled in ('true', 'false'):
album.commentingEnabled = gdata.photos.CommentingEnabled(text=commenting_enabled)
if timestamp is None:
timestamp = '%i' % int(time.time() * 1000)
album.timestamp = gdata.photos.Timestamp(text=timestamp)
try:
return self.Post(album, uri=self.userUri % self.email,
converter=gdata.photos.AlbumEntryFromString)
except gdata.service.RequestError as e:
raise GooglePhotosException(e.args[0])
def InsertPhoto(self, album_or_uri, photo, filename_or_handle,
content_type='image/jpeg'):
"""Add a PhotoEntry
Needs authentication, see self.ClientLogin()
Arguments:
album_or_uri: AlbumFeed or uri of the album where the photo should go
photo: PhotoEntry to add
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
"""
try:
assert(isinstance(photo, gdata.photos.PhotoEntry))
except AssertionError:
raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
'body':'`photo` must be a gdata.photos.PhotoEntry instance',
'reason':'Found %s, not PhotoEntry' % type(photo)
})
try:
majtype, mintype = content_type.split('/')
assert(mintype in SUPPORTED_UPLOAD_TYPES)
except (ValueError, AssertionError):
raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE,
'body':'This is not a valid content type: %s' % content_type,
'reason':'Accepted content types: %s' % \
['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
})
if isinstance(filename_or_handle, str) and \
os.path.exists(filename_or_handle): # it's a file name
mediasource = gdata.MediaSource()
mediasource.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):# it's a file-like resource
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0) # rewind pointer to the start of the file
# gdata.MediaSource needs the content length, so read the whole image
      data = filename_or_handle.read()
      file_handle = io.BytesIO(data)  # binary image data needs BytesIO in Python 3
      name = 'image'
      if hasattr(filename_or_handle, 'name'):
        name = filename_or_handle.name
      mediasource = gdata.MediaSource(file_handle, content_type,
          content_length=len(data), file_name=name)
else: #filename_or_handle is not valid
raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
'body':'`filename_or_handle` must be a path name or a file-like object',
'reason':'Found %s, not path name or object with a .read() method' % \
filename_or_handle
})
if isinstance(album_or_uri, str): # it's a uri
feed_uri = album_or_uri
elif hasattr(album_or_uri, 'GetFeedLink'): # it's a AlbumFeed object
feed_uri = album_or_uri.GetFeedLink().href
try:
return self.Post(photo, uri=feed_uri, media_source=mediasource,
converter=gdata.photos.PhotoEntryFromString)
except gdata.service.RequestError as e:
raise GooglePhotosException(e.args[0])
def InsertPhotoSimple(self, album_or_uri, title, summary, filename_or_handle,
content_type='image/jpeg', keywords=None):
"""Add a photo without constructing a PhotoEntry.
Needs authentication, see self.ClientLogin()
Arguments:
album_or_uri: AlbumFeed or uri of the album where the photo should go
title: Photo title
summary: Photo summary / description
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
keywords (optional): a 1) comma separated string or 2) a python list() of
keywords (a.k.a. tags) to add to the image.
E.g. 1) `dog, vacation, happy' 2) ['dog', 'happy', 'vacation']
Returns:
The newly created gdata.photos.PhotoEntry or GooglePhotosException on errors
See:
http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed
[1]: http://en.wikipedia.org/wiki/Unix_epoch
"""
metadata = gdata.photos.PhotoEntry()
metadata.title=atom.Title(text=title)
metadata.summary = atom.Summary(text=summary, summary_type='text')
if keywords is not None:
if isinstance(keywords, list):
keywords = ','.join(keywords)
metadata.media.keywords = gdata.media.Keywords(text=keywords)
return self.InsertPhoto(album_or_uri, metadata, filename_or_handle,
content_type)
def UpdatePhotoMetadata(self, photo):
"""Update a photo's metadata.
Needs authentication, see self.ClientLogin()
You can update any or all of the following metadata properties:
* <title>
* <media:description>
* <gphoto:checksum>
* <gphoto:client>
* <gphoto:rotation>
* <gphoto:timestamp>
* <gphoto:commentingEnabled>
Arguments:
photo: a gdata.photos.PhotoEntry object with updated elements
Returns:
The modified gdata.photos.PhotoEntry
Example:
p = GetFeed(uri).entry[0]
p.title.text = u'My new text'
p.commentingEnabled.text = 'false'
p = UpdatePhotoMetadata(p)
It is important that you don't keep the old object around, once
it has been updated. See
http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
"""
try:
return self.Put(data=photo, uri=photo.GetEditLink().href,
converter=gdata.photos.PhotoEntryFromString)
except gdata.service.RequestError as e:
raise GooglePhotosException(e.args[0])
def UpdatePhotoBlob(self, photo_or_uri, filename_or_handle,
content_type = 'image/jpeg'):
"""Update a photo's binary data.
Needs authentication, see self.ClientLogin()
Arguments:
photo_or_uri: a gdata.photos.PhotoEntry that will be updated, or a
`edit-media' uri pointing to it
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
Returns:
The modified gdata.photos.PhotoEntry
Example:
p = GetFeed(PhotoUri)
p = UpdatePhotoBlob(p, '/tmp/newPic.jpg')
It is important that you don't keep the old object around, once
it has been updated. See
http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
"""
try:
majtype, mintype = content_type.split('/')
assert(mintype in SUPPORTED_UPLOAD_TYPES)
except (ValueError, AssertionError):
raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE,
'body':'This is not a valid content type: %s' % content_type,
'reason':'Accepted content types: %s' % \
['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
})
if isinstance(filename_or_handle, str) and \
os.path.exists(filename_or_handle): # it's a file name
photoblob = gdata.MediaSource()
photoblob.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):# it's a file-like resource
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0) # rewind pointer to the start of the file
# gdata.MediaSource needs the content length, so read the whole image
      data = filename_or_handle.read()
      file_handle = io.BytesIO(data)  # binary image/video data needs BytesIO in Python 3
      name = 'image'
      if hasattr(filename_or_handle, 'name'):
        name = filename_or_handle.name
      # Assign to photoblob so the Put() below uses the same variable in both branches
      photoblob = gdata.MediaSource(file_handle, content_type,
          content_length=len(data), file_name=name)
else: #filename_or_handle is not valid
raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
'body':'`filename_or_handle` must be a path name or a file-like object',
'reason':'Found %s, not path name or an object with .read() method' % \
type(filename_or_handle)
})
if isinstance(photo_or_uri, str):
entry_uri = photo_or_uri # it's a uri
elif hasattr(photo_or_uri, 'GetEditMediaLink'):
entry_uri = photo_or_uri.GetEditMediaLink().href
try:
return self.Put(photoblob, entry_uri,
converter=gdata.photos.PhotoEntryFromString)
except gdata.service.RequestError as e:
raise GooglePhotosException(e.args[0])
def InsertTag(self, photo_or_uri, tag):
"""Add a tag (a.k.a. keyword) to a photo.
Needs authentication, see self.ClientLogin()
Arguments:
photo_or_uri: a gdata.photos.PhotoEntry that will be tagged, or a
`post' uri pointing to it
(string) tag: The tag/keyword
Returns:
The new gdata.photos.TagEntry
Example:
p = GetFeed(PhotoUri)
tag = InsertTag(p, 'Beautiful sunsets')
"""
tag = gdata.photos.TagEntry(title=atom.Title(text=tag))
if isinstance(photo_or_uri, str):
post_uri = photo_or_uri # it's a uri
elif hasattr(photo_or_uri, 'GetEditMediaLink'):
post_uri = photo_or_uri.GetPostLink().href
try:
return self.Post(data=tag, uri=post_uri,
converter=gdata.photos.TagEntryFromString)
except gdata.service.RequestError as e:
raise GooglePhotosException(e.args[0])
def InsertComment(self, photo_or_uri, comment):
"""Add a comment to a photo.
Needs authentication, see self.ClientLogin()
Arguments:
photo_or_uri: a gdata.photos.PhotoEntry that is about to be commented
, or a `post' uri pointing to it
(string) comment: The actual comment
Returns:
The new gdata.photos.CommentEntry
Example:
p = GetFeed(PhotoUri)
tag = InsertComment(p, 'OOOH! I would have loved to be there.
Who's that in the back?')
"""
comment = gdata.photos.CommentEntry(content=atom.Content(text=comment))
if isinstance(photo_or_uri, str):
post_uri = photo_or_uri # it's a uri
elif hasattr(photo_or_uri, 'GetEditMediaLink'):
post_uri = photo_or_uri.GetPostLink().href
try:
return self.Post(data=comment, uri=post_uri,
converter=gdata.photos.CommentEntryFromString)
except gdata.service.RequestError as e:
raise GooglePhotosException(e.args[0])
def Delete(self, object_or_uri, *args, **kwargs):
"""Delete an object.
Re-implementing the GDataService.Delete method, to add some
convenience.
Arguments:
object_or_uri: Any object that has a GetEditLink() method that
returns a link, or a uri to that object.
Returns:
? or GooglePhotosException on errors
"""
try:
uri = object_or_uri.GetEditLink().href
except AttributeError:
uri = object_or_uri
try:
return gdata.service.GDataService.Delete(self, uri, *args, **kwargs)
except gdata.service.RequestError as e:
raise GooglePhotosException(e.args[0])
def GetSmallestThumbnail(media_thumbnail_list):
"""Helper function to get the smallest thumbnail of a list of
gdata.media.Thumbnail.
Returns gdata.media.Thumbnail """
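  # Index each thumbnail by its pixel area (width * height) and return the
  # entry with the smallest area.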
r = {}
for thumb in media_thumbnail_list:
r[int(thumb.width)*int(thumb.height)] = thumb
keys = list(r.keys())
keys.sort()
return r[keys[0]]
def ConvertAtomTimestampToEpoch(timestamp):
"""Helper function to convert a timestamp string, for instance
from atom:updated or atom:published, to milliseconds since Unix epoch
(a.k.a. POSIX time).
`2007-07-22T00:45:10.000Z' -> """
return time.mktime(time.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.000Z'))
## TODO: Timezone aware
| apache-2.0 | -1,827,701,711,872,862,000 | 34.687225 | 105 | 0.676542 | false |
bhdouglass/clickable | setup.py | 1 | 2014 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.md').read()
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('clickable/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
requirements = [
'cookiecutter',
'requests',
'jsonschema',
]
setup(
name='clickable-ut',
version=version,
description='Compile, build, and deploy Ubuntu Touch click packages all from the command line.',
long_description=readme,
long_description_content_type='text/markdown',
author='Brian Douglass',
url='https://clickable-ut.dev/',
project_urls={
'Documentation': 'https://clickable-ut.dev/en/latest/',
'Source': 'https://gitlab.com/clickable/clickable',
'Bug Tracker': 'https://gitlab.com/clickable/clickable/-/issues',
},
packages=['clickable'],
include_package_data=True,
install_requires=requirements,
license='GPL3',
zip_safe=False,
keywords='click ubuntu touch ubports',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Code Generators',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
entry_points={
'console_scripts': [
'clickable = clickable:main',
],
}
)
| gpl-3.0 | 3,972,962,737,197,527,600 | 29.515152 | 100 | 0.610228 | false |
amol9/redcmd | redcmd/api.py | 1 | 1144 | '''from redlib.api.misc import make_api, Move
moves = []
exclude = ['test', 'version', 'main', '__main__', '__init__']
make_api(__name__, __file__, exclude=exclude, moves=moves)
__path__ = []
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
'''
#-
from importlib import import_module
from .commandline import CommandLine
from .func import execute_commandline
cmdline = execute_commandline
from .subcommand import Subcommand
from .decorators import *
from .exc import *
from types import ModuleType
import sys
class completer:
module = '.autocomp.completer.all'
class arg:
module = '.arg'
def make_api(cls):
source_module = cls.module
m = import_module(source_module, 'redcmd')
for i in m.__all__:
setattr(cls, i, getattr(m, i))
#current_module = sys.modules[__name__]
#setattr(current_module, cls, new_module)
#new_module.__file__ = __file__
#new_module.__package__ = 'redcmd.api'
#new_module.__builtins__ = m.__all__
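# Copy the public names (__all__) of each backing module onto its placeholder
# class, so e.g. completer.<name> resolves after importing this module.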
make_api(completer)
make_api(arg)
| mit | -5,819,277,296,388,335,000 | 19.428571 | 72 | 0.649476 | false |
Akson/RemoteConsolePlus3 | RemoteConsolePlus3/RCP3/Backends/Processors/DataParsers/DefaultParser.py | 1 | 1320 | #Created by Dmytro Konobrytskyi, 2013 (github.com/Akson)
from RCP3 import DefaultParser
class Backend(object):
def __init__(self, parentNode):
self._parentNode = parentNode
def Delete(self):
"""
This method is called when a parent node is deleted.
"""
pass
def GetParameters(self):
"""
Returns a dictionary with object parameters, their values,
limits and ways to change them.
"""
return {}
def SetParameters(self, parameters):
"""
Gets a dictionary with parameter values and
        updates object parameters accordingly
"""
pass
def ProcessMessage(self, message):
"""
        This method is called when a new message arrives.
If an incoming message should be processed by following nodes, the
'self._parentNode.SendMessage(message)'
should be called with an appropriate message.
"""
parsedMessage = DefaultParser.ParseMessage(message)
if parsedMessage:
self._parentNode.SendMessage(parsedMessage)
def AppendContextMenuItems(self, menu):
"""
        Append backend-specific menu items to a context menu that the user will see
when he clicks on a node.
"""
pass | lgpl-3.0 | -2,688,199,538,316,170,000 | 29.022727 | 79 | 0.608333 | false |
feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/assemblme_v1-4-0/functions/common/wrappers.py | 1 | 2181 | # Copyright (C) 2019 Christopher Gearhart
# [email protected]
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
import time
# Blender imports
import bpy
# Module imports
from .reporting import stopwatch
# https://github.com/CGCookie/retopoflow
def timed_call(label, precision=2):
def wrapper(fn):
def wrapped(*args, **kwargs):
time_beg = time.time()
ret = fn(*args, **kwargs)
stopwatch(label, time_beg, precision=precision)
return ret
return wrapped
return wrapper
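# Hypothetical usage sketch (the function name is illustrative, not part of the add-on):
#   @timed_call("bake_geometry", precision=3)
#   def bake_geometry(...): ...
# Each call then logs the label and elapsed time via stopwatch().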
# corrected bug in previous version of blender_version fn wrapper
# https://github.com/CGCookie/retopoflow/commit/135746c7b4ee0052ad0c1842084b9ab983726b33#diff-d4260a97dcac93f76328dfaeb5c87688
def blender_version_wrapper(op, ver):
self = blender_version_wrapper
if not hasattr(self, "init"):
major, minor, rev = bpy.app.version
blenderver = "%d.%02d" % (major, minor)
self.fns = {}
self.ops = {
"<": lambda v: blenderver < v,
">": lambda v: blenderver > v,
"<=": lambda v: blenderver <= v,
"==": lambda v: blenderver == v,
">=": lambda v: blenderver >= v,
"!=": lambda v: blenderver != v,
}
self.init = True
update_fn = self.ops[op](ver)
fns = self.fns
def wrapit(fn):
n = fn.__name__
if update_fn:
fns[n] = fn
def callit(*args, **kwargs):
return fns[n](*args, **kwargs)
return callit
return wrapit
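# Hypothetical usage sketch (function names are illustrative): register
# version-specific variants under one name and let the wrapper dispatch.
#   @blender_version_wrapper("<=", "2.79")
#   def set_active(obj): bpy.context.scene.objects.active = obj
#   @blender_version_wrapper(">=", "2.80")
#   def set_active(obj): bpy.context.view_layer.objects.active = obj
# Only the variant whose version test passes is stored in fns and called.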
| gpl-3.0 | 2,026,987,312,266,655,200 | 30.608696 | 126 | 0.638698 | false |
ronaldahmed/SLAM-for-ugv | neural-navigation-with-lstm/MARCO/Senses.py | 2 | 18327 | Senses = {
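    # Each key is a surface word mapping part-of-speech tags to sense numbers.
    # {'see': 'word'} entries redirect misspellings/aliases to another entry, and
    # ('lemma', [senses]) tuples point at the senses of the base form.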
'1' : {'N':[2], 'ADJ':[1],},
'1st' : {'ADJ':[1]},
'2' : {'N':[2], 'ADJ':[1]},
'3' : {'N':[2], 'ADJ':[1]},
'4' : {'N':[2], 'ADJ':[1]},
'5' : {'N':[2], 'ADJ':[1]},
'6' : {'N':[2], 'ADJ':[1]},
'7' : {'N':[2], 'ADJ':[1]},
'90' : {'ADJ':[1],},
'able' : {'ADJ':[1],},
'about' : {'ADV':[1],},
'above' : {'ADJ':[1],},
'accurate' : {'ADJ':[1],},
'aesal' : {'see':'easel'},
'after' : {'ADV':[1,2],},
'again' : {'ADV':[1]},
'ahead' : {'ADV':[1]},
'ahllway' : {'see':'hallway'},
'ahllways' : {'see':'hallways'},
'aisle' : {'N':[1],},
'aisles' : {'N':[('aisle',[1])],},
'aleft' : {'see':'left'},
'alk' : {'see':'walk'},
'all' : {'ADJ':[1], 'ADV':[1]},
'all the way' : {'see':'all'},
'all of the way' : {'see':'all'},
'alley' : {'N':[1]},
'alleys' : {'N':[('alley',[1])]},
'almost' : {'ADV':[1]},
'along' : {'ADV':[1]},
'also' : {'ADV':[1]},
'although' : {'ADV':[1]},
'am' : {'V':[('be',[1,2,3,4,5])]},
'amek' : {'see':'make'},
'anopther' : {'see':'another'},
'another' : {'ADJ':[1,2]},
'antoher' : {'see':'another'},
'any' : {'ADJ':[2]},
'appears' : {'V':[('appear',[1,2])]},
'are' : {'V':[('be',[1,2,3,4,5])]},
'area' : {'N':[1]},
'areas' : {'N':[('area',[1])]},
'aright' : {'see':'right'},
'around' : {'ADV':[6]},
'as' : {'ADV':[1]},
'away' : {'ADJ':[1], 'ADV':[1]},
'back' : {'N':[1], 'ADJ':[1], 'ADV':[1,2]},
'bar' : {'see':'bare'},
'bare' : {'ADJ':[4]},
'be' : {'V':[1,2,3,4,5]},
'begins' : {'V':[('begin',[2])]},
'behind' : {'ADV':[1]},
'believe' : {'V':[1]},
'bench' : {'N':[1]},
'between' : {'ADV':[2]},
'beyond' : {'ADV':[1,2]},
'black' : {'ADJ':[1]},
'blank' : {'ADJ':[1]},
'block' : {'N':[2]},
'blocks' : {'N':[('block',[2])]},
'blue' : {'ADJ':[1]},
'both' : {'ADJ':[1]},
'branch' : {'N':[3], 'V':[2]},
'branches' : {'N':[('branch',[3])], 'V':[('branch',[2])]},
'branching' : {'N':[1], 'V':[('branch',[2])]},
'brick' : {'N':[1],},
'bricked' : {'see':'red'},
'brings' : {'V':[('bring',[1,2,3,5])],},
'brown' : {'ADJ':[1]},
'but' : {'ADV':[1]},
'buterfly' : {'see':'butterfly'},
'butterflies' : {'N':[('butterfly',[1])]},
'butterfly' : {'N':[1]},
'butterflys' : {'N':[('butterfly',[1])]},
'butterlfy' : {'see':'butterfly'},
'bvlue' : {'see':'blue'},
'came' : {'V':[('come',[1,2])]},
'can' : {'V':[1]},
'carpet' : {'N':[1]},
'carpeted' : {'ADJ':[1]},
'carpeting' : {'N':[1]},
'cement' : {'N':[1]},
'certain' : {'ADJ':[1,2]},
'chair' : {'N':[1]},
'chairs' : {'N':[('chair',[1])]},
'charred' : {'ADJ':[1]},
'choice' : {'N':[3]},
'click' : {'N':[2]},
'close' : {'ADJ':[1]},
'closer' : {'ADJ':[('close',[1])], 'ADV':[1]},
'closest' : {'ADJ':[('close',[1])], 'ADV':[1]},
'clownfish' : {'see':'fish'},
'coat' : {'N':[1]},
'coatrack' : {'N':[1]},
'colored' : {'ADJ':[1]},
'come' : {'V':[1,2,3,4]},
'completely' : {'ADV':[1]},
'conataining' : {'see':'containing'},
'conatains' : {'see':'contains'},
'conatins' : {'see':'contains'},
'concrete' : {'N':[1], 'ADJ':[2]},
'contain' : {'V':[1]},
'containa' : {'see':'contains'},
'contained' : {'V':[('contain',[1])]},
'containg' : {'see':'containing'},
'containing' : {'V':[('contain',[1])]},
'contains' : {'V':[('contain',[1])]},
'continue' : {'V':[1,4]},
'continuing' : {'V':[('continue',[1,4])]},
'corner' : {'N':[4]},
'corridor' : {'N':[1]},
'couple' : {'N':[1]},
'course' : {'N':[5]},
'covered' : {'ADJ':[1]},
'crosses' : {'V':[('cross',[1,2])]},
'crossing' : {'N':[4], 'V':[('cross',[1,2])]},
'dark' : {'ADJ':[2]},
'dead end' : {'N':[1]},
'dead' : {'see':'dead end'},
'deadend' : {'see':'dead end'},
'description' : {'N':[1]},
'direction' : {'N':[1,2,3,6]},
'directions' : {'N':[('direction',[6])]},
'directly' : {'ADV':[1]},
'distance' : {'N':[2]},
'down' : {'ADV':[1]},
'each' : {'ADJ':[1]},
'easel' : {'N':[1]},
'easle' : {'see':'easel'},
'edge' : {'N':[6]},
'efficient' : {'ADJ':[1]},
'eiffel' : {'N':[1]},
'eiffel tower' : {'N':[1]},
'either' : {'ADV':[1]},
'empty' : {'ADJ':[1]},
'end' : {'N':[1], 'V':[1]},
'ending' : {'V':[('end',[1])]},
'ends' : {'V':[('end',[1])]},
'enter' : {'V':[1]},
'epink' : {'see':'pink'},
'eventually' : {'ADV':[1]},
'exit' : {'N':[1,2], 'V':[1]},
'expanse' : {'N':[2]},
'face' : {'V':[5]},
'faces' : {'V':[('face',[5])]},
'facing' : {'V':[('face',[3,5])]},
'fail' : {'V':[2]},
'far' : {'ADJ':[1], 'ADV':[2,3]},
'farther' : {'ADJ':[1], 'ADV':[2]},
'farthest' : {'ADJ':[1], 'ADV':[1]},
'fave' : {'see':'face'},
'few' : {'ADJ':[1]},
'finally' : {'ADV':[2]},
'find' : {'V':[3]},
'finding' : {'V':[('find',[3])]},
'first' : {'ADJ':[1], 'ADV':[1]},
'fish' : {'N':[1]},
'fishy' : {'see':'fish'},
'five' : {'N':[1], 'ADJ':[1]},
'flooored' : {'see':'floored'},
'floor' : {'N':[1],},
'floord' : {'see':'floored'},
'floored' : {'ADJ':[1]},
'flooring' : {'N':[1,2]},
'floors' : {'N':[('floor',[1])]},
'flor' : {'see':'floor'},
'florr' : {'see':'floor'},
'flower' : {'N':[2]},
'flowerd' : {'see':'flowered'},
'flowered' : {'ADJ':[1]},
'flowers' : {'N':[('flower',[1])]},
'follow' : {'V':[4]},
'followe' : {'see':'followed'},
'following' : {'ADJ':[1]},
'foored' : {'see':'floored'},
'forces' : {'V':[('force',[1])]},
'forward' : {'ADJ':[1,2], 'ADV':[1]},
'four' : {'N':[1], 'ADJ':[1]},
'fourth' : {'ADJ':[1]},
'foward' : {'see':'forward'},
'fron' : {'see':'front'},
'front' : {'N':[2,3,10], 'ADJ':[1,2]},
'furniture' : {'N':[1]},
'furnitute' : {'see':'furniture'},
'further' : {'ADJ':[2], 'ADV':[3]},
'futon' : {'N':[1]},
'fwd' : {'see':'forward'},
'get' : {'V':[5]},
'go' : {'V':[1]},
'goi' : {'see':'go'},
'goin' : {'see':'going'},
'going' : {'V':[('go',[1,2])]},
'grass' : {'N':[1]},
'grassy' : {'ADJ':[1]},
'gray' : {'ADJ':[1]},
'green' : {'ADJ':[1]},
'grey' : {'ADJ':[1]},
'greyt' : {'see':'grey'},
'hal' : {'see':'hall'},
'halfway' : {'ADJ':[1], 'ADV':[1]},
'hall' : {'N':[1]},
'halllway' : {'see':'hallway'},
'halls' : {'N':[('hall',[1])]},
'hallway' : {'N':[1]},
'hallways' : {'N':[('hallway',[1])]},
'hallwyas' : {'see':'hallways'},
'hang' : {'see':'take'},
'hanging' : {'N':[1]},
'hangings' : {'N':[('hanging',[1])]},
'has' : {'V':[('have',[1])]},
'hat rack' : {'see': 'hatrack'},
'hatrack' : {'N':[1]},
'have' : {'V':[1]},
'having' : {'V':[('have',[1])]},
'head' : {'V':[1]},
'here' : {'N':[1], 'ADJ':[1], 'ADV':[1]},
'hexagonal' : {'ADJ':[1]},
'hexagonally' : {'see':'hexagonal'},
'hit' : {'V':[5]},
'hold' : {'V':[6]},
'honeycomb' : {'N':[1]},
'however' : {'ADV':[2]},
'images' : {'N':[('image',[2])]},
'immediate' : {'ADJ':[1]},
'immediately' : {'ADV':[1]},
'inersection' : {'see':'intersection'},
'infront' : {'see':'front'},
'interection' : {'see':'intersection'},
'interesction' : {'see':'intersection'},
'interesection' : {'see':'intersection'},
'interesects' : {'see':'intersects'},
'interseciton' : {'see':'intersection'},
'intersect' : {'V':[1]},
'intersected' : {'V':[('intersect',[1])]},
'intersecting' : {'V':[('intersect',[1])], 'ADJ':[1]},
'intersectino' : {'see':'intersection'},
'intersection' : {'N':[2]},
'intersections' : {'N':[('intersection',[2])]},
'intersects' : {'V':[('intersect',[1])]},
'intesection' : {'see':'intersection'},
'is' : {'V':[('be',[1,2,3,4,5])]},
'junction' : {'N':[1]},
'juncture' : {'N':[2]},
'just' : {'ADV':[2,5]},
'keep' : {'V':[1,2]},
'key' : {'ADJ':[1]},
'laft' : {'see':'left'},
'lamp' : {'N':[2]},
'lampl' : {'see':'lamp'},
'lampshade' : {'N':[1]},
'last' : {'ADJ':[2], 'ADV':[2]},
'leading' : {'V':[('lead',6)]},
'leads' : {'V':[('lead',6)]},
'left' : {'N':[1,5], 'ADJ':[1], 'ADV':[1]},
'lft' : {'see':'left'},
'like' : {'ADJ':[1]},
'little' : {'N':[1]},
'locate' : {'V':[2]},
'located' : {'V':[('locate',[2])], 'ADJ':[1]},
'long' : {'ADJ':[2]},
'longer' : {'ADJ':[1]},
'longetr' : {'see':'longer'},
'look' : {'V':[1,4]},
'looking' : {'V':[('look',[1,4])], 'ADJ':[1]},
'lovely' : {'ADJ':[1]},
'make' : {'V':[16]},
'makea' : {'see':'make'},
'makes' : {'V':[('make',[16])]},
'many' : {'ADJ':[1]},
'match' : {'V':[1]},
'maybe' : {'ADV':[1]},
'meet' : {'V':[3]},
'meets' : {'V':[('meet',[3])]},
'middle' : {'N':[1,2], 'ADJ':[1,2]},
'monarch' : {'N':[2]},
'more' : {'ADJ':[1,2,3]},
'most' : {'ADJ':[1,2]},
'move' : {'N':[4], 'V':[1]},
'movement' : {'N':[3]},
'movements' : {'N':[('movement',[3])]},
'moving' : {'V':[('move',[1])]},
'near' : {'ADJ':[1]},
'need' : {'V':[1]},
'new' : {'ADJ':[2]},
'next' : {'ADJ':[3], 'ADV':[1]},
'no' : {'ADJ':[1]},
'normal' : {'ADJ':[1]},
'not' : {'ADV':[1]},
'notably' : {'ADV':[1]},
'nothing' : {'N':[1]},
'now' : {'ADV':[4]},
'object' : {'N':[1]},
'objects' : {'N':[('object',[1])]},
'octagon' : {'N':[1]},
'octagons' : {'N':[('octagon',[1])]},
'off' : {'ADV':[1,2]},
'oine' : {'see':'one'},
'olive' : {'ADJ':[1]},
'on' : {'ADV':[1]},
'once' : {'ADV':[1,2]},
'one' : {'ADJ':[1,2]},
'only' : {'ADJ':[1,2], 'ADV':[2]},
'opening' : {'N':[1]},
'opens' : {'V':[('open',[8])]},
'opposite' : {'ADJ':[1]},
'or' : {'N':[1,2]},
'orange' : {'ADJ':[1]},
'orient' : {'V':[3]},
'oriented' : {'ADJ':[1]},
'originated' : {'V':[('originate',[3])]},
'other' : {'ADJ':[1,2]},
'p' : {'see':'position'},
'p4' : {'see':'4'},
'p6' : {'see':'6'},
'painting' : {'N':[1]},
'painting easel' : {'see':'easel'},
'paragraph' : {'N':[1]},
'pass' : {'V':[1]},
'passing' : {'V':[('pass',[1])]},
'patch' : {'N':[2]},
'path' : {'N':[2,3,4]},
'paths' : {'N':[('path',[2,3,4])]},
'pcitures' : {'see':'picaitures'},
'perpendicualr' : {'see':'perpendicular'},
'perpendicular' : {'ADJ':[1]},
'perspective' : {'N':[1]},
'pics' : {'N':[('pic',[2])]},
'pictures' : {'N':[('picture',[2])]},
'pieaces' : {'see':'pieces'},
'piece' : {'N':[2]},
'pieces' : {'N':[('piece',[2])]},
'pink' : {'ADJ':[1]},
'place' : {'N':[1], 'V':[1]},
'placed' : {'ADJ':[1]},
'places' : {'N':[('place',[1])]},
'plain' : {'ADJ':[3]},
'pnk' : {'see':'pink'},
'point' : {'N':[2]},
'pole' : {'N':[1]},
'portion' : {'N':[1]},
'pos' : {'see':'position'},
'position' : {'N':[1], 'V':[1]},
'possible' : {'ADJ':[2]},
'possibly' : {'ADV':[1]},
'post' : {'N':[4]},
'postion' : {'see':'position'},
'postition' : {'see':'position'},
'previous' : {'ADJ':[3]},
'print' : {'see' : 'carpeted'},
'probably' : {'ADV':[1]},
'put' : {'V':[1]},
'quite' : {'ADV':[2]},
'rack' : {'N':[1]},
'reach' : {'V':[1]},
'reached' : {'V':[('reach',[1])]},
'recall' : {'V':[1]},
'rech' : {'see':'reach'},
'red' : {'ADJ':[1]},
'regardless' : {'ADV':[1]},
'regular' : {'ADJ':[3]},
'remember' : {'V':[1]},
'resume' : {'V':[1]},
'right' : {'N':[3,4], 'ADJ':[1,2], 'ADV':[4]},
'roack' : {'see':'rock'},
'road' : {'N':[1]},
'rock' : {'N':[2]},
'room' : {'N':[1]},
'rose' : {'N':[1], 'ADJ':[1]},
'roses' : {'N':[('rose',[1])]},
'rotate' : {'V':[1]},
'route' : {'N':[1]},
'rt' : {'see':'right'},
'running' : {'V':[('run',[3])]},
'same' : {'ADJ':[1,2]},
'second' : {'ADJ':[1], 'ADV':[1]},
'section' : {'N':[4]},
'sections' : {'N':[('section',[4])]},
'see' : {'V':[1]},
'segements' : {'see':'segments'},
'segment' : {'N':[1]},
'segments' : {'N':[('segment',[1])]},
'segmentsm' : {'see':'segments'},
'series' : {'N':[1]},
'seven' : {'N':[1], 'ADJ':[1]},
'several' : {'ADJ':[1]},
'short' : {'ADJ':[2]},
'shorter' : {'ADJ':[('short',[2])]},
'side' : {'N':[1]},
'sides' : {'N':[('side',[1])]},
'simply' : {'ADV':[1]},
'single' : {'ADJ':[1]},
'six' : {'N':[1], 'ADJ':[1]},
'so' : {'ADV':[2]},
'sofa' : {'N':[1]},
'solid' : {'ADJ':[5]},
'some' : {'ADJ':[1],},
'something' : {'N':[1]},
'sort' : {'N':[1]},
'space' : {'N':[5]},
'spaces' : {'N':[('space',[5])]},
'spce' : {'see':'space'},
'spot' : {'N':[1]},
'square' : {'N':[3]},
'stand' : {'N':[5], 'V':[3]},
'standing' : {'V':[('stand',[3])]},
'start' : {'V':[1,2]},
'station' : {'N':[4]},
'stay' : {'V':[1]},
'stem' : {'N':[3]},
'step' : {'N':[2]},
'steps' : {'N':[('step',[2])]},
'still' : {'ADV':[1]},
'stone' : {'N':[2], 'ADJ':[1]},
'stool' : {'N':[1]},
'stop' : {'V':[1]},
'straight' : {'ADJ':[2,8], 'ADV':[1,3]},
'street' : {'N':[1]},
'streets' : {'N':[('street',[1])]},
'such' : {'ADJ':[1,2,3]},
'sure' : {'ADJ':[1]},
't' : {'N':[5]},
"' t '" : {'see':'t'},
'take' : {'V':[1,11,40]},
'then' : {'ADV':[1]},
'there' : {'N':[1], 'ADV':[1]},
'thing' : {'N':[3,9]},
'think' : {'V':[1]},
'third' : {'ADJ':[1], 'ADV':[1]},
'though' : {'ADV':[1]},
'thre' : {'see':'three'},
'three' : {'N':[1], 'ADJ':[1]},
'tiems' : {'see':'times'},
'tile' : {'N':[1]},
'tiled' : {'ADJ':[1]},
'tiles' : {'N':[('tile',[1])]},
'time' : {'N':[1]},
'times' : {'N':[('time',[1])]},
'tke' : {'see':'take'},
'together' : {'ADV':[2]},
'too' : {'ADV':[2]},
'top' : {'N':[1]},
'tower' : {'N':[1]},
'towers' : {'N':[('tower',[1])]},
'travel' : {'V':[1]},
'tricky' : {'ADJ':[2]},
'tunr' : {'see':'turn'},
'turn' : {'N':[1,2], 'V':[1]},
'turned' : {'V':[('turn',[1])]},
'turning' : {'V':[('turn',[1])]},
'turns' : {'N':[('turn',[1,2])], 'V':[('turn',[1])]},
'twice' : {'ADV':[1]},
'two' : {'N':[1], 'ADJ':[1]},
'uncertain' : {'ADJ':[2]},
'up' : {'ADV':[1]},
'vacant' : {'ADJ':[2]},
'very' : {'ADJ':[1], 'ADV':[2]},
'view' : {'V':[1]},
'visible' : {'ADJ':[1]},
'walk' : {'V':[1]},
'walked' : {'V':[('walk',[1])]},
'walking' : {'V':[('walk',[1])]},
'walkway' : {'N':[1]},
'wall' : {'N':[1]},
'wall hanging' : {'N':[('hanging',[1])]},
'walled' : {'V':[('wall',[1])], 'ADJ':[1]},
'walls' : {'N':[('wall',[1])]},
'wander' : {'V':[3]},
'was' : {'V':[('be',[1,2,3,4,5,6])]},
'wawya' : {'see':'way'},
'way' : {'N':[7,8]},
'ways' : {'N':[7,8]},
'when' : {'ADV':[1]},
'where' : {'ADV':[1]},
'white' : {'ADJ':[1]},
'winding' : {'ADJ':[1,2]},
'wlak' : {'see':'walk'},
'wood' : {'N':[1]},
'wooden' : {'ADJ':[1]},
'yellow' : {'ADJ':[1]},
}
Senses2 = {
'2nd' : {'N':[2], 'ADJ':[1]},
'3rd' : {'N':[2], 'ADJ':[1]},
'4th' : {'N':[2], 'ADJ':[1]},
'180' : {'ADJ' : [1]},
'l' : {'N':[5]},
'woodlike' : {'see' : 'wooden'},
'granite' : {'see' : 'rock'},
'hex' : {'see' : 'hexagonal'},
'hexagons' : {'see' : 'hexagonal'},
'hardwood' : {'see' : 'wooden'},
'dirt' : {'see' : 'wooden'},
'flowery' : {'see' : 'flowered'},
'arrive' : {'see' : 'reach'},
'meet' : {'see' : 'reach'},
'meet up' : {'see' : 'intersect'},
'center' : {'see' : 'front'},
'onward' : {'see' : 'forward'},
'onwards' : {'see' : 'forward'},
'forwards' : {'see' : 'forward'},
'step' : {'see' : 'stop'},
'wander' : {'see' : 'find'},
'wind' : {'see' : 'winding'},
'spot' : {'see' : 'square'},
'center' : {'V' : [3]},
'sitting' : {'see' : 'stand'},
'chair with a back' : {'N':[('straight chair',[1])]},
'dining chair' : {'N':[('straight chair',[1])]},
'coat/hat stand' : {'see' : 'coatrack'},
'coat hanger' : {'see' : 'coatrack'},
'coat hanger looking thing' : {'see' : 'coatrack'},
'clothes hanger' : {'see' : 'coatrack'},
'candlestand' : {'see' : 'coatrack'},
'board' : {'see' : 'easel'},
'art board' : {'see' : 'easel'},
'artist board' : {'see' : 'easel'},
'eisle' : {'see' : 'easel'},
"painter ' s board" : {'see' : 'easel'},
'painting board' : {'see' : 'easel'},
'painting eisle' : {'see' : 'easel'},
'painters stand' : {'see' : 'easel'},
'painting stand' : {'see' : 'easel'},
'painting thing' : {'see' : 'easel'},
'thing you put the canvas on to paint' : {'see' : 'easel'},
'thing painters paint on' : {'see' : 'easel'},
'thing you paint pictures on' : {'see' : 'easel'},
'ground' : {'see' : 'floor'},
'floor lamp' : {'see' : 'lamp'},
'lampstand' : {'see' : 'lamp'},
'lamppost' : {'see' : 'lamp'},
'lamp post' : {'see' : 'lamp'},
'poster' : {'see' : 'painting'},
'wallpaper' : {'see' : 'painting'},
'nearest' : {'see' : 'next'},
'make sure' : {'see' : 'orient'},
'be sure' : {'see' : 'orient'},
'heading' : {'see' : 'orient'},
'start out' : {'see' : 'orient'},
'panel' : {'see' : 'segment'},
'rectangle' : {'see' : 'segment'},
'tiling' : {'see' : 'tile'},
'scale' : {'see' : 'tile'},
'leg' :{'see' : 'portion'},
'pathed' : {'see' : 'way'},
'destination' : {'see':'position'},
'location' : {'see':'position'},
'adjoining' : {'see' : 'next'},
'stay' : {'see' : 'stop'},
'remain' : {'see' : 'stop'},
'center' : {'see' : 'middle'},
'opportunity' : {'see' : 'turn'},
'stretch' :{'see' : 'portion'},
'part' :{'see' : 'portion'},
'bit' :{'see' : 'portion'},
'surrounded' : {'V' : [1]},
'longest' : {'ADJ': [('long',[2])]},
'shortest' : {'ADJ':[('short',[2])]},
'proceed' : {'see' : 'head'},
}
Senses3 = {
'army' : {'see' : 'honeycomb'},
'canvas' : {'see' : 'concrete'},
'fishes' : {'see': 'fish'},
'puke' : {'see' : 'honeycomb'},
'turtle shell' : {'see' : 'hexagonal'},
}
| mit | 2,418,539,608,148,175,400 | 31.209139 | 63 | 0.371692 | false |
HMSBeagle1831/rapidscience | rlp/documents/forms.py | 1 | 1288 | from django import forms
from taggit.models import Tag
from .models import Document, File, Image, Link, Video
class BaseDocumentForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
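        # Only offer a tag picker when at least one Tag exists; otherwise the
        # form simply omits the field.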
if Tag.objects.count():
self.fields['tags'] = forms.ModelMultipleChoiceField(
widget=forms.CheckboxSelectMultiple(),
queryset=Tag.objects.all(),
required=False
)
class Meta:
model = Document
exclude = [
'owner', 'date_added', 'date_updated', 'project', 'tags',
]
class FileForm(BaseDocumentForm):
class Meta:
model = File
exclude = [
'owner', 'date_added', 'date_updated', 'project', 'tags',
]
class ImageForm(BaseDocumentForm):
class Meta:
model = Image
exclude = [
'owner', 'date_added', 'date_updated', 'project', 'height', 'width', 'tags',
]
class LinkForm(BaseDocumentForm):
class Meta:
model = Link
fields = [
'title', 'url', 'description',
]
class VideoForm(BaseDocumentForm):
class Meta:
model = Video
fields = [
'title', 'share_link', 'description',
]
| mit | -1,312,781,363,678,341,600 | 22.851852 | 88 | 0.543478 | false |
henryr/Impala | tests/custom_cluster/test_admission_controller.py | 6 | 25728 | #!/usr/bin/env python
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
# Tests admission control
import pytest
import threading
import re
from time import sleep, time
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.impala_cluster import ImpalaCluster
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_single_exec_option_dimension
from tests.common.test_dimensions import create_uncompressed_text_dimension
from tests.common.test_vector import TestDimension
import logging
import os
LOG = logging.getLogger('admission_test')
# We set a WAIT debug action so it doesn't complete the execution of this query. The
# limit is a parameter for debugging purposes; each thread will insert its id so
# that running queries can be correlated with the thread that submitted them.
QUERY = "select * from alltypes limit %s"
# Time to sleep (in milliseconds) between issuing queries. The default statestore
# heartbeat is 500ms, so the lower the delay the more we can submit before the global
# state is updated. When the delay is at least the statestore heartbeat frequency, all
# state should be visible by every impalad by the time the next query is submitted.
SUBMISSION_DELAY_MS = [0, 50, 100, 600]
# The number of queries to submit. The test does not support fewer queries than
# MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES to keep some validation logic
# simple.
NUM_QUERIES = [15, 30, 50]
# Whether we will submit queries to all available impalads (in a round-robin fashion)
ROUND_ROBIN_SUBMISSION = [True, False]
# The query pool to use. The impalads should be configured to recognize this
# pool with the parameters below.
POOL_NAME = "default-pool"
# The statestore heartbeat of the impala cluster the test is executing against
STATESTORE_HEARTBEAT_MS = 500
# The number of queries that can execute concurrently in the pool POOL_NAME.
MAX_NUM_CONCURRENT_QUERIES = 5
# The number of queries that can be queued in the pool POOL_NAME
MAX_NUM_QUEUED_QUERIES = 10
# Mem limit (bytes) used in the mem limit test
MEM_TEST_LIMIT = 100000 * 1024 * 1024
_STATESTORED_ARGS = "-statestore_heartbeat_frequency_ms=%s" % (STATESTORE_HEARTBEAT_MS)
def impalad_admission_ctrl_flags(max_requests, max_queued, mem_limit):
return ("-vmodule admission-controller=3 -default_pool_max_requests %s "
"-default_pool_max_queued %s -default_pool_mem_limit %s "
"-disable_admission_control=false" %\
(max_requests, max_queued, mem_limit))
def impalad_admission_ctrl_config_args():
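  # Point admission control at the test fair-scheduler/llama-site configs that
  # define the pools used by these tests.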
impalad_home = os.environ['IMPALA_HOME']
resources_dir = os.path.join(impalad_home, "fe", "src", "test", "resources")
fs_allocation_path = os.path.join(resources_dir, "fair-scheduler-test2.xml")
llama_site_path = os.path.join(resources_dir, "llama-site-test2.xml")
return ("-vmodule admission-controller=3 -fair_scheduler_allocation_path %s "
"-llama_site_path %s -disable_admission_control=false" %\
(fs_allocation_path, llama_site_path))
def log_metrics(log_prefix, metrics, log_level=logging.DEBUG):
LOG.log(log_level, "%sadmitted=%s, queued=%s, dequeued=%s, rejected=%s, "\
"completed=%s, timed-out=%s", log_prefix, metrics['admitted'], metrics['queued'],
metrics['dequeued'], metrics['rejected'], metrics['completed'],
metrics['timed-out'])
def compute_metric_deltas(m2, m1, metric_names):
"""Returns a dictionary of the differences of metrics in m2 and m1 (m2 - m1)"""
return dict((n, m2.get(n, 0) - m1.get(n, 0)) for n in metric_names)
def metric_key(pool_name, metric_name):
"""Helper method to construct the admission controller metric keys"""
return "admission-controller.%s.%s" % (pool_name, metric_name)
class TestAdmissionController(CustomClusterTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestAdmissionController, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
# There's no reason to test this on other file formats/compression codecs right now
cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload()))
def __check_pool_rejected(self, client, pool, expected_error_re):
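    # Run a trivial query in the given pool and expect it to be rejected with an
    # error matching expected_error_re.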
try:
client.set_configuration({'request_pool': pool})
client.execute("select 1")
assert False, "Query should return error"
except ImpalaBeeswaxException as e:
assert re.search(expected_error_re, str(e))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(),
statestored_args=_STATESTORED_ARGS)
def test_set_request_pool(self, vector):
"""Tests setting the REQUEST_POOL with the pool placement policy configured
to require a specific pool (IMPALA-1050)."""
impalad = self.cluster.impalads[0]
client = impalad.service.create_beeswax_client()
try:
for pool in ['', 'not_a_pool_name']:
expected_error =\
"No mapping found for request from user '\w+' with requested pool '%s'"\
% (pool)
self.__check_pool_rejected(client, pool, expected_error)
# Check rejected if user does not have access.
expected_error = "Request from user '\w+' with requested pool 'root.queueC' "\
"denied access to assigned pool 'root.queueC'"
self.__check_pool_rejected(client, 'root.queueC', expected_error)
# Also try setting a valid pool
client.set_configuration({'request_pool': 'root.queueB'})
client.execute("select 1") # Query should execute in queueB
finally:
client.close()
class TestAdmissionControllerStress(TestAdmissionController):
"""Submits a number of queries (parameterized) with some delay between submissions
(parameterized) and the ability to submit to one impalad or many in a round-robin
fashion. The queries are set with the WAIT debug action so that we have more control
over the state that the admission controller uses to make decisions. Each query is
submitted on a separate thread. Depending on the test parameters a varying number of
queries will be admitted, queued, and rejected. Once queries are admitted, the query
execution blocks and we can cancel the query in order to allow another queued query to
be admitted.
The test tracks the state of the admission controller using the metrics from each
impalad to do the following:
(1) After submitting all queries, the change in metrics for the number of admitted,
queued, and rejected requests should sum to the number of queries and that the
values are reasonable given the test parameters.
(2) While there are running queries:
* Cancel the currently running queries (they are blocked with the WAIT debug action)
and verify the metric for the number of completed queries. The threads that
submitted those queries should complete.
* Check that queued requests are then dequeued and verify using the metric for the
number of dequeued requests. The threads that were waiting to submit the query
should then insert themselves into a list of currently running queries and then
fetch() the results (which will block).
(3) After all queries have completed, check that the final number of admitted,
queued, and rejected requests are reasonable given the test parameters. When
submitting to a single impalad, we know exactly what the values should be,
otherwise we just check that they are within reasonable bounds.
"""
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestAdmissionControllerStress, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(TestDimension('num_queries', *NUM_QUERIES))
cls.TestMatrix.add_dimension(
TestDimension('round_robin_submission', *ROUND_ROBIN_SUBMISSION))
cls.TestMatrix.add_dimension(
TestDimension('submission_delay_ms', *SUBMISSION_DELAY_MS))
if cls.exploration_strategy() == 'core':
cls.TestMatrix.add_constraint(lambda v: v.get_value('submission_delay_ms') == 0)
cls.TestMatrix.add_constraint(lambda v: v.get_value('num_queries') == 30)
cls.TestMatrix.add_constraint(\
lambda v: v.get_value('round_robin_submission') == True)
def setup(self):
# All threads are stored in this list and it's used just to make sure we clean up
# properly in teardown.
self.all_threads = list()
# Each submission thread will append() itself to this list if the query begins
# execution. The main thread will access this list to determine which threads are
# executing queries that can be cancelled (it will pop() elements from the front of
# the list). The individual operations on the list are atomic and thread-safe thanks
# to the GIL.
self.executing_threads = list()
def teardown(self):
for thread in self.all_threads:
try:
thread.lock.acquire()
thread.shutdown = True
if thread.query_handle is not None:
LOG.debug("Attempt to clean up thread executing query %s (state %s)",
thread.query_num, thread.query_state)
client = thread.impalad.service.create_beeswax_client()
try:
client.cancel(thread.query_handle)
finally:
client.close()
finally:
thread.lock.release()
thread.join(5)
LOG.debug("Join thread for query num %s %s", thread.query_num,
"TIMED OUT" if thread.isAlive() else "")
def get_admission_metrics(self):
"""
Returns a map of the admission metrics, aggregated across all of the impalads.
The metrics names are shortened for brevity: 'admitted', 'queued', 'dequeued',
'rejected', 'completed', and 'timed-out'.
"""
metrics = {'admitted': 0, 'queued': 0, 'dequeued': 0, 'rejected' : 0,
'completed': 0, 'timed-out': 0}
for impalad in self.impalads:
for short_name in metrics.keys():
metrics[short_name] += impalad.service.get_metric_value(\
metric_key(self.pool_name, 'local-%s' % short_name), 0)
return metrics
def wait_for_metric_changes(self, metric_names, initial, expected_delta, timeout=30):
"""
Waits for the sum of metrics in metric_names to change by at least expected_delta.
This is similar to ImpalaService.wait_for_metric_value(), but it uses one or more
metrics aggregated across all impalads, e.g. we want to wait for the total number of
admitted, queued, and rejected metrics to change some amount in total, but we don't
know exactly how the metrics will change individually.
'metric_names' is a list of the keys returned by get_admission_metrics() which are
expected to change.
'initial' is the initial set of metrics returned by get_admission_metrics() to
compare against.
'expected_delta' is the total change expected across all impalads for the specified
metrics.
"""
log_metrics("wait_for_metric_changes, initial=", initial)
current = initial
start_time = time()
while True:
current = self.get_admission_metrics()
log_metrics("wait_for_metric_changes, current=", current)
deltas = compute_metric_deltas(current, initial, metric_names)
delta_sum = sum([ deltas[x] for x in metric_names ])
LOG.debug("DeltaSum=%s Deltas=%s (Expected=%s for metrics=%s)",\
delta_sum, deltas, expected_delta, metric_names)
if delta_sum >= expected_delta:
LOG.debug("Found all %s metrics after %s seconds", delta_sum,
round(time() - start_time, 1))
return (deltas, current)
assert (time() - start_time < timeout),\
"Timed out waiting %s seconds for metrics" % (timeout,)
sleep(1)
def wait_for_heartbeats(self, heartbeats, timeout=30):
"""Waits for a number of statestore heartbeats from all impalads."""
start_time = time()
num_impalads = len(self.impalads)
init = dict()
curr = dict()
for impalad in self.impalads:
init[impalad] = impalad.service.get_metric_value(\
'statestore-subscriber.topic-update-interval-time')['count']
curr[impalad] = init[impalad]
while True:
LOG.debug("wait_for_heartbeats: curr=%s, init=%s, d=%s", curr.values(),
init.values(), [curr[i] - init[i] for i in self.impalads])
if all([curr[i] - init[i] >= heartbeats for i in self.impalads]): break
for impalad in self.impalads:
curr[impalad] = impalad.service.get_metric_value(\
'statestore-subscriber.topic-update-interval-time')['count']
assert (time() - start_time < timeout),\
"Timed out waiting %s seconds for heartbeats" % (timeout,)
sleep(0.5)
LOG.debug("Waited %s for %s heartbeats", round(time() - start_time, 1), heartbeats)
def wait_for_admitted_threads(self, num_threads, timeout=30):
"""
Wait for query submission threads to update after being admitted, as determined
by observing metric changes. This is necessary because the metrics may change
before the execute_async() calls on the query threads return and add themselves
to self.executing_threads.
"""
start_time = time()
LOG.debug("Waiting for %s threads to begin execution", num_threads)
# All individual list operations are thread-safe, so we don't need to use a
# lock to synchronize before checking the list length (on which another thread
# may call append() concurrently).
while len(self.executing_threads) < num_threads:
assert (time() - start_time < timeout),\
"Timed out waiting %s seconds for %s admitted client rpcs to return" %\
(timeout, num_threads)
sleep(0.1)
LOG.debug("Found all %s admitted threads after %s seconds", num_threads,
round(time() - start_time, 1))
def cancel_admitted_queries(self, num_queries):
"""
Cancels queries on threads that are currently blocked on query execution.
"""
assert len(self.executing_threads) >= num_queries
LOG.debug("Cancelling %s queries", num_queries)
for i in xrange(num_queries):
# pop() is thread-safe, it's OK if another thread is appending concurrently.
thread = self.executing_threads.pop(0)
LOG.debug("Cancelling query %s", thread.query_num)
# The other thread sets the query_state before appending itself to the list,
# and will not change its state until it is cancelled by this thread.
assert thread.query_state == 'ADMITTED'
client = thread.impalad.service.create_beeswax_client()
try:
cancel_result = client.cancel(thread.query_handle)
assert cancel_result.status_code == 0,\
'Unexpected status code from cancel request: %s' % cancel_result
# Wait for the query to be cancelled and return
thread.join(20)
LOG.debug("Cancelled admitted query %s %s",
thread.query_num, "TIMED OUT" if thread.isAlive() else "")
assert not thread.isAlive()
assert thread.query_state == 'COMPLETED'
finally:
client.close()
class SubmitQueryThread(threading.Thread):
def __init__(self, impalad, additional_query_options, vector, query_num,
executing_threads):
"""
executing_threads must be provided so that this thread can add itself when the
query is admitted and begins execution.
"""
super(self.__class__, self).__init__()
self.executing_threads = executing_threads
self.vector = vector
self.additional_query_options = additional_query_options
self.query_num = query_num
self.impalad = impalad
self.error = None
# query_state is defined and used only by the test code, not a property exposed by
# the server
self.query_state = 'NOT_SUBMITTED'
# lock protects query_handle and shutdown, used by the main thread in teardown()
self.lock = threading.RLock()
self.query_handle = None
self.shutdown = False # Set by the main thread when tearing down
def run(self):
client = None
try:
try:
# Take the lock while query_handle is being created to avoid an unlikely race
# condition with teardown() (i.e. if an error occurs on the main thread), and
# check if the test is already shut down.
self.lock.acquire()
if self.shutdown:
return
exec_options = self.vector.get_value('exec_option')
exec_options['debug_action'] = '0:GETNEXT:WAIT'
exec_options.update(self.additional_query_options)
query = QUERY % (self.query_num,)
self.query_state = 'SUBMITTING'
client = self.impalad.service.create_beeswax_client()
ImpalaTestSuite.change_database(client, self.vector.get_value('table_format'))
client.set_configuration(exec_options)
LOG.debug("Submitting query %s", self.query_num)
self.query_handle = client.execute_async(query)
except ImpalaBeeswaxException as e:
if "Rejected" in str(e):
LOG.debug("Rejected query %s", self.query_num)
self.query_state = 'REJECTED'
return
elif "exceeded timeout" in str(e):
LOG.debug("Query %s timed out", self.query_num)
self.query_state = 'TIMED OUT'
return
else:
raise e
finally:
self.lock.release()
LOG.debug("Admitted query %s", self.query_num)
self.query_state = 'ADMITTED'
# The thread becomes visible to the main thread when it is added to the
# shared list of executing_threads. append() is atomic and thread-safe.
self.executing_threads.append(self)
try:
# fetch() will block until we cancel the query from the main thread
          # (unless an unexpected error occurs). If an error occurs on the main thread,
# it is possible that teardown() cancels this query before we call fetch(). In
# that case a different exception is thrown and we handle it gracefully.
client.fetch(query, self.query_handle)
except ImpalaBeeswaxException as e:
if "Cancelled" in str(e):
LOG.debug("Query %s completed", self.query_num)
self.query_state = 'COMPLETED'
self.query_handle = None
elif "Invalid or unknown query handle" in str(e):
# May happen if the test is being torn down early (i.e. an error occurred).
LOG.debug("Query %s already cancelled in test shutdown.")
else:
raise e
except Exception as e:
LOG.exception(e)
# Unknown errors will be raised later
self.error = e
self.query_state = 'ERROR'
finally:
LOG.debug("Thread terminating in state=%s", self.query_state)
if client is not None:
client.close()
def run_admission_test(self, vector, additional_query_options):
LOG.debug("Starting test case with parameters: %s", vector)
self.impalads = self.cluster.impalads
round_robin_submission = vector.get_value('round_robin_submission')
submission_delay_ms = vector.get_value('submission_delay_ms')
if not round_robin_submission:
self.impalads = [self.impalads[0]]
num_queries = vector.get_value('num_queries')
assert num_queries >= MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
    initial_metrics = self.get_admission_metrics()
    log_metrics("Initial metrics: ", initial_metrics)
# Want query_num to start at 1 because this gets used as the limit in the query to
# help debugging (we can associate a running query with a thread). If we start at 0,
# that query would be evaluated as a constant expression and never hit the WAIT debug
# action.
for query_num in xrange(1, num_queries + 1):
impalad = self.impalads[query_num % len(self.impalads)]
thread = self.SubmitQueryThread(impalad, additional_query_options, vector,
query_num, self.executing_threads)
thread.start()
self.all_threads.append(thread)
sleep(submission_delay_ms / 1000.0)
# Wait for all of the queries to be admitted, queued, or rejected (as reported
# by the impalad metrics).
LOG.debug("Wait for initial admission decisions")
(metric_deltas, curr_metrics) = self.wait_for_metric_changes(\
['admitted', 'queued', 'rejected'], initial_metrics, num_queries)
# Also wait for the threads that submitted the queries to start executing
self.wait_for_admitted_threads(metric_deltas['admitted'])
# Check that the admission decisions are reasonable given the test parameters
# The number of admitted and queued requests should be at least the configured limits
# but less than or equal to those limits times the number of impalads.
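    # For example, when all 30 queries go to a single impalad with the limits above, the
    # deltas at this point work out to admitted=5, queued=10 and rejected=15.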
assert metric_deltas['admitted'] >= MAX_NUM_CONCURRENT_QUERIES
assert metric_deltas['admitted'] <= MAX_NUM_CONCURRENT_QUERIES * len(self.impalads)
assert metric_deltas['queued'] >=\
min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES)
assert metric_deltas['queued'] <= MAX_NUM_QUEUED_QUERIES * len(self.impalads)
assert metric_deltas['rejected'] ==\
num_queries - metric_deltas['admitted'] - metric_deltas['queued']
initial_metric_deltas = metric_deltas
while len(self.executing_threads) > 0:
      curr_metrics = self.get_admission_metrics()
      log_metrics("Main loop, curr_metrics: ", curr_metrics)
num_to_cancel = len(self.executing_threads)
LOG.debug("Main loop, will cancel %s queries", num_to_cancel)
self.cancel_admitted_queries(num_to_cancel)
self.wait_for_metric_changes(['completed'], curr_metrics, num_to_cancel)
num_queued_remaining =\
curr_metrics['queued'] - curr_metrics['dequeued'] - curr_metrics['timed-out']
expected_admitted = min(num_queued_remaining, MAX_NUM_CONCURRENT_QUERIES)
(metric_deltas, _) = self.wait_for_metric_changes(['admitted'], curr_metrics,
expected_admitted)
self.wait_for_admitted_threads(metric_deltas['admitted'])
# Wait a few heartbeats to ensure the admission controllers have reached a steady
    # state or we may find an impalad dequeuing more requests after we capture metrics.
self.wait_for_heartbeats(4)
    final_metrics = self.get_admission_metrics()
    log_metrics("Final metrics: ", final_metrics, logging.INFO)
metric_deltas = compute_metric_deltas(final_metrics, initial_metrics,
final_metrics.keys())
assert metric_deltas['timed-out'] == 0
if round_robin_submission:
min_expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
assert metric_deltas['admitted'] >= min_expected_admitted
assert metric_deltas['admitted'] <= min_expected_admitted * len(self.impalads)
assert metric_deltas['admitted'] ==\
initial_metric_deltas['admitted'] + initial_metric_deltas['queued']
assert metric_deltas['queued'] == initial_metric_deltas['queued']
assert metric_deltas['rejected'] == initial_metric_deltas['rejected']
else:
# We shouldn't go over the max number of queries or queue size so we can compute
# the expected number of queries that should have been admitted (which includes the
# number queued as they eventually get admitted as well), queued, and rejected
expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
assert metric_deltas['admitted'] == expected_admitted
assert metric_deltas['queued'] == MAX_NUM_QUEUED_QUERIES
assert metric_deltas['rejected'] == num_queries - expected_admitted
for thread in self.all_threads:
if thread.error is not None:
raise thread.error
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(MAX_NUM_CONCURRENT_QUERIES,
MAX_NUM_QUEUED_QUERIES, -1),
statestored_args=_STATESTORED_ARGS)
def test_admission_controller_with_flags(self, vector):
self.pool_name = 'default-pool'
self.run_admission_test(vector, {'request_pool': self.pool_name})
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(),
statestored_args=_STATESTORED_ARGS)
def test_admission_controller_with_configs(self, vector):
self.pool_name = 'root.queueB'
self.run_admission_test(vector, {'request_pool': self.pool_name})
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(MAX_NUM_CONCURRENT_QUERIES * 100,
MAX_NUM_QUEUED_QUERIES, MEM_TEST_LIMIT),
statestored_args=_STATESTORED_ARGS)
def test_mem_limit(self, vector):
self.pool_name = 'default-pool'
# Each query mem limit (set the query option to override the per-host memory
# estimate) should use a bit less than (total pool mem limit) / #queries so that
# once #queries are running, the total pool mem usage is about at the limit and
# additional incoming requests will be rejected. The actual pool limit on the number
# of running requests is very high so that requests are only queued/rejected due to
# the mem limit.
num_impalads = len(self.cluster.impalads)
query_mem_limit = (MEM_TEST_LIMIT / MAX_NUM_CONCURRENT_QUERIES / num_impalads) - 1
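    # Rough illustration (assuming the usual 3-impalad minicluster): each query gets a
    # ~6666MB per-host limit, so 5 queries running across 3 impalads account for roughly
    # the full 100000MB pool limit.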
self.run_admission_test(vector,
{'request_pool': self.pool_name, 'mem_limit': query_mem_limit})
| apache-2.0 | 8,781,226,377,336,120,000 | 46.207339 | 90 | 0.688277 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/en/m4ufree.py | 3 | 4591 | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: Supremacy
import re,urllib,urlparse,json,base64,time
from resources.lib.modules import cleantitle
from resources.lib.modules import dom_parser2
from resources.lib.modules import client
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['m4ufree.net']
self.base_link = 'http://m4ufree.net'
self.search_link = '/watch/%s-%s-online-m4ufree.html'
def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
url = urlparse.urljoin(self.base_link, (self.search_link %(clean_title,year)))
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': tvshowtitle})
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
clean_title = cleantitle.geturl(url['tvshowtitle'])+'-s%02d' % int(season)
url = urlparse.urljoin(self.base_link, (self.search_link %(clean_title,url['year'])))
r = client.request(url)
r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
for i in r[0]:
if i.content == 'Episode %s'%episode:
url = i.attrs['href']
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
r = client.request(url)
quality = re.findall(">(\w+)<\/p",r)
if quality[0] == "HD":
quality = "720p"
else:
quality = "SD"
r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
for i in r[0]:
url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'], 'data-server': i.attrs['data-server'], 'data-name' : i.attrs['data-name']}
url = urllib.urlencode(url)
sources.append({'source': i.content, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
try:
urldata = urlparse.parse_qs(url)
urldata = dict((i, urldata[i][0]) for i in urldata)
post = {'ipplugins': 1,'ip_film': urldata['data-film'], 'ip_server': urldata['data-server'], 'ip_name': urldata['data-name'],'fix': "0"}
p1 = client.request('http://m4ufree.net/ip.file/swf/plugins/ipplugins.php', post=post, referer=urldata['url'], XHR=True)
p1 = json.loads(p1)
p2 = client.request('http://m4ufree.net/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0' %(p1['s'],urldata['data-server']))
p2 = json.loads(p2)
p3 = client.request('http://m4ufree.net/ip.file/swf/ipplayer/api.php?hash=%s' %(p2['hash']))
p3 = json.loads(p3)
n = p3['status']
if n == False:
p2 = client.request('http://m4ufree.net/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1' %(p1['s'],urldata['data-server']))
p2 = json.loads(p2)
url = "https:%s" %p2["data"].replace("\/","/")
return url
except:
return | gpl-2.0 | -357,584,317,202,120,700 | 43.582524 | 156 | 0.507515 | false |
korylprince/pyLibraryTracker | controllers/check.py | 1 | 2704 | # coding: utf8
#Fix authentication paths
auth.settings.controller = 'check'
auth.settings.login_url = URL('check','user')
auth.settings.login_next = URL('check','index')
auth.settings.logout_next = URL('check','user')
#Only let staff log in
auth.settings.login_methods = [ldap_auth(mode='ad',
allowed_groups = ['Domain Staff'], #group name
bind_dn = 'CN=Admin,OU=Technology Staff,OU=root,DC=example,DC=com',
bind_pw = 'supersecret',
group_dn = 'OU=Domain Groups,OU=root,DC=example,DC=com',
group_name_attrib = 'cn',
group_member_attrib = 'member',
group_filterstr = 'objectClass=Group',
server='example.com',
base_dn='OU=root,DC=example,DC=com')]
response.title = "Library Logins"
@auth.requires_login()
def index():
#get last id then select 10 before
try:
lastid = db(db.logins).select().last().id
except AttributeError:
# no logins yet
return dict(form=DIV("No Records"))
query = db.logins.id > lastid-10
form = rowTable(query)
return dict(form=form)
def user():
if auth.is_logged_in() and len(request.args) == 0:
redirect(URL('index'))
response.subtitle = "Please Log In"
return dict(form=auth())
@auth.requires_login()
def search():
if db(db.logins).count() == 0:
redirect(URL('index'))
form = SQLFORM.factory(Field('name'),Field('start_time','datetime'),Field('end_time','datetime'),Field('type'))
if form.process().accepted:
#we don't want blank entries
for k,v in request.post_vars.items():
if v == '' or v is None:
del request.post_vars[k]
if len(request.post_vars.values()) == 2:
response.flash = 'You must specify at least one parameter'
else:
response.flash = ''
session.gridvars = {}
if 'name' in request.post_vars.keys():
session.gridvars['name'] = form.vars.name
if 'type' in request.post_vars.keys():
session.gridvars['type'] = form.vars.type
if 'start_time' in request.post_vars.keys():
session.gridvars['start_time'] = form.vars.start_time
if 'end_time' in request.post_vars.keys():
session.gridvars['end_time'] = form.vars.end_time
redirect(URL('result'))
return dict(form=form)
@auth.requires_login()
def result():
if db(db.logins).count() == 0:
redirect(URL('index'))
if session.gridvars is not None:
return dict(grid=rowTable(makeQuery()))
else:
redirect(URL('search'))
| gpl-3.0 | -4,228,872,156,147,075,000 | 36.041096 | 141 | 0.579142 | false |
yongtang/tensorflow | tensorflow/python/tools/api/generator/create_python_api_test.py | 9 | 5959 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for create_python_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import sys
from tensorflow.python.platform import test
from tensorflow.python.tools.api.generator import create_python_api
from tensorflow.python.util.tf_export import tf_export
@tf_export('test_op', 'test_op1', 'test.test_op2')
def test_op():
pass
@tf_export('test1.foo', v1=['test.foo'])
def deprecated_test_op():
pass
@tf_export('TestClass', 'NewTestClass')
class TestClass(object):
pass
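# The tf_export decorators above register these dummy symbols under one or more public API
# names; the tests below check that the generated API init text imports them accordingly.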
_TEST_CONSTANT = 5
_MODULE_NAME = 'tensorflow.python.test_module'
class CreatePythonApiTest(test.TestCase):
def setUp(self):
# Add fake op to a module that has 'tensorflow' in the name.
sys.modules[_MODULE_NAME] = imp.new_module(_MODULE_NAME)
setattr(sys.modules[_MODULE_NAME], 'test_op', test_op)
setattr(sys.modules[_MODULE_NAME], 'deprecated_test_op', deprecated_test_op)
setattr(sys.modules[_MODULE_NAME], 'TestClass', TestClass)
test_op.__module__ = _MODULE_NAME
TestClass.__module__ = _MODULE_NAME
tf_export('consts._TEST_CONSTANT').export_constant(
_MODULE_NAME, '_TEST_CONSTANT')
def tearDown(self):
del sys.modules[_MODULE_NAME]
def testFunctionImportIsAdded(self):
imports, _, _ = create_python_api.get_api_init_text(
packages=[create_python_api._DEFAULT_PACKAGE],
packages_to_ignore=[],
output_package='tensorflow',
api_name='tensorflow',
api_version=1)
if create_python_api._LAZY_LOADING:
expected_import = (
'\'test_op1\': '
'(\'tensorflow.python.test_module\','
' \'test_op\')')
else:
expected_import = (
'from tensorflow.python.test_module '
'import test_op as test_op1')
self.assertTrue(
expected_import in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
if create_python_api._LAZY_LOADING:
expected_import = (
'\'test_op\': '
'(\'tensorflow.python.test_module\','
' \'test_op\')')
else:
expected_import = (
'from tensorflow.python.test_module '
'import test_op')
self.assertTrue(
expected_import in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
# Also check that compat.v1 is not added to imports.
self.assertFalse('compat.v1' in imports,
msg='compat.v1 in %s' % str(imports.keys()))
def testClassImportIsAdded(self):
imports, _, _ = create_python_api.get_api_init_text(
packages=[create_python_api._DEFAULT_PACKAGE],
packages_to_ignore=[],
output_package='tensorflow',
api_name='tensorflow',
api_version=2)
if create_python_api._LAZY_LOADING:
expected_import = (
'\'NewTestClass\':'
' (\'tensorflow.python.test_module\','
' \'TestClass\')')
else:
expected_import = (
'from tensorflow.python.test_module '
'import TestClass')
self.assertTrue(
'TestClass' in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
def testConstantIsAdded(self):
imports, _, _ = create_python_api.get_api_init_text(
packages=[create_python_api._DEFAULT_PACKAGE],
packages_to_ignore=[],
output_package='tensorflow',
api_name='tensorflow',
api_version=1)
if create_python_api._LAZY_LOADING:
expected = ('\'_TEST_CONSTANT\':'
' (\'tensorflow.python.test_module\','
' \'_TEST_CONSTANT\')')
else:
expected = ('from tensorflow.python.test_module '
'import _TEST_CONSTANT')
self.assertTrue(expected in str(imports),
msg='%s not in %s' % (expected, str(imports)))
def testCompatModuleIsAdded(self):
imports, _, _ = create_python_api.get_api_init_text(
packages=[create_python_api._DEFAULT_PACKAGE],
packages_to_ignore=[],
output_package='tensorflow',
api_name='tensorflow',
api_version=2,
compat_api_versions=[1])
self.assertTrue('compat.v1' in imports,
msg='compat.v1 not in %s' % str(imports.keys()))
self.assertTrue('compat.v1.test' in imports,
msg='compat.v1.test not in %s' % str(imports.keys()))
def testNestedCompatModulesAreAdded(self):
imports, _, _ = create_python_api.get_api_init_text(
packages=[create_python_api._DEFAULT_PACKAGE],
packages_to_ignore=[],
output_package='tensorflow',
api_name='tensorflow',
api_version=2,
compat_api_versions=[1, 2])
self.assertIn('compat.v1.compat.v1', imports,
msg='compat.v1.compat.v1 not in %s' % str(imports.keys()))
self.assertIn('compat.v1.compat.v2', imports,
msg='compat.v1.compat.v2 not in %s' % str(imports.keys()))
self.assertIn('compat.v2.compat.v1', imports,
msg='compat.v2.compat.v1 not in %s' % str(imports.keys()))
self.assertIn('compat.v2.compat.v2', imports,
msg='compat.v2.compat.v2 not in %s' % str(imports.keys()))
if __name__ == '__main__':
test.main()
| apache-2.0 | -5,926,330,587,379,672,000 | 34.260355 | 80 | 0.613526 | false |
calebfoss/tensorflow | tensorflow/contrib/training/python/training/feeder.py | 58 | 14416 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A mechanism to strongly decouple input generation from consumption.
This helper handles the plumbing in order to set up a feeder task to
push generated inputs to a pool of remote consumers; or to run an
identical feeding mechanism in a separate thread in the same process.
Example usage for distributed feeding:
```
# In the consumer job:
dtypes = [tf.int32, tf.string]
shapes = [[5], []]
with tf.Graph().as_default():
feeder = tf.contrib.training.Feeder(dtypes, shapes)
int_inputs, str_inputs = feeder.get_many_fed_tensors(batch_size=10)
# ... go on to use inputs and a training/eval/etc loop as usual ...
# In the feeder job:
with tf.Graph().as_default():
input_ints = tf.constant([[1, 2, 3, 4, 5], [2, 4, 6, 8, 10]])
input_strs = tf.constant(['one_x', 'two_x'])
# Important: constructor arguments must be the same as in the consumer job!
feeder = tf.contrib.training.Feeder(dtypes, shapes)
feeder.set_many_fed_tensors([input_ints, input_strs])
feeder.add_remote_devices(
['/job:consumer/replica:0', '/job:consumer/replica:1'])
# ...or use the add_remote_replicas helper.
feeder.run_feeding_forever(lambda: tf.Session(FLAGS.master))
```
For feeding in-process, a Feeder acts similarly to a Queue, with a
QueueRunner automatically registered:
```
dtypes = [tf.int32, tf.string]
shapes = [[5], []]
# ... in main():
with tf.Graph().as_default():
feeder = tf.contrib.training.Feeder(dtypes, shapes)
feeder.set_many_fed_tensors([tf.constant([[1, 2, 3, 4, 5], [2, 4, 6, 8, 10]]),
tf.constant(['one_x', 'two_x'])])
int_inputs, str_inputs = feeder.get_many_fed_tensors(batch_size=10)
# ... go on to use inputs and a training/eval/etc loop as usual ...
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import threading
from tensorflow.contrib.training.python.training import failure_tolerator
from tensorflow.python.framework import device
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
class Feeder(object):
"""Helper to manage the plumbing for externally-fed graphs."""
REMOTE_QUEUE_RUNNERS = 'feeder_remote_queue_runners'
def __init__(
self, dtypes, shapes=None, capacity=10, shared_name='feeding_queue'):
self._dtypes = dtypes
self._shapes = shapes
self._shared_name = shared_name
self._capacity = capacity
self._local_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
dtypes=self._dtypes,
shapes=self._shapes,
name=self._shared_name,
shared_name=self._shared_name)
self._num_remote_feeds = 0
# Fake do-nothing operation that's used to prevent remote queues
# from being closed, and as a workaround for b/32749157
self._fake_op = array_ops.constant('dummy close', name='feeder_fake_op').op
self._feeding_event = threading.Event()
def get_fed_tensors(self):
"""Returns fed tensor values."""
return self._local_q.dequeue()
def get_many_fed_tensors(self, batch_size):
"""Returns a batch of fed tensor values."""
return self._local_q.dequeue_many(batch_size)
def set_fed_tensors(self, tensors):
"""Sets fed tensors."""
enq_op = self._local_q.enqueue(tensors)
queue_runner.add_queue_runner(queue_runner.QueueRunner(
self._local_q, [enq_op]))
def set_many_fed_tensors(self, tensors):
"""Sets batches fed tensors."""
enq_op = self._local_q.enqueue_many(tensors)
queue_runner.add_queue_runner(queue_runner.QueueRunner(
self._local_q, [enq_op]))
def add_remote_device(self, remote_device):
"""Requests that fed values are sent to `remote_device`."""
local_value = self.get_fed_tensors()
self._num_remote_feeds += 1
with ops.device(None): # Bypass any existing device() calls
with ops.device(remote_device):
remote_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
dtypes=self._dtypes,
shapes=self._shapes,
name=self._shared_name,
shared_name=self._shared_name)
remote_enq_op = remote_q.enqueue(local_value)
# Add a remote queue runner to feed the remote queue.
self._add_remote_queue_runner(remote_q, [remote_enq_op])
def add_remote_devices(self, devices):
for d in devices:
self.add_remote_device(d)
def add_remote_replicas(self, job_name, replica_count, feeder_task_num=None,
replicas_per_feeder=None,
base_device_spec=None):
"""Adds feeding for a range of replicas from `job_name`.
Args:
job_name: The job name portion of the remote jobs
replica_count: The total number of remote jobs
feeder_task_num: Optional; if there is more than one feeder job
in the flock this is the task # of the current process.
replicas_per_feeder: How many replicas each feeder job should
push to. If present, `feeder_task_num` is required.
base_device_spec: Optional base device spec. If present, then
each replica device spec is derived from `base_device_spec`,
with the job and replica properties set.
Raises:
ValueError: On invalid arguments.
"""
if replicas_per_feeder is not None and feeder_task_num is None:
raise ValueError(
'Must set feeder_task_num if replicas_per_feeder is provided.')
if replicas_per_feeder is None:
replicas_per_feeder = replica_count
feeder_task_num = 0
if isinstance(base_device_spec, device.DeviceSpec):
device_spec = copy.copy(base_device_spec)
else:
device_spec = device.DeviceSpec.from_string(base_device_spec or '')
device_spec.job = job_name
start_index = feeder_task_num * replicas_per_feeder
end_index = start_index + replicas_per_feeder
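    # For example, with replica_count=4 and replicas_per_feeder=2, feeder task 0 feeds
    # replicas 0 and 1 while feeder task 1 feeds replicas 2 and 3.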
for idx in range(start_index, end_index):
device_spec.replica = (idx % replica_count)
self.add_remote_device(device_spec.to_string())
def run_feeding_forever(self,
sess_callback,
outer_coordinator=None,
tolerator=None,
start_queue_runners=True):
"""Runs feeding forever.
This method exits only if `outer_coordinator` has a stop requested
or if a remote feed encounters an un-tolerated error. The most
likely cause of `outer_coordinator` stopping besides a manual call
to `request_stop()` is a `QueueRunner` thread reaching the end of
its queue or encountering an error.
Returns only after joining `outer_coordinator`.
Args:
sess_callback: A function which, when called, returns a Session
to use for feeding. Can be called multiple times due to retries.
outer_coordinator: If present, a `Coordinator` which the feeding
process will respect. Will be created if omitted.
tolerator: If present, a `failure_tolerator.FailureTolerator` which is
used to manage retries of feeding the remote devices.
start_queue_runners: Whether to start queue runners before
beginning to feed the remote devices. Defaults to True. If
False and no other mechanism is used to start queue runners, this
method will hang forever without doing work.
"""
# We use /two/ coordinators: one which runs normal queue
# runners (outer_coordinator), and one which runs the remote
# enqueues (using an inner coordinator) with retries and failure
# tolerance. By using two coordinators, errors
# encountered while running the remote enqueue ops don't cause the
# outer_coordinator to be shut down.
if outer_coordinator is None:
outer_coordinator = coordinator.Coordinator()
# Start the outer queue runners:
if start_queue_runners:
session = sess_callback()
# Work around b/32749157 by running an operation before proceeding --
# this way the session used for queue runners will be fully established
# before we create another session with the same target.
session.run(self._fake_op)
queue_runner.start_queue_runners(sess=session,
coord=outer_coordinator)
if self._num_remote_feeds == 0:
self._feeding_event.set()
outer_coordinator.join()
return
else:
try:
self._feed_remote_queues_forever(
sess_callback, outer_coordinator, tolerator)
finally:
self._feeding_event.set()
outer_coordinator.join()
def wait_until_feeding(self, timeout=None):
"""Waits until run_feeding_forever() is entered.
Does not return until it is safe to create new sessions against
the same target as the feeder is using; see b/32749157.
Args:
timeout: An optional timeout in seconds.
Returns:
True if feeding has begun; False if the timeout was reached.
"""
return self._feeding_event.wait(timeout=timeout)
def _feed_remote_queues_forever(
self, sess_callback, outer_coordinator, tolerator):
if tolerator is None:
tolerator = failure_tolerator.FailureTolerator(limit=5)
# In a retry loop, keep the remote queue runners going:
while True:
if outer_coordinator.should_stop():
return
inner_coordinator = coordinator.Coordinator()
# Make sure inner_coordinator stops when outer_coordinator does:
_link_coordinators(inner_coordinator, outer_coordinator)
# Create a fresh session to use for remote queues:
inner_session = sess_callback()
inner_session.run(self._fake_op) # Work around b/32749157, as above
queue_runner.start_queue_runners(sess=inner_session,
coord=inner_coordinator,
collection=Feeder.REMOTE_QUEUE_RUNNERS)
self._feeding_event.set() # Notify that feeding has begun
try:
with tolerator.forgive():
# Wait for a stop to be requested.
inner_coordinator.wait_for_stop()
# TODO(shoutis): If outer_coordinator.should_stop(), it
# would be nice to interrupt the remote queue runners (which
# may be blocked if their remote queue is full) -- but
# there's no way currently; see b/32774422.
# Cause any exceptions from the remote queue runners to be
# reraised immediately, without waiting for their associated
# threads to terminate like join() would. This means a retry
# can begin immediately after any remote device fails,
# rather than having to wait for any pending enqueues to
# other remote devices to finish first.
inner_coordinator.raise_requested_exception()
# If this line is reached, there was a graceful shutdown
# requested.
# Request the outer coordinator to stop. Since
# outer_coordinator.request_stop() is the currently only way
# for inner_coordinator() to finish without failure, this is
# redundant, but it's harmless and defends against infinite
# hangs should code changes make it possible for
# inner_coordinator to finish in other ways.
outer_coordinator.request_stop()
return
except Exception as e:
# Pass non-forgiven errors along to outer_coordinator:
outer_coordinator.request_stop(e)
raise
def _add_remote_queue_runner(self, queue, enq_ops):
"""Adds a remote queue runner to the graph.
These queue runners differ from the standard in two ways: First,
they never close their queue. Second, they are added to the
`Feeder.REMOTE_QUEUE_RUNNERS` collection, rather than
`ops.GraphKeys.QUEUE_RUNNERS`, so they can be started/stopped
    separately.
Args:
queue: The queue.
enq_ops: A list of ops which perform enqueues (each on its own thread).
"""
runner = queue_runner.QueueRunner(
queue,
enq_ops,
cancel_op=self._fake_op,
close_op=self._fake_op)
queue_runner.add_queue_runner(
runner, collection=Feeder.REMOTE_QUEUE_RUNNERS)
def _link_coordinators(inner_coord, outer_coord, start=True, wait_time=5):
"""Returns a thread which stops `inner_coord` whenever `outer_coord` stops.
The thread is also registered with `inner_coord`.
Args:
inner_coord: The `Coordinator` to stop.
outer_coord: The `Coordinator` to watch for stopping.
start: Whether to start the thread before returning.
wait_time: The number of seconds for each `outer_coord.wait_for_stop` call.
Returns:
A `Thread` which links the coordinators.
"""
def _link_thread():
while True:
if inner_coord.should_stop():
# The inner coordinator is stopping, so this thread's done.
return
if outer_coord.wait_for_stop(wait_time):
# The outer coordinator stopped; we should stop the inner.
with inner_coord.stop_on_exception():
# Causes a re-raise, but without waiting for registered threads
outer_coord.raise_requested_exception()
inner_coord.request_stop()
return
result = threading.Thread(target=_link_thread)
inner_coord.register_thread(result)
if start:
result.start()
return result
| apache-2.0 | 6,258,930,024,841,397,000 | 36.83727 | 80 | 0.655452 | false |
Edgar324/GeoVis | weathervis/views.py | 1 | 22041 | from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from netCDF4 import Dataset
from django.http.response import JsonResponse
import numpy as np
import json
from overlayprocessor import OverlayProcessor
from candidateselector import CandidateSelector
from uncertaintyprocessor import UncertaintyProcessor
from consistencyprocessor import ConsistencyProcessor
from skimage import measure
import cv2
import math
# Create your views here.
def index(request):
return render_to_response('weathervis/weathervis.html')
def getApcpForecast(request):
cached = 1
jsData = []
if cached == 0:
infoPath = 'E:/Data/refcstv2_precip_ccpav2_000_to_024.nc'
rootgrp = Dataset(infoPath, format='NETCDF4')
height = len(rootgrp.dimensions['yf'])
width = len(rootgrp.dimensions['xf'])
lons_fcst = np.ma.getdata(rootgrp.variables['lons_fcst'][:][:])
lats_fcst = np.ma.getdata(rootgrp.variables['lats_fcst'][:][:])
ens = len(rootgrp.dimensions['ens'])
apcp = np.ma.getdata(rootgrp.variables['apcp_fcst_ens'][100][:][:][:])
rootgrp.close()
isoValues = [5, 10, 25]
for index in isoValues:
averageApcp = np.zeros((height, width), np.float32)
for h in range(height):
for w in range(width):
temp = 0
for e in range(ens):
temp += apcp[e][h][w]
averageApcp[h][w] = float(temp / ens)
contours = measure.find_contours(averageApcp, index)
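            # find_contours returns fractional (row, col) grid indices; the loop below maps each
            # contour point to (lon, lat) by interpolating between neighbouring grid cells.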
data = []
for n, contour in enumerate(contours):
contourLen = len(contour[:, 1])
coutourData = []
for i in range(contourLen):
point = contour[i, :]
x = int(point[1]);
next_x = x + 1;
if (next_x >= width): next_x = width - 1;
y = int(point[0]);
next_y = y + 1;
if (next_y >= height): next_y = height - 1;
tempArray = []
if (x == next_x):
tempArray.append(float(lons_fcst[y][x]))
else:
tempArray.append(float((point[1] - x) * lons_fcst[y][next_x] + (next_x - point[1]) * lons_fcst[y][x]))
if (y == next_y):
tempArray.append(float(lats_fcst[y][x]))
else:
tempArray.append(float((point[0] - y) * lats_fcst[next_y][x] + (next_y - point[0]) * lats_fcst[y][x]))
coutourData.append(tempArray);
data.append(coutourData)
jsData.append(data)
# save the data
f = open('E:/GeoVis/weathervis/fcst.txt', 'w')
f.write(str(len(jsData)) + "\n")
for data in jsData:
f.write(str(len(data)) + "\n")
for line in data:
f.write(str(len(line)) +"\n")
for p in line:
f.write(str(p[0]) + "\n")
f.write(str(p[1]) + "\n")
f.close()
else:
# load the data
f = open('E:/GeoVis/weathervis/fcst.txt', 'r')
dataNum = int(f.readline())
for i in range(dataNum):
data = []
lineNum = int(f.readline())
for j in range(lineNum):
line = []
pNum = int(f.readline())
for k in range(pNum):
p = []
p.append(float(f.readline()))
p.append(float(f.readline()))
line.append(p)
data.append(line)
jsData.append(data)
try:
json_data = json.dumps(jsData)
except Exception as ex:
print ex
return JsonResponse(json_data, safe=False)
def generateCand(request):
candNumber = int(request.GET['n'])
infoPath = 'E:/Data/refcstv2_precip_ccpav2_000_to_024.nc'
rootgrp = Dataset(infoPath, format='NETCDF4')
height = len(rootgrp.dimensions['yf'])
width = len(rootgrp.dimensions['xf'])
ens = len(rootgrp.dimensions['ens'])
variableData = []
apcp = np.ma.getdata(rootgrp.variables['apcp_fcst_ens'][100][:][:][:])
variableData.append(apcp)
rootgrp.close()
averageData = np.zeros((height, width), np.float32)
for e in range(ens):
for h in range(height):
for w in range(width):
averageData[h][w] += apcp[e][h][w]
for h in range(height):
for w in range(width):
averageData[h][w] /= ens
#img = cv2.imread('E:/Geovis/weathervis/test.png')
#img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
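    # The ensemble-mean field is rescaled below into an 8-bit greyscale image so the
    # k-means based candidate selector can work on it like an ordinary intensity image.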
img = np.zeros((height, width), np.uint8)
max_value = np.amax(averageData)
for h in range(height):
for w in range(width):
img[h][w] = np.uint8(averageData[h][w] / max_value * 255)
candSelector = CandidateSelector(img)
candSelector.GenerateKMeansCandidates(candNumber, 0.1)
#save the data
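    # cand.txt layout: first line is the candidate count, then x, y and r on separate lines
    # for each candidate; the other views read the file back in the same order.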
f = open('E:/GeoVis/weathervis/cand.txt', 'w')
f.write(str(len(candSelector.candidates)) + "\n")
for index in candSelector.candidates:
f.write(str(index['x']) + "\n")
f.write(str(index['y']) + "\n")
f.write(str(index['r']) + "\n")
f.close()
return HttpResponse('success')
def updateUncertaintyValues(request):
# load parameters
#levelNumber = int(request.GET['ln'])
levelNumber = 4
# load data
infoPath = 'E:/Data/refcstv2_precip_ccpav2_000_to_024.nc'
rootgrp = Dataset(infoPath, format='NETCDF4')
height = len(rootgrp.dimensions['yf'])
width = len(rootgrp.dimensions['xf'])
ens = len(rootgrp.dimensions['ens'])
variableData = []
apcp = np.ma.getdata(rootgrp.variables['apcp_fcst_ens'][100][:][:][:])
variableData.append(apcp)
rootgrp.close()
# load candidates
candidatePos = []
f = open('E:/GeoVis/weathervis/cand.txt', 'r')
candNumber = int(f.readline())
for i in range(0, candNumber):
cand = {}
cand['x'] = float(f.readline())
cand['y'] = float(f.readline())
cand['r'] = float(f.readline())
candidatePos.append(cand)
f.close()
unValues = []
unProcessor = UncertaintyProcessor(ens, height, width, variableData[0])
# process level 0
zeroUn = [[0 for x in range(width)] for x in range(height)]
for y in range(0, height):
for x in range(0, width):
zeroUn[y][x] = unProcessor.ProcessUnVolume(x, y, 0)
unValues.append(zeroUn)
# process level >= 1
for level in range(1, levelNumber + 1):
tempValue = [0 for x in range(candNumber)]
for i in range(0, candNumber):
tempValue[i] = unProcessor.ProcessUnVolume(candidatePos[i]['x'], candidatePos[i]['y'], level)
unValues.append(tempValue)
# save uncertainty values
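    # unValue.txt layout: the level count, then mean/var pairs for every grid cell at level 0,
    # then mean/var pairs per candidate for each coarser level; singleLevelOpt() and
    # consistencyOpt() read the values back in this order.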
f = open('E:/GeoVis/weathervis/unValue.txt', 'w')
f.write(str(levelNumber) + '\n')
for i in range(height):
for j in range(width):
f.write(str(unValues[0][i][j]['mean']) + '\n')
f.write(str(unValues[0][i][j]['var']) + '\n')
for level in range(levelNumber):
for i in range(candNumber):
f.write(str(unValues[level + 1][i]['mean']) + '\n')
f.write(str(unValues[level + 1][i]['var']) + '\n')
f.close()
return HttpResponse('success')
def singleLevelOpt(request):
# load parameters
alpha = float(request.GET['a'])
beta = float(request.GET['b'])
theta = float(request.GET['c'])
# load data
infoPath = 'E:/Data/refcstv2_precip_ccpav2_000_to_024.nc'
rootgrp = Dataset(infoPath, format='NETCDF4')
height = len(rootgrp.dimensions['yf'])
width = len(rootgrp.dimensions['xf'])
ens = len(rootgrp.dimensions['ens'])
lons_fcst = np.ma.getdata(rootgrp.variables['lons_fcst'][:][:])
lats_fcst = np.ma.getdata(rootgrp.variables['lats_fcst'][:][:])
variableData = []
apcp = np.ma.getdata(rootgrp.variables['apcp_fcst_ens'][100][:][:][:])
variableData.append(apcp)
rootgrp.close()
# load candidates
candidatePos = []
f = open('E:/GeoVis/weathervis/cand.txt', 'r')
candNumber = int(f.readline())
for i in range(0, candNumber):
cand = {}
cand['x'] = float(f.readline())
cand['y'] = float(f.readline())
cand['r'] = float(f.readline())
candidatePos.append(cand)
f.close()
# load uncertainty values
f = open('E:/GeoVis/weathervis/unValue.txt', 'r')
levelNumber = int(f.readline())
unValues = []
zeroUn = [[0 for x in range(width)] for x in range(height)]
for i in range(height):
for j in range(width):
tempDict = {}
tempDict['mean'] = float(f.readline())
tempDict['var'] = float(f.readline())
zeroUn[i][j] = tempDict
unValues.append(zeroUn)
for level in range(levelNumber):
tempValue = [0 for x in range(candNumber)]
for i in range(candNumber):
tempDict = {}
tempDict['mean'] = float(f.readline())
tempDict['var'] = float(f.readline())
tempValue[i] = tempDict
unValues.append(tempValue)
f.close()
# solve single level optimization
levelResult = []
overlayProc = OverlayProcessor(variableData, candidatePos, unValues)
overlayProc.setParameters(alpha, beta, theta)
for level in range(0, levelNumber):
levelResult.append(overlayProc.heuristicSolve(level + 1))
# save level optimization result
f = open('E:/GeoVis/weathervis/optresult.txt', 'w')
f.write(str(levelNumber) + '\n')
f.write(str(len(levelResult[0])) + '\n')
for i in range(0, levelNumber):
for j in range(len(levelResult[i])):
f.write(str(int(levelResult[i][j])) + '\n')
f.close()
f = open('E:/GeoVis/weathervis/optresultcons.txt', 'w')
f.write(str(levelNumber) + '\n')
f.write(str(len(levelResult[0])) + '\n')
for i in range(0, levelNumber):
for j in range(len(levelResult[i])):
f.write(str(int(levelResult[i][j])) + '\n')
f.close()
return HttpResponse('success')
def consistencyOpt(request):
alpha = float(request.GET['a'])
beta = float(request.GET['b'])
theta = float(request.GET['c'])
gamma = float(request.GET['gamma'])
# load data
infoPath = 'E:/Data/refcstv2_precip_ccpav2_000_to_024.nc'
rootgrp = Dataset(infoPath, format='NETCDF4')
height = len(rootgrp.dimensions['yf'])
width = len(rootgrp.dimensions['xf'])
variableData = []
apcp = np.ma.getdata(rootgrp.variables['apcp_fcst_ens'][100][:][:][:])
variableData.append(apcp)
rootgrp.close()
# load candidates
candidatePos = []
f = open('E:/GeoVis/weathervis/cand.txt', 'r')
candNumber = int(f.readline())
for i in range(0, candNumber):
cand = {}
cand['x'] = float(f.readline())
cand['y'] = float(f.readline())
cand['r'] = float(f.readline())
candidatePos.append(cand)
f.close()
# load uncertainty values
f = open('E:/GeoVis/weathervis/unValue.txt', 'r')
levelNumber = int(f.readline())
unValues = []
zeroUn = [[0 for x in range(width)] for x in range(height)]
for i in range(height):
for j in range(width):
tempDict = {}
tempDict['mean'] = float(f.readline())
tempDict['var'] = float(f.readline())
zeroUn[i][j] = tempDict
unValues.append(zeroUn)
for level in range(levelNumber):
tempValue = [0 for x in range(candNumber)]
for i in range(candNumber):
tempDict = {}
tempDict['mean'] = float(f.readline())
tempDict['var'] = float(f.readline())
tempValue[i] = tempDict
unValues.append(tempValue)
f.close()
f = open('E:/GeoVis/weathervis/optresult.txt', 'r')
levelNumber = int(f.readline())
siteNumber = int(f.readline())
levelResult = np.zeros((levelNumber, siteNumber), np.int)
for i in range(0, levelNumber):
for j in range(siteNumber):
levelResult[i][j] = int(f.readline())
f.close()
consProc = ConsistencyProcessor(candidatePos, levelNumber)
# update weights
candWeights = []
for i in range(levelNumber):
candWeights.append(consProc.updateLevelData(i + 1, levelResult, gamma))
levelResult = []
overlayProc = OverlayProcessor(variableData, candidatePos, unValues)
overlayProc.setParameters(alpha, beta, theta)
for level in range(0, levelNumber):
levelResult.append(overlayProc.heuristicSolveWeight(level + 1, candWeights[level]))
# save level optimization result
f = open('E:/GeoVis/weathervis/optresultcons.txt', 'w')
f.write(str(levelNumber) + '\n')
f.write(str(len(levelResult[0])) + '\n')
for i in range(0, levelNumber):
for j in range(len(levelResult[i])):
f.write(str(int(levelResult[i][j])) + '\n')
f.close()
# single level optimization
return HttpResponse('success')
def getOptResult(request):
currentLevel = int(request.GET['level'])
# load data
infoPath = 'E:/Data/refcstv2_precip_ccpav2_000_to_024.nc'
rootgrp = Dataset(infoPath, format='NETCDF4')
height = len(rootgrp.dimensions['yf'])
width = len(rootgrp.dimensions['xf'])
lons_fcst = np.ma.getdata(rootgrp.variables['lons_fcst'][:][:])
lats_fcst = np.ma.getdata(rootgrp.variables['lats_fcst'][:][:])
rootgrp.close()
if currentLevel == 0:
f = open('E:/GeoVis/weathervis/unValue.txt', 'r')
levelNumber = int(f.readline())
jsData = []
nodeCount = 0
for i in range(height):
for j in range(width):
glyphData = {}
glyphData['r'] = 0.05
glyphData['lon'] = float(lons_fcst[i][j])
glyphData['lat'] = float(lats_fcst[i][j])
jsData.append(glyphData)
nodeCount += 1
try:
json_data = json.dumps(jsData)
except Exception as ex:
print ex
print nodeCount
return JsonResponse(json_data, safe=False)
#load level optimization data
# load candidates
candidatePos = []
f = open('E:/GeoVis/weathervis/cand.txt', 'r')
candNumber = int(f.readline())
for i in range(0, candNumber):
cand = {}
cand['x'] = float(f.readline())
cand['y'] = float(f.readline())
cand['r'] = float(f.readline())
candidatePos.append(cand)
f.close()
f = open('E:/GeoVis/weathervis/optresultcons.txt', 'r')
levelNumber = int(f.readline())
siteNumber = int(f.readline())
levelResult = np.zeros((levelNumber, siteNumber), np.int)
for i in range(0, levelNumber):
for j in range(siteNumber):
levelResult[i][j] = int(f.readline())
f.close()
if (currentLevel >= levelNumber): currentLevel = levelNumber
jsData = []
nodeCount = 0
for i in range(siteNumber):
if (levelResult[currentLevel - 1][i] == 1):
glyphData = {}
r = pow(2, int(i / candNumber))
if r < candidatePos[i % candNumber]['r']:
r = candidatePos[i % candNumber]['r']
glyphData['r'] = r * (lons_fcst[1][2] - lons_fcst[1][1])
x = int(float(candidatePos[i % candNumber]['x']))
y = int(float(candidatePos[i % candNumber]['y']))
glyphData['lon'] = float(lons_fcst[y][x])
glyphData['lat'] = float(lats_fcst[y][x])
jsData.append(glyphData)
nodeCount += 1
try:
json_data = json.dumps(jsData)
except Exception as ex:
print ex
print nodeCount
return JsonResponse(json_data, safe=False)
# def glyphValues(request):
# #level = Math.round(request.GET['level'])
# #model = request.GET['model']
# #variable = request.GET['var']
# #date = request.GET['date']
# level = 3
# infoPath = 'E:/Data/refcstv2_precip_ccpav2_000_to_024.nc'
# rootgrp = Dataset(infoPath, format='NETCDF4')
# xfLen = len(rootgrp.dimensions['xf'])
# yfLen = len(rootgrp.dimensions['yf'])
# ensLen = len(rootgrp.dimensions['ens'])
# lons_fcst = np.ma.getdata(rootgrp.variables['lons_fcst'][:][:])
# lats_fcst = np.ma.getdata(rootgrp.variables['lats_fcst'][:][:])
# rootgrp.close()
# f = open('E:/Data/00_24/apcp_ens/2002010200.apcp', 'rb')
# dataValues = np.fromfile(f, dtype=np.float32).reshape((ensLen, yfLen, xfLen))
# f.close()
# jsData = []
# xNum = int(round(xfLen / level))
# yNum = int(round(yfLen / level))
# meanArray = np.zeros(ensLen)
# for i in range(0, yNum):
# for j in range(0, xNum):
# varMean = 0
# for k in range(0, ensLen):
# meanArray[k] = 0
# for bi in range(0, level):
# for bj in range(0, level):
# meanArray[k] += dataValues[k][i * level + bi][j * level + bj]
# meanArray[k] /= (level * level)
# varMean += meanArray[k]
# varMean /= ensLen
# varVari = 0
# for k in range(0, ensLen):
# varVari += pow(meanArray[k] - varMean, 2)
# varVari /= ensLen
# glyphData = {}
# glyphData['mean'] = varMean
# glyphData['var'] = varVari
# glyphData['lon'] = float(lons_fcst[i * level][j * level])
# glyphData['lat'] = float(lats_fcst[i * level][j * level])
# jsData.append(glyphData)
# try:
# json_data = json.dumps(jsData)
# except Exception as ex:
# print ex
# return JsonResponse(json_data, safe=False)
# def kmeansValues(request):
# level = int(request.GET['level'])
# infoPath = 'E:/Data/refcstv2_precip_ccpav2_000_to_024.nc'
# rootgrp = Dataset(infoPath, format='NETCDF4')
# lons_fcst = np.ma.getdata(rootgrp.variables['lons_fcst'][:][:])
# lats_fcst = np.ma.getdata(rootgrp.variables['lats_fcst'][:][:])
# rootgrp.close()
#
# img = cv2.imread('E:/Geovis/weathervis/test.png')
# img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# nodeCluster = NodeCluster()
# nodeCluster.processImage(img, level * 50)
# jsData = []
# for index in nodeCluster.labelIndex:
# x = round(index['x'])
# y = round(index['y'])
# pixelNum = index['nodeNum']
# glyphData = {}
# glyphData['r'] = math.sqrt(pixelNum) * (lons_fcst[1][1] - lons_fcst[1][0]) / 3
# glyphData['lon'] = float(lons_fcst[y][x])
# glyphData['lat'] = float(lats_fcst[y][x])
# jsData.append(glyphData)
# try:
# json_data = json.dumps(jsData)
# except Exception as ex:
# print ex
# return JsonResponse(json_data, safe=False)
#
# def hierarValues(request):
# level = int(request.GET['level'])
# infoPath = 'E:/Data/refcstv2_precip_ccpav2_000_to_024.nc'
# rootgrp = Dataset(infoPath, format='NETCDF4')
# lons_fcst = np.ma.getdata(rootgrp.variables['lons_fcst'][:][:])
# lats_fcst = np.ma.getdata(rootgrp.variables['lats_fcst'][:][:])
# rootgrp.close()
#
# img = cv2.imread('E:/Geovis/weathervis/test.png')
# img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# test = img[0, 0]
# nodeCluster = NodeCluster()
# nodeCluster.processImageHier(img, level * 50)
# jsData = []
# for index in nodeCluster.labelIndex:
# x = round(index['x'])
# y = round(index['y'])
# pixelNum = index['nodeNum']
# glyphData = {}
# glyphData['r'] = math.sqrt(pixelNum) * (lons_fcst[1][1] - lons_fcst[1][0]) / 3
# glyphData['lon'] = float(lons_fcst[y][x])
# glyphData['lat'] = float(lats_fcst[y][x])
# jsData.append(glyphData)
# try:
# json_data = json.dumps(jsData)
# except Exception as ex:
# print ex
# return JsonResponse(json_data, safe=False)
#
# def linearOpt(request):
# level = int(request.GET['level'])
# alpha = float(request.GET['a'])
# beta = float(request.GET['b'])
# theta = float(request.GET['c'])
# infoPath = 'E:/Data/refcstv2_precip_ccpav2_000_to_024.nc'
# rootgrp = Dataset(infoPath, format='NETCDF4')
# lons_fcst = np.ma.getdata(rootgrp.variables['lons_fcst'][:][:])
# lats_fcst = np.ma.getdata(rootgrp.variables['lats_fcst'][:][:])
# rootgrp.close()
#
# img = cv2.imread('E:/Geovis/weathervis/test.png')
# img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# test = img[0, 0]
# nodeCluster = NodeCluster()
# nodeCluster.processImageHier(img, 200)
# # update node x y coordinate
# for index in nodeCluster.labelIndex:
# x = int(index['x'])
# y = int(index['y'])
# index['x'] = lons_fcst[y][x]
# index['y'] = lats_fcst[y][x]
# proc = OverlayProcessor(nodeCluster)
# proc.setCurrentLevel(level)
# proc.setParameters(alpha, beta, theta)
# proc.heuristicSolve()
# jsData = []
# nodeCount = 0
# for i in range(0, len(proc.S)):
# if (proc.z[i] == 1):
# index = proc.S[i]
# pixelNum = index['nodeNum']
# glyphData = {}
# glyphData['r'] = (index['level']) * 0.2
# glyphData['lon'] = float(index['x'])
# glyphData['lat'] = float(index['y'])
# jsData.append(glyphData)
# nodeCount += 1
# try:
# json_data = json.dumps(jsData)
# except Exception as ex:
# print ex
# print nodeCount
# return JsonResponse(json_data, safe=False)
| apache-2.0 | 4,298,894,791,917,222,000 | 36.296108 | 126 | 0.561635 | false |
zombiecollin/MySQLdatagen | MySQLdatagen/__init__.py | 1 | 3423 | import os
import sys
import MySQLdb
import pprint
from globals import MYSQL_CONFIG_PATH
from commonIO import commonIO
from configparser import ConfigParser
from faker import Faker
def joke():
return (u'Wenn ist das Nunstck git und Slotermeyer? Ja! ... '
u'Beiherhund das Oder die Flipperwaldt gersput.')
class MySQLdatagen():
def __init__(self):
self.main()
def main(self):
modes = {
"M": "[M]ySQL configuration",
"G": "[G]enerate a row of data",
}
functions = {}
for key, mode in modes.iteritems():
print mode
functions[key] = mode.translate(None, '[]').replace(' ', '_').lower()
modeSelection = ""
noSelection = True
while modeSelection not in modes:
#TODO: i dont like this here
if noSelection == False:
print "Invalid selection"
modeSelection = raw_input("Please select a mode [M]")
if modeSelection == "":
modeSelection = "M"
noSelection = False
try:
func = getattr(self, "mode_" + functions[modeSelection])
except:
print 'function not found "%s"' % (functions[modeSelection])
else:
func()
def mode_mysql_configuration(self):
dbConfig = self.select_db_config()
print dbConfig
db = MySQLdb.connect(db=dbConfig['database'], user=dbConfig['user'])
c = db.cursor()
c.execute("SHOW TABLES")
print c.fetchall()
def mode_generate_a_row_of_data(self):
return False
def select_db_config(self):
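        # list the available MySQL config files (ignoring default.ini), let the
        # user pick one, and return that section's options as a dict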
mysqlConfigs = os.listdir(MYSQL_CONFIG_PATH)
mysqlConfigs.remove('default.ini')
for mysqlConfig in mysqlConfigs:
mysqlConfigs[mysqlConfigs.index(mysqlConfig)] = mysqlConfig.replace('.ini', '')
mysqlConfig = commonIO.selection_from_list(mysqlConfigs)
while mysqlConfig not in mysqlConfigs:
mysqlConfig = commonIO.selection_from_list(mysqlConfigs)
parser = ConfigParser()
parser.read(MYSQL_CONFIG_PATH + "/" + mysqlConfig + ".ini")
db = {}
if parser.has_section(mysqlConfig):
items = parser.items(mysqlConfig)
for item in items:
db[item[0]] = item[1]
else:
            raise Exception('{0} not found in the {1} file'.format(mysqlConfig, MYSQL_CONFIG_PATH + "/" + mysqlConfig + ".ini"))
return db
def createMysqlConfig(mysqlConfigsDir = os.path.dirname(os.path.realpath(__file__)) + "/mysql-configs"):
print("You need to configure a connection...")
configName = raw_input('Enter your config name: ')
configFileName = configName + ".ini"
#[dMySQL]
#host = [INSERTHOSTHERE]
#database = [INSERTDBHERE]
#user = [INSERTUSERHERE]
#password = [INSERTPASSHERE]
configHost = "localhost"
configDb = "pnds"
configUser = "root"
configPassword = ""
file = open(mysqlConfigsDir + "/" + configFileName, "w")
file.write("[" + configName + "]\n")
file.write("host = " + configHost + "\n")
file.write("database = " + configDb + "\n")
file.write("user = " + configUser + "\n")
file.write("password = " + configPassword + "\n")
file.close()
if __name__ == "__main__":
    MySQLdatagen()
| mit | 4,190,857,089,743,426,000 | 28.508621 | 108 | 0.570844 | false |
SRI-CSL/radler | radler/exports.py | 1 | 1984 | # This file is part of Radler.
#
# Radler is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Radler is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radler. If not, see <http://www.gnu.org/licenses/>.
'''
@license: GPLv3
Created on Jul, 2015
@author: Léonard Gérard [email protected]
'''
from pydot import Dot, Graph, Node, Edge, Cluster
from radler.radlr.rast import AstVisitor, follow_links
def _n2c(n):
qn = str(n._qname)
return Cluster(qn, id=qn, label=str(n._name))
def _n2n(n):
qn = str(n._qname)
return Node(qn, id=qn, label=str(n._name))
def node(visitor, n, parentg):
parentg.add_node(_n2n(n))
def machine(visitor, m, parentg):
md = _n2c(m)
parentg.add_subgraph(md)
visitor.node_bf(m, md)
def plant2dot(plantinfo):
qn = str(plantinfo.plant._qname)
# Create the hierarchical graph with all the nodes (no edges for now)
plantd = Dot(qn,
simplify=True,
comment="Generated by radler for {}".format(qn))
v = AstVisitor({'node' : node,
'lynxsecure_vm' : machine,
'certikos_vm' : machine,
'machine' : machine },
onleaf=follow_links(AstVisitor.leaf_bf), #@UndefinedVariable
kind='bf')
v.visit(plantinfo.plant, plantd)
# Add all edges
for cl in plantinfo.channels.values():
for c in cl:
if c.incoming:
plantd.add_edge(Edge(str(c.pub), str(c.sub)))
print(plantd.to_string())
| gpl-3.0 | 2,242,103,763,710,334,700 | 28.58209 | 79 | 0.638244 | false |
dethos/cloudroutes-service | src/web/domains.py | 6 | 3058 | ######################################################################
# Cloud Routes Web Application
# -------------------------------------------------------------------
# Domains Class
######################################################################
import rethinkdb as r
class Domain(object):
def __init__(self, did=None):
''' Create a domain object and set attributes as None for now '''
self.did = did
self.domain = None
self.apikey = None
self.failover = None
self.uid = None
self.email = None
def createDomain(self, domaindata, rdb):
''' This will create a domain with the supplied domain information '''
if self.exists(domaindata['domain'], rdb):
return 'exists'
else:
results = r.table('domains').insert(domaindata).run(rdb)
if results['inserted'] == 1:
return results['generated_keys'][0]
else:
return False
def deleteDomain(self, uid, did, rdb):
''' This will delete a specified domain id '''
check = r.table('domains').get(did).run(rdb)
if check['uid']:
delete = r.table('domains').get(did).delete().run(rdb)
if delete['deleted'] == 1:
return True
else:
return False
else:
return False
def exists(self, domain, rdb):
'''
This will check to see if a domain with
this name already exists, across all users
'''
result = r.table('domains').filter({'domain': domain}).count().run(rdb)
if result >= 1:
return True
else:
return False
def getDID(self, domain, rdb):
'''
This will lookup a domain by the domain name and return the domain id
'''
result = r.table('domains').filter({'domain': domain}).run(rdb)
xdata = {}
for x in result:
xdata[x['domain']] = x['did']
return xdata[domain]
def get(self, method, lookup, rdb):
'''
        This will return a domain's information based on the data provided
'''
if method == 'did':
did = lookup
else:
did = self.getDID(lookup, rdb)
        results = r.table('domains').get(did).run(rdb)
if results:
self.did = did
self.domain = results['domain']
self.apikey = results['apikey']
self.failover = results['failover']
self.uid = results['uid']
self.email = results['email']
return self
else:
return False
def getMonitors(self, did, rdb):
'''
This will lookup and return all of the
monitors available for this domain
'''
results = r.table('monitors').filter({'did': did}).run(rdb)
xdata = {}
for x in results:
if x['id']:
xdata[x['id']] = x
return xdata
if __name__ == '__main__':
pass
| agpl-3.0 | -1,138,986,112,556,459,500 | 29.888889 | 79 | 0.482668 | false |
eduNEXT/edunext-platform | lms/djangoapps/courseware/rules.py | 3 | 6254 | """
django-rules and Bridgekeeper rules for courseware related features
"""
import logging
import traceback
import laboratory
import rules
import six
from bridgekeeper.rules import EMPTY, Rule
from django.conf import settings
from django.db.models import Q
from opaque_keys.edx.django.models import CourseKeyField
from opaque_keys.edx.keys import CourseKey, UsageKey
from xblock.core import XBlock
from course_modes.models import CourseMode
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from student.models import CourseAccessRole, CourseEnrollment
from student.roles import CourseRole, OrgRole
from xmodule.course_module import CourseDescriptor
from xmodule.error_module import ErrorDescriptor
from xmodule.x_module import XModule
from .access import has_access
LOG = logging.getLogger(__name__)
@rules.predicate
def is_track_ok_for_exam(user, exam):
"""
Returns whether the user is in an appropriate enrollment mode
"""
course_id = CourseKey.from_string(exam['course_id'])
mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_id)
return is_active and mode in (CourseMode.VERIFIED, CourseMode.MASTERS, CourseMode.PROFESSIONAL)
# The edx_proctoring.api uses this permission to gate access to the
# proctored experience
can_take_proctored_exam = is_track_ok_for_exam
rules.set_perm('edx_proctoring.can_take_proctored_exam', is_track_ok_for_exam)
class HasAccessRule(Rule):
"""
A rule that calls `has_access` to determine whether it passes
"""
def __init__(self, action):
self.action = action
def check(self, user, instance=None):
return has_access(user, self.action, instance)
def query(self, user):
# Return an always-empty queryset filter so that this always
# fails permissions, but still passes the is_possible_for check
# that is used to determine if the rule should allow a user
# into django admin
return Q(pk__in=[])
class StaffAccessExperiment(laboratory.Experiment):
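    """
    A laboratory experiment comparing the legacy `has_access` check (control)
    with the SQL-based query check (candidate), logging any mismatch.
    """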
def compare(self, control, candidate):
return bool(control.value) == candidate.value
def publish(self, result):
if not result.match:
LOG.warning(
u"StaffAccessExperiment: control=%r, candidate=%r\n%s",
result.control,
result.candidates[0],
"".join(traceback.format_stack(limit=10))
)
class HasStaffAccessToContent(Rule):
"""
Check whether a user has `staff` access in a course.
Expects to be used to filter a CourseOverview queryset
"""
def check(self, user, instance=None):
"""
Return True if the supplied user has staff-level access to the supplied content.
"""
staff_sql_experiment = StaffAccessExperiment(
raise_on_mismatch=settings.DEBUG,
context={'userid': user.id, 'instance': repr(instance)}
)
staff_sql_experiment.control(self._check_with_has_access, args=(user, instance))
staff_sql_experiment.candidate(self._check_with_query, args=(user, instance))
return staff_sql_experiment.conduct()
def _check_with_has_access(self, user, instance=None):
return has_access(user, 'staff', instance)
def _check_with_query(self, user, instance=None):
"""
Use the query method to check whether a single user has access to the supplied object.
"""
# delegate the work to type-specific functions.
# (start with more specific types, then get more general)
if isinstance(instance, (CourseDescriptor, CourseOverview)):
course_key = instance.id
elif isinstance(instance, (ErrorDescriptor, XModule, XBlock)):
course_key = instance.scope_ids.usage_id.course_key
elif isinstance(instance, CourseKey):
course_key = instance
elif isinstance(instance, UsageKey):
course_key = instance.course_key
elif isinstance(instance, six.string_types):
course_key = CourseKey.from_string(instance)
return self.filter(user, CourseOverview.objects.filter(id=course_key)).exists()
def query(self, user):
"""
Returns a Q object that expects to be used to filter CourseOverview queries.
"""
if not user.is_authenticated:
return EMPTY
masq_settings = getattr(user, 'masquerade_settings', {})
masq_as_student = [
course_key for
(course_key, masq_setting) in masq_settings.items()
if masq_setting.role == 'student'
]
not_masquerading_as_student = ~Q(id__in=masq_as_student)
is_global_staff = user.is_staff
course_staff_or_instructor_courses = CourseAccessRole.objects.filter(
user=user,
role__in=('staff', 'instructor')
).exclude(
course_id=CourseKeyField.Empty,
).values('course_id')
org_staff_or_instructor_courses = CourseAccessRole.objects.filter(
user=user,
role__in=('staff', 'instructor'),
course_id=CourseKeyField.Empty,
org__isnull=False
).values('org')
query = not_masquerading_as_student
if not is_global_staff:
query &= Q(id__in=course_staff_or_instructor_courses) | Q(org__in=org_staff_or_instructor_courses)
return query
class HasRolesRule(Rule):
def __init__(self, *roles):
self.roles = roles
def check(self, user, instance=None):
if not user.is_authenticated:
return False
if isinstance(instance, CourseKey):
course_key = instance
elif isinstance(instance, (CourseDescriptor, CourseOverview)):
course_key = instance.id
elif isinstance(instance, (ErrorDescriptor, XModule, XBlock)):
course_key = instance.scope_ids.usage_id.course_key
else:
course_key = CourseKey.from_string(str(instance))
for role in self.roles:
if CourseRole(role, course_key).has_user(user):
return True
if OrgRole(role, course_key.org).has_user(user):
return True
return False
| agpl-3.0 | -2,321,258,058,686,820,400 | 33.938547 | 110 | 0.654781 | false |
pshen/ansible | contrib/inventory/gce.py | 32 | 18024 | #!/usr/bin/env python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
- gce_subnetwork
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <[email protected]>
Contributors: Matt Hite <[email protected]>, Tom Melendez <[email protected]>
Version: 0.0.3
'''
__requires__ = ['pycrypto>=2.6']
try:
import pkg_resources
except ImportError:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. We don't
# fail here as there is code that better expresses the errors where the
# library is used.
pass
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"
import sys
import os
import argparse
from time import time
import ConfigParser
import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
try:
import json
except ImportError:
import simplejson as json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
sys.exit("GCE inventory script requires libcloud >= 0.13")
class CloudInventoryCache(object):
def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
cache_max_age=300):
cache_dir = os.path.expanduser(cache_path)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = os.path.join(cache_dir, cache_name)
self.cache_max_age = cache_max_age
def is_valid(self, max_age=None):
''' Determines if the cache files have expired, or if it is still valid '''
if max_age is None:
max_age = self.cache_max_age
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + max_age) > current_time:
return True
return False
def get_all_data_from_cache(self, filename=''):
''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
data = ''
if not filename:
filename = self.cache_path_cache
with open(filename, 'r') as cache:
data = cache.read()
return json.loads(data)
def write_to_cache(self, data, filename=''):
''' Writes data to file as JSON. Returns True. '''
if not filename:
filename = self.cache_path_cache
json_data = json.dumps(data)
with open(filename, 'w') as cache:
cache.write(json_data)
return True
class GceInventory(object):
def __init__(self):
# Cache object
self.cache = None
# dictionary containing inventory read from disk
self.inventory = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.config = self.get_config()
self.driver = self.get_gce_driver()
self.ip_type = self.get_inventory_options()
if self.ip_type:
self.ip_type = self.ip_type.lower()
# Cache management
start_inventory_time = time()
cache_used = False
if self.args.refresh_cache or not self.cache.is_valid():
self.do_api_calls_update_cache()
else:
self.load_inventory_from_cache()
cache_used = True
self.inventory['_meta']['stats'] = {'use_cache': True}
self.inventory['_meta']['stats'] = {
'inventory_load_time': time() - start_inventory_time,
'cache_used': cache_used
}
# Just display data for specific host
if self.args.host:
print(self.json_format_dict(
self.inventory['_meta']['hostvars'][self.args.host],
pretty=self.args.pretty))
else:
# Otherwise, assume user wants all instances grouped
zones = self.parse_env_zones()
print(self.json_format_dict(self.inventory,
pretty=self.args.pretty))
sys.exit(0)
def get_config(self):
"""
Reads the settings from the gce.ini file.
Populates a SafeConfigParser object with defaults and
attempts to read an .ini-style configuration from the filename
specified in GCE_INI_PATH. If the environment variable is
not present, the filename defaults to gce.ini in the current
working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
config = ConfigParser.SafeConfigParser(defaults={
'gce_service_account_email_address': '',
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'gce_zone': '',
'libcloud_secrets': '',
'inventory_ip_type': '',
'cache_path': '~/.ansible/tmp',
'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
if 'inventory' not in config.sections():
config.add_section('inventory')
if 'cache' not in config.sections():
config.add_section('cache')
config.read(gce_ini_path)
#########
# Section added for processing ini settings
#########
# Set the instance_states filter based on config file options
self.instance_states = []
if config.has_option('gce', 'instance_states'):
states = config.get('gce', 'instance_states')
# Ignore if instance_states is an empty string.
if states:
self.instance_states = states.split(',')
# Caching
cache_path = config.get('cache', 'cache_path')
cache_max_age = config.getint('cache', 'cache_max_age')
        # TODO(supertom): support project-specific caches
cache_name = 'ansible-gce.cache'
self.cache = CloudInventoryCache(cache_path=cache_path,
cache_max_age=cache_max_age,
cache_name=cache_name)
return config
def get_inventory_options(self):
"""Determine inventory options. Environment variables always
take precedence over configuration files."""
ip_type = self.config.get('inventory', 'inventory_ip_type')
# If the appropriate environment variables are set, they override
# other configuration
ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
return ip_type
def get_gce_driver(self):
"""Determine the GCE authorization settings and return a
libcloud driver.
"""
# Attempt to get GCE params from a configuration file, if one
# exists.
secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except:
pass
if not secrets_found and secrets_path:
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
sys.exit(err)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except:
pass
if not secrets_found:
args = [
self.config.get('gce', 'gce_service_account_email_address'),
self.config.get('gce', 'gce_service_account_pem_file_path')
]
kwargs = {'project': self.config.get('gce', 'gce_project_id'),
'datacenter': self.config.get('gce', 'gce_zone')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
args[0] = os.environ.get('GCE_EMAIL', args[0])
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
# Retrieve and return the GCE driver.
gce = get_driver(Provider.GCE)(*args, **kwargs)
gce.connection.user_agent_append(
'%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
)
return gce
def parse_env_zones(self):
'''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call'''
import csv
reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
zones = [r for r in reader]
return [z for z in zones[0]]
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
parser.add_argument(
'--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
def node_to_dict(self, inst):
md = {}
if inst is None:
return {}
if 'items' in inst.extra['metadata']:
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
subnet = None
if 'subnetwork' in inst.extra['networkInterfaces'][0]:
subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
        # default to external IP unless user has specified they prefer internal
if self.ip_type == 'internal':
ssh_host = inst.private_ips[0]
else:
ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ips[0],
'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
'gce_zone': inst.extra['zone'].name,
'gce_tags': inst.extra['tags'],
'gce_metadata': md,
'gce_network': net,
'gce_subnetwork': subnet,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': ssh_host
}
def load_inventory_from_cache(self):
''' Loads inventory from JSON on disk. '''
try:
self.inventory = self.cache.get_all_data_from_cache()
hosts = self.inventory['_meta']['hostvars']
except Exception as e:
print(
"Invalid inventory file %s. Please rebuild with -refresh-cache option."
% (self.cache.cache_path_cache))
raise
def do_api_calls_update_cache(self):
''' Do API calls and save data in cache. '''
zones = self.parse_env_zones()
data = self.group_instances(zones)
self.cache.write_to_cache(data)
self.inventory = data
def list_nodes(self):
all_nodes = []
params, more_results = {'maxResults': 500}, True
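        # page through results: libcloud records a 'pageToken' in these params
        # while more pages remain, so keep looping until it disappears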
while more_results:
self.driver.connection.gce_params = params
all_nodes.extend(self.driver.list_nodes())
more_results = 'pageToken' in params
return all_nodes
def group_instances(self, zones=None):
'''Group all instances'''
groups = {}
meta = {}
meta["hostvars"] = {}
for node in self.list_nodes():
# This check filters on the desired instance states defined in the
# config file with the instance_states config option.
#
# If the instance_states list is _empty_ then _ALL_ states are returned.
#
# If the instance_states list is _populated_ then check the current
# state against the instance_states list
if self.instance_states and not node.extra['status'] in self.instance_states:
continue
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.extra['zone'].name
# To avoid making multiple requests per zone
# we list all nodes and then filter the results
if zones and zone not in zones:
continue
if zone in groups:
groups[zone].append(name)
else:
groups[zone] = [name]
tags = node.extra['tags']
for t in tags:
if t.startswith('group-'):
tag = t[6:]
else:
tag = 'tag_%s' % t
if tag in groups:
groups[tag].append(name)
else:
groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
if net in groups:
groups[net].append(name)
else:
groups[net] = [name]
machine_type = node.size
if machine_type in groups:
groups[machine_type].append(name)
else:
groups[machine_type] = [name]
image = node.image and node.image or 'persistent_disk'
if image in groups:
groups[image].append(name)
else:
groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
if stat in groups:
groups[stat].append(name)
else:
groups[stat] = [name]
groups["_meta"] = meta
return groups
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
if __name__ == '__main__':
GceInventory()
| gpl-3.0 | -4,572,851,495,682,398,000 | 35.33871 | 100 | 0.585996 | false |
oxc/Flexget | flexget/plugins/modify/path_by_ext.py | 4 | 1722 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
import mimetypes
from flexget import plugin
from flexget.event import event
log = logging.getLogger('path_by_ext')
class PluginPathByExt(object):
"""
Allows specifying path based on content-type
Example:
path_by_ext:
torrent: ~/watch/torrent/
nzb: ~/watch/nzb/
"""
schema = {'type': 'object'}
def on_task_modify(self, task, config):
self.ext(task, config, self.set_path)
def set_path(self, entry, path):
log.debug('Setting %s path to %s' % (entry['title'], path))
entry['path'] = path
def ext(self, task, config, callback):
for entry in task.entries:
if 'mime-type' in entry:
# check if configuration has mimetype that entry has
if entry['mime-type'] in config:
callback(entry, config[entry['mime-type']])
# check if entry mimetype extension matches in config
                ext = mimetypes.guess_extension(entry['mime-type'])
                path = (config.get(ext) or config.get(ext[1:])) if ext else None
if path:
callback(entry, path)
else:
log.debug('Unknown mimetype %s' % entry['mime-type'])
else:
# try to find from url
for ext, path in config.items():
if entry['url'].endswith('.' + ext):
callback(entry, path)
@event('plugin.register')
def register_plugin():
plugin.register(PluginPathByExt, 'path_by_ext', api_ver=2)
| mit | 5,651,375,410,693,585,000 | 30.309091 | 74 | 0.566202 | false |
joshuamckenty/yolo-octo-wookie | nova/volume/__init__.py | 1 | 1251 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nova.volume` -- Nova Block Storage
=====================================================
.. automodule:: nova.volume
:platform: Unix
.. moduleauthor:: Jesse Andrews <[email protected]>
.. moduleauthor:: Devin Carlen <[email protected]>
.. moduleauthor:: Vishvananda Ishaya <[email protected]>
.. moduleauthor:: Joshua McKenty <[email protected]>
.. moduleauthor:: Manish Singh <[email protected]>
.. moduleauthor:: Andy Smith <[email protected]>
"""
| apache-2.0 | -743,769,945,700,646,000 | 39.354839 | 78 | 0.706635 | false |
PwnArt1st/searx | searx/engines/xpath.py | 1 | 3626 | from lxml import html
from urllib import urlencode, unquote
from urlparse import urlparse, urljoin
from lxml.etree import _ElementStringResult, _ElementUnicodeResult
from searx.utils import html_to_text
search_url = None
url_xpath = None
content_xpath = None
title_xpath = None
suggestion_xpath = ''
results_xpath = ''
# parameters for engines with paging support
#
# number of results on each page
# (only needed if the site requires not a page number, but an offset)
page_size = 1
# number of the first page (usually 0 or 1)
first_page_num = 1
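# 'paging' itself (checked in request()) is assumed to be injected on this module
# by the searx engine loader rather than defined here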
'''
if xpath_results is list, extract the text from each result and concat the list
if xpath_results is a xml element, extract all the text node from it
( text_content() method from lxml )
if xpath_results is a string element, then it's already done
'''
def extract_text(xpath_results):
if type(xpath_results) == list:
# it's list of result : concat everything using recursive call
if not xpath_results:
raise Exception('Empty url resultset')
result = ''
for e in xpath_results:
result = result + extract_text(e)
return result.strip()
elif type(xpath_results) in [_ElementStringResult, _ElementUnicodeResult]:
# it's a string
return ''.join(xpath_results)
else:
# it's a element
return html_to_text(xpath_results.text_content()).strip()
def extract_url(xpath_results, search_url):
url = extract_text(xpath_results)
if url.startswith('//'):
# add http or https to this kind of url //example.com/
parsed_search_url = urlparse(search_url)
url = parsed_search_url.scheme + url
elif url.startswith('/'):
# fix relative url to the search engine
url = urljoin(search_url, url)
# normalize url
url = normalize_url(url)
return url
def normalize_url(url):
parsed_url = urlparse(url)
# add a / at this end of the url if there is no path
if not parsed_url.netloc:
raise Exception('Cannot parse url')
if not parsed_url.path:
url += '/'
# FIXME : hack for yahoo
if parsed_url.hostname == 'search.yahoo.com'\
and parsed_url.path.startswith('/r'):
p = parsed_url.path
mark = p.find('/**')
if mark != -1:
return unquote(p[mark + 3:]).decode('utf-8')
return url
def request(query, params):
query = urlencode({'q': query})[2:]
fp = {'query': query}
if paging and search_url.find('{pageno}') >= 0:
fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num
params['url'] = search_url.format(**fp)
params['query'] = query
return params
def response(resp):
results = []
dom = html.fromstring(resp.text)
if results_xpath:
for result in dom.xpath(results_xpath):
url = extract_url(result.xpath(url_xpath), search_url)
title = extract_text(result.xpath(title_xpath)[0])
content = extract_text(result.xpath(content_xpath)[0])
results.append({'url': url, 'title': title, 'content': content})
else:
for url, title, content in zip(
(extract_url(x, search_url) for
x in dom.xpath(url_xpath)),
map(extract_text, dom.xpath(title_xpath)),
map(extract_text, dom.xpath(content_xpath))
):
results.append({'url': url, 'title': title, 'content': content})
if not suggestion_xpath:
return results
for suggestion in dom.xpath(suggestion_xpath):
results.append({'suggestion': extract_text(suggestion)})
return results
| agpl-3.0 | 8,834,177,589,919,687,000 | 29.216667 | 79 | 0.630171 | false |
ingenioustechie/zamboni | mkt/api/tests/test_base.py | 6 | 4752 | import urllib
from django import forms
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from mock import patch
from nose.tools import eq_
from rest_framework.decorators import (authentication_classes,
permission_classes)
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from mkt.api.base import cors_api_view, SubRouterWithFormat
from mkt.api.tests.test_oauth import RestOAuth
from mkt.site.tests import TestCase
from mkt.webapps.views import AppViewSet
class URLRequestFactory(RequestFactory):
def _encode_data(self, data, content_type):
return urllib.urlencode(data)
class TestEncoding(RestOAuth):
def test_blah_encoded(self):
"""
Regression test of bug #858403: ensure that a 415 (and not 500) is
raised when an unsupported Content-Type header is passed to an API
endpoint.
"""
r = self.client.post(reverse('app-list'),
CONTENT_TYPE='application/blah',
data='cvan was here')
eq_(r.status_code, 415)
def test_bad_json(self):
r = self.client.post(reverse('app-list'),
CONTENT_TYPE='application/json',
data="not ' json ' 5")
eq_(r.status_code, 400)
def test_not_json(self):
r = self.client.get(reverse('app-list'),
HTTP_ACCEPT='application/blah')
# We should return a 406, but for endpoints that only accept JSON, we
# cheat and return json content without even looking at the Accept
# header (see mkt.api.renderers and settings).
eq_(r.status_code, 200)
eq_(r['content-type'], 'application/json; charset=utf-8')
@patch.object(AppViewSet, 'create')
def test_form_encoded(self, create_mock):
create_mock.return_value = Response()
self.client.post(reverse('app-list'),
data='foo=bar',
content_type='application/x-www-form-urlencoded')
eq_(create_mock.call_args[0][0].data['foo'], 'bar')
class TestCORSWrapper(TestCase):
urls = 'mkt.api.tests.test_base_urls'
def test_cors(self):
@cors_api_view(['GET', 'PATCH'])
@authentication_classes([])
@permission_classes([])
def foo(request):
return Response()
request = RequestFactory().options('/')
foo(request)
eq_(request.CORS, ['GET', 'PATCH'])
def test_cors_with_headers(self):
@cors_api_view(['POST'], headers=('x-barfoo',))
@authentication_classes([])
@permission_classes([])
def foo(request):
return Response()
request = RequestFactory().options('/')
foo(request)
eq_(request.CORS_HEADERS, ('x-barfoo',))
def test_cors_options(self):
res = self.client.options(reverse('test-cors-api-view'))
eq_(res['Access-Control-Allow-Origin'], '*')
eq_(res['Access-Control-Allow-Headers'], 'x-barfoo, x-foobar')
class Form(forms.Form):
app = forms.ChoiceField(choices=(('valid', 'valid'),))
class TestSubRouterWithFormat(TestCase):
def test_format_is_included(self):
router = SubRouterWithFormat()
router.register('foo', ModelViewSet, base_name='bar')
expected = [
{'name': 'bar-list', 'pattern': '^(?P<pk>[^/.]+)/foo/$'},
{'name': 'bar-detail', 'pattern': '^(?P<pk>[^/.]+)/foo/$'},
{'name': 'bar-list',
'pattern': '^(?P<pk>[^/.]+)/foo\\.(?P<format>[a-z0-9]+)/?$'},
{'name': 'bar-detail',
'pattern': '^(?P<pk>[^/.]+)/foo\\.(?P<format>[a-z0-9]+)/?$'},
]
actual = [{
'name': url.name, 'pattern': url.regex.pattern
} for url in router.urls]
for i, _ in enumerate(expected):
eq_(actual[i], expected[i])
def test_format_is_included_no_trailing_slashes(self):
router = SubRouterWithFormat(trailing_slash=False)
router.register('foo', ModelViewSet, base_name='bar')
expected = [
{'name': 'bar-list', 'pattern': '^(?P<pk>[^/.]+)/foo$'},
{'name': 'bar-detail', 'pattern': '^(?P<pk>[^/.]+)/foo$'},
{'name': 'bar-list',
'pattern': '^(?P<pk>[^/.]+)/foo\\.(?P<format>[a-z0-9]+)/?$'},
{'name': 'bar-detail',
'pattern': '^(?P<pk>[^/.]+)/foo\\.(?P<format>[a-z0-9]+)/?$'},
]
actual = [{
'name': url.name, 'pattern': url.regex.pattern
} for url in router.urls]
for i, _ in enumerate(expected):
eq_(actual[i], expected[i])
| bsd-3-clause | 2,908,792,441,982,307,300 | 35.553846 | 77 | 0.561237 | false |
odtvince/APITaxi | APITaxi/descriptors/hail.py | 1 | 1415 | # -*- coding: utf-8 -*-
from ..models.hail import Hail as HailModel
from ..utils.make_model import make_model
from ..api import api
from ..utils import fields
hail_model = make_model('hail', 'Hail')
puttable_arguments = ['status', 'incident_taxi_reason',
'reporting_customer', 'reporting_customer_reason', 'customer_lon',
'customer_lat', 'customer_address', 'customer_phone_number', 'rating_ride',
'rating_ride_reason', 'incident_customer_reason']
dict_hail = dict(filter(lambda f: f[0] in puttable_arguments,
HailModel.marshall_obj().items()))
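# all fields of the PUT payload are optional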
for k in dict_hail.keys():
dict_hail[k].required = False
hail_expect_put_details = api.model('hail_expect_put_details', dict_hail)
hail_expect_put = api.model('hail_expect_put',
{'data': fields.List(fields.Nested(hail_expect_put_details))})
postable_arguments = ['customer_id', 'customer_lon', 'customer_lat',
    'customer_address', 'customer_phone_number', 'taxi_id', 'operateur']
dict_hail = dict(filter(lambda f: f[0] in postable_arguments,
    HailModel.marshall_obj().items()))
dict_hail['operateur'] = fields.String(attribute='operateur.email', required=True)
dict_hail['taxi_id'] = fields.String(required=True)
hail_expect_post_details = api.model('hail_expect_post_details', dict_hail)
hail_expect_post = api.model('hail_expect_post',
{'data': fields.List(fields.Nested(hail_expect_post_details))})
| agpl-3.0 | 2,132,293,936,183,458,600 | 46.166667 | 83 | 0.696113 | false |
littleweaver/django-daguerre | daguerre/widgets.py | 1 | 1317 | from django.contrib.admin.widgets import AdminFileWidget
from django.utils.safestring import mark_safe
from django.urls import reverse
class AreaWidget(AdminFileWidget):
class Media:
css = {
'all': ('imgareaselect/css/imgareaselect-animated.css',
'daguerre/css/areawidget.css',)
}
js = (
'imgareaselect/scripts/jquery.imgareaselect.js',
'daguerre/js/areawidget.daguerre.js',
)
def render(self, name, value, attrs=None):
content = super(AreaWidget, self).render(name, value, attrs)
if value and hasattr(value, 'url'):
content += (
"<div class='daguerre-areas' id='{0}-areas'"
" data-storage-path='{1}' data-width='{2}' data-height='{3}'"
" data-url='{4}' data-area-url='{5}'></div>").format(
name,
value.name,
value.width,
value.height,
reverse(
'daguerre_ajax_adjustment_info',
kwargs={'storage_path': value.name}),
reverse(
'daguerre_ajax_update_area',
kwargs={'storage_path': value.name}))
return mark_safe(content)
| bsd-3-clause | 1,678,550,584,569,059,300 | 37.735294 | 77 | 0.510251 | false |
pidah/st2contrib | packs/slack/slack_api_gen.py | 7 | 3003 | import yaml
import re
import urllib2
from bs4 import BeautifulSoup
method_dict = {}
base_url = 'https://api.slack.com/methods'
api_doc_main = urllib2.urlopen('%s/channels.invite' % base_url)
soup = BeautifulSoup(api_doc_main)
api_methods = soup.find('select', id='api_method')
for method in api_methods.stripped_strings:
if method != 'View another method...':
method_dict[method] = {'params': {}}
method_url = "%s/%s" % (base_url, method)
method_page = urllib2.urlopen(method_url)
method_soup = BeautifulSoup(method_page)
method_description = method_soup.find('section', attrs={
"class": "tab_pane selected clearfix large_bottom_padding"}) \
.find_all('p')[0].text
method_description = re.sub('\n|\r', ' ', method_description)
method_dict[method]['description'] = method_description
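        # scrape the arguments table; its first row is a header and is dropped below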
method_args_table = method_soup.find('table', attrs={
"class": "arguments full_width"}).tbody.find_all('tr')
del method_args_table[0]
for row in method_args_table:
arg = row.find('code')
required = row.find_all('td')[2]
if re.search("Required", required.text):
required = True
default = None
elif re.search(",", required.text):
required, default = required.text.split(',')
required = False
default = default.split('=')[1]
else:
required = False
default = None
method_dict[method]['params'][arg.text] = {}
method_dict[method]['params'][arg.text]['required'] = required
method_dict[method]['params'][arg.text]['default'] = default
for method in method_dict:
file_name = 'actions/%s.yaml' % method
output_dict = {'name': method,
'runner_type': 'run-python',
'enabled': True,
'entry_point': 'run.py',
'description': method_dict[method]['description'],
'parameters': {
'end_point': {
'type': 'string',
'immutable': True,
'default': method}
}
}
for param in method_dict[method]['params']:
if param == 'token':
method_dict[method]['params'][param]['required'] = False
output_dict['parameters'][param] = {'type': 'string'}
if method_dict[method]['params'][param]['default'] is not None:
output_dict['parameters'][param]['default'] = \
method_dict[method]['params'][param]['default']
output_dict['parameters'][param]['required'] = \
method_dict[method]['params'][param]['required']
print yaml.safe_dump(output_dict, default_flow_style=False)
fh = open(file_name, 'w')
fh.write(yaml.safe_dump(output_dict, default_flow_style=False))
fh.close()
| apache-2.0 | 8,225,473,968,011,166,000 | 39.04 | 74 | 0.541792 | false |
qsnake/abinit | util/maintainers/check-forbidden-flags.py | 1 | 2797 | #!/usr/bin/env python
#
# Copyright (C) 2010-2012 ABINIT Group (Yann Pouillon)
#
# This file is part of the ABINIT software package. For license information,
# please see the COPYING file in the top-level directory of the ABINIT source
# distribution.
#
from ConfigParser import ConfigParser
from time import gmtime,strftime
import commands
import os
import re
import sys
class MyConfigParser(ConfigParser):
def optionxform(self,option):
return str(option)
# ---------------------------------------------------------------------------- #
#
# Functions
#
env_ignore = ["CFLAGS","CXXFLAGS","FCFLAGS","NVCC_*","fcflags_opt_*"]
def is_ignored(keyword):
for env in env_ignore:
if ( "*" in env ):
if ( re.match(env,keyword) ):
return True
elif ( env == keyword ):
return True
return False
# ---------------------------------------------------------------------------- #
#
# Main program
#
my_name = os.path.basename(sys.argv[0])
# Check if we are in the top of the ABINIT source tree
if ( not os.path.exists("configure.ac") or
not os.path.exists("src/98_main/abinit.F90") ):
print "%s: You must be in the top of an ABINIT source tree." % my_name
print "%s: Aborting now." % my_name
sys.exit(1)
# Init
re_dbgflags = re.compile("(^-g|[^0-9A-Za-z]-g)")
re_optflags = re.compile("(-O[0-9]|-xO[0-9])")
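# regexps matching debug flags (-g) and optimization levels (-O<n> / -xO<n>)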
# Extract environment variables from config file
cnf_env = MyConfigParser()
cnf_env.read("config/specs/environment.conf")
env_config = list()
for env in cnf_env.sections():
if ( cnf_env.get(env,"reset") == "no" ):
if ( not is_ignored(env) ):
env_config.append(env)
env_config.sort()
# Extract information from build example config file
bex_ok = True
cnf_bex = MyConfigParser()
cnf_bex.read("config/specs/build-examples.conf")
env_forbidden = dict()
for bot in cnf_bex.sections():
env_forbidden[bot] = list()
for var in cnf_bex.options(bot):
if ( var in env_config ):
val = cnf_bex.get(bot,var)
if ( re_dbgflags.search(val) ):
env_forbidden[bot].append(("D",var))
bex_ok = False
if ( re_optflags.search(val) ):
env_forbidden[bot].append(("O",var))
bex_ok = False
env_fkeys = env_forbidden.keys()
env_fkeys.sort()
# Report any match
my_exitcode = 0
if ( not bex_ok ):
sys.stderr.write("%s: reporting use of forbidden flags\n\n" % \
(os.path.basename(sys.argv[0])))
sys.stderr.write("X: D=debug / O=Optimization\n\n")
sys.stderr.write("%s %-24s %-48s\n" % \
("X","Variable","Bot"))
sys.stderr.write("%s %s %s\n" % ("-","-" * 24,"-" * 48))
for bot in env_fkeys:
if ( len(env_forbidden[bot]) > 0 ):
my_exitcode = 1
for (tag,var) in env_forbidden[bot]:
sys.stderr.write("%s %-24s %-48s\n" % \
(tag,var,bot))
sys.stderr.write("\n")
sys.exit(my_exitcode)
| gpl-3.0 | 656,815,018,838,858,200 | 25.638095 | 80 | 0.600644 | false |
dongweiming/code | vilya/models/actions/pull.py | 3 | 1985 | # -*- coding: utf-8 -*-
from vilya.models.actions.base import Action, ActionScope
# TODO
# 'date': self.now(),
# 'url': pullreq_url,
# 'description': description,
# 'from_proj': from_proj_str,
# 'to_proj': to_proj_str,
# 'commiter': commiter,
# 'owner': owner,
# 'title': title,
# 'status': status,
# 'type': 'pull_request'
class Pull(Action):
    # FIXME: move all these computations into the corresponding models (ticket/pr)
def __init__(self, sender, date, pullreq, ticket, owner, state, url):
super(Pull, self).__init__(sender, date) # sender <- commiter
self.to_proj = pullreq.to_proj_str
self.from_proj = pullreq.from_proj_str
self.owner = owner
self.title = ticket.title
self.ticket = ticket.ticket_id
self.state = state # <- status
self.content = Pull._truncate(ticket.description) # <- description
self.url = url
self._target = pullreq.to_proj.name
@property
def type(self):
return 'pull_request'
@property
def scope(self):
return ActionScope.project
@property
def target(self):
return self._target
@property
def entry_id(self):
return self.ticket
@property
def entry_title(self):
return '#%d %s' % (self.ticket, self.title)
def _migrate_ticket_id(url):
''' hack from url '''
if url.split('/')[3].isdigit():
return int(url.split('/')[3])
elif url.split('/')[4].isdigit():
return int(url.split('/')[4])
return ''
migrate_pull_request = {
'sender': 'commiter',
'content': 'description',
'state': 'status',
'target': ('to_proj', lambda x: x.split(':')[0]),
'scope': ('', lambda x: 'project'), # only projects has
'ticket': ['url', _migrate_ticket_id],
'entry_id': ['url', _migrate_ticket_id], # force update
'entry_title': 'title',
}
| bsd-3-clause | -3,281,742,474,262,547,500 | 26.208333 | 75 | 0.54926 | false |
lzp819739483/dpkt | dpkt/crc32c.py | 17 | 4094 | # $Id$
import array
# CRC-32C Checksum
# http://tools.ietf.org/html/rfc3309
crc32c_table = (
0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, 0xC79A971FL,
0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, 0x8AD958CFL, 0x78B2DBCCL,
0x6BE22838L, 0x9989AB3BL, 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L,
0x5E133C24L, 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, 0x9A879FA0L,
0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, 0x5D1D08BFL, 0xAF768BBCL,
0xBC267848L, 0x4E4DFB4BL, 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L,
0x33ED7D2AL, 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, 0x6DFE410EL,
0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, 0x30E349B1L, 0xC288CAB2L,
0xD1D83946L, 0x23B3BA45L, 0xF779DEAEL, 0x05125DADL, 0x1642AE59L,
0xE4292D5AL, 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, 0x417B1DBCL,
0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, 0x86E18AA3L, 0x748A09A0L,
0x67DAFA54L, 0x95B17957L, 0xCBA24573L, 0x39C9C670L, 0x2A993584L,
0xD8F2B687L, 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, 0x96BF4DCCL,
0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, 0xDBFC821CL, 0x2997011FL,
0x3AC7F2EBL, 0xC8AC71E8L, 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L,
0x0F36E6F7L, 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, 0xEB1FCBADL,
0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, 0x2C855CB2L, 0xDEEEDFB1L,
0xCDBE2C45L, 0x3FD5AF46L, 0x7198540DL, 0x83F3D70EL, 0x90A324FAL,
0x62C8A7F9L, 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, 0x3CDB9BDDL,
0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, 0x82F63B78L, 0x709DB87BL,
0x63CD4B8FL, 0x91A6C88CL, 0x456CAC67L, 0xB7072F64L, 0xA457DC90L,
0x563C5F93L, 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, 0x92A8FC17L,
0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, 0x55326B08L, 0xA759E80BL,
0xB4091BFFL, 0x466298FCL, 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL,
0x0B21572CL, 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, 0x65D122B9L,
0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, 0x2892ED69L, 0xDAF96E6AL,
0xC9A99D9EL, 0x3BC21E9DL, 0xEF087A76L, 0x1D63F975L, 0x0E330A81L,
0xFC588982L, 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, 0x38CC2A06L,
0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, 0xFF56BD19L, 0x0D3D3E1AL,
0x1E6DCDEEL, 0xEC064EEDL, 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L,
0xD0DDD530L, 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, 0x8ECEE914L,
0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, 0xD3D3E1ABL, 0x21B862A8L,
0x32E8915CL, 0xC083125FL, 0x144976B4L, 0xE622F5B7L, 0xF5720643L,
0x07198540L, 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, 0xE330A81AL,
0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, 0x24AA3F05L, 0xD6C1BC06L,
0xC5914FF2L, 0x37FACCF1L, 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L,
0x7AB90321L, 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, 0x34F4F86AL,
0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, 0x79B737BAL, 0x8BDCB4B9L,
0x988C474DL, 0x6AE7C44EL, 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L,
0xAD7D5351L
)
def add(crc, buf):
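    # feed buf into the running CRC-32C one byte at a time via the lookup table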
buf = array.array('B', buf)
for b in buf:
crc = (crc >> 8) ^ crc32c_table[(crc ^ b) & 0xff]
return crc
def done(crc):
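    # finalize: invert the running CRC and reverse the byte order of the result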
tmp = ~crc & 0xffffffffL
b0 = tmp & 0xff
b1 = (tmp >> 8) & 0xff
b2 = (tmp >> 16) & 0xff
b3 = (tmp >> 24) & 0xff
crc = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
return crc
def cksum(buf):
"""Return computed CRC-32c checksum."""
return done(add(0xffffffffL, buf))
| bsd-3-clause | 6,837,719,413,612,497,000 | 50.175 | 68 | 0.760381 | false |
tboyce021/home-assistant | homeassistant/components/sonos/media_player.py | 1 | 55157 | """Support to interface with Sonos players."""
import asyncio
import datetime
import functools as ft
import logging
import socket
import urllib.parse
import async_timeout
import pysonos
from pysonos import alarms
from pysonos.core import (
PLAY_MODE_BY_MEANING,
PLAY_MODES,
PLAYING_LINE_IN,
PLAYING_RADIO,
PLAYING_TV,
)
from pysonos.exceptions import SoCoException, SoCoUPnPException
import pysonos.music_library
import pysonos.snapshot
import voluptuous as vol
from homeassistant.components.media_player import BrowseMedia, MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE,
MEDIA_CLASS_ALBUM,
MEDIA_CLASS_ARTIST,
MEDIA_CLASS_COMPOSER,
MEDIA_CLASS_CONTRIBUTING_ARTIST,
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_GENRE,
MEDIA_CLASS_PLAYLIST,
MEDIA_CLASS_TRACK,
MEDIA_TYPE_ALBUM,
MEDIA_TYPE_ARTIST,
MEDIA_TYPE_COMPOSER,
MEDIA_TYPE_CONTRIBUTING_ARTIST,
MEDIA_TYPE_GENRE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_TRACK,
REPEAT_MODE_ALL,
REPEAT_MODE_OFF,
REPEAT_MODE_ONE,
SUPPORT_BROWSE_MEDIA,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_REPEAT_SET,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.const import (
ATTR_TIME,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import ServiceCall, callback
from homeassistant.helpers import config_validation as cv, entity_platform, service
import homeassistant.helpers.device_registry as dr
from homeassistant.util.dt import utcnow
from . import CONF_ADVERTISE_ADDR, CONF_HOSTS, CONF_INTERFACE_ADDR
from .const import (
DATA_SONOS,
DOMAIN as SONOS_DOMAIN,
SONOS_ALBUM,
SONOS_ALBUM_ARTIST,
SONOS_ARTIST,
SONOS_COMPOSER,
SONOS_GENRE,
SONOS_PLAYLISTS,
SONOS_TRACKS,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = 10
DISCOVERY_INTERVAL = 60
SUPPORT_SONOS = (
SUPPORT_BROWSE_MEDIA
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_NEXT_TRACK
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_REPEAT_SET
| SUPPORT_SEEK
| SUPPORT_SELECT_SOURCE
| SUPPORT_SHUFFLE_SET
| SUPPORT_STOP
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
)
SOURCE_LINEIN = "Line-in"
SOURCE_TV = "TV"
EXPANDABLE_MEDIA_TYPES = [
MEDIA_TYPE_ALBUM,
MEDIA_TYPE_ARTIST,
MEDIA_TYPE_COMPOSER,
MEDIA_TYPE_GENRE,
MEDIA_TYPE_PLAYLIST,
SONOS_ALBUM,
SONOS_ALBUM_ARTIST,
SONOS_ARTIST,
SONOS_GENRE,
SONOS_COMPOSER,
SONOS_PLAYLISTS,
]
SONOS_TO_MEDIA_CLASSES = {
SONOS_ALBUM: MEDIA_CLASS_ALBUM,
SONOS_ALBUM_ARTIST: MEDIA_CLASS_ARTIST,
SONOS_ARTIST: MEDIA_CLASS_CONTRIBUTING_ARTIST,
SONOS_COMPOSER: MEDIA_CLASS_COMPOSER,
SONOS_GENRE: MEDIA_CLASS_GENRE,
SONOS_PLAYLISTS: MEDIA_CLASS_PLAYLIST,
SONOS_TRACKS: MEDIA_CLASS_TRACK,
"object.container.album.musicAlbum": MEDIA_CLASS_ALBUM,
"object.container.genre.musicGenre": MEDIA_CLASS_PLAYLIST,
"object.container.person.composer": MEDIA_CLASS_PLAYLIST,
"object.container.person.musicArtist": MEDIA_CLASS_ARTIST,
"object.container.playlistContainer.sameArtist": MEDIA_CLASS_ARTIST,
"object.container.playlistContainer": MEDIA_CLASS_PLAYLIST,
"object.item.audioItem.musicTrack": MEDIA_CLASS_TRACK,
}
SONOS_TO_MEDIA_TYPES = {
SONOS_ALBUM: MEDIA_TYPE_ALBUM,
SONOS_ALBUM_ARTIST: MEDIA_TYPE_ARTIST,
SONOS_ARTIST: MEDIA_TYPE_CONTRIBUTING_ARTIST,
SONOS_COMPOSER: MEDIA_TYPE_COMPOSER,
SONOS_GENRE: MEDIA_TYPE_GENRE,
SONOS_PLAYLISTS: MEDIA_TYPE_PLAYLIST,
SONOS_TRACKS: MEDIA_TYPE_TRACK,
"object.container.album.musicAlbum": MEDIA_TYPE_ALBUM,
"object.container.genre.musicGenre": MEDIA_TYPE_PLAYLIST,
"object.container.person.composer": MEDIA_TYPE_PLAYLIST,
"object.container.person.musicArtist": MEDIA_TYPE_ARTIST,
"object.container.playlistContainer.sameArtist": MEDIA_TYPE_ARTIST,
"object.container.playlistContainer": MEDIA_TYPE_PLAYLIST,
"object.item.audioItem.musicTrack": MEDIA_TYPE_TRACK,
}
MEDIA_TYPES_TO_SONOS = {
MEDIA_TYPE_ALBUM: SONOS_ALBUM,
MEDIA_TYPE_ARTIST: SONOS_ALBUM_ARTIST,
MEDIA_TYPE_CONTRIBUTING_ARTIST: SONOS_ARTIST,
MEDIA_TYPE_COMPOSER: SONOS_COMPOSER,
MEDIA_TYPE_GENRE: SONOS_GENRE,
MEDIA_TYPE_PLAYLIST: SONOS_PLAYLISTS,
MEDIA_TYPE_TRACK: SONOS_TRACKS,
}
SONOS_TYPES_MAPPING = {
"A:ALBUM": SONOS_ALBUM,
"A:ALBUMARTIST": SONOS_ALBUM_ARTIST,
"A:ARTIST": SONOS_ARTIST,
"A:COMPOSER": SONOS_COMPOSER,
"A:GENRE": SONOS_GENRE,
"A:PLAYLISTS": SONOS_PLAYLISTS,
"A:TRACKS": SONOS_TRACKS,
"object.container.album.musicAlbum": SONOS_ALBUM,
"object.container.genre.musicGenre": SONOS_GENRE,
"object.container.person.composer": SONOS_COMPOSER,
"object.container.person.musicArtist": SONOS_ALBUM_ARTIST,
"object.container.playlistContainer.sameArtist": SONOS_ARTIST,
"object.container.playlistContainer": SONOS_PLAYLISTS,
"object.item.audioItem.musicTrack": SONOS_TRACKS,
}
LIBRARY_TITLES_MAPPING = {
"A:ALBUM": "Albums",
"A:ALBUMARTIST": "Artists",
"A:ARTIST": "Contributing Artists",
"A:COMPOSER": "Composers",
"A:GENRE": "Genres",
"A:PLAYLISTS": "Playlists",
"A:TRACKS": "Tracks",
}
PLAYABLE_MEDIA_TYPES = [
MEDIA_TYPE_ALBUM,
MEDIA_TYPE_ARTIST,
MEDIA_TYPE_COMPOSER,
MEDIA_TYPE_CONTRIBUTING_ARTIST,
MEDIA_TYPE_GENRE,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_TRACK,
]
REPEAT_TO_SONOS = {
REPEAT_MODE_OFF: False,
REPEAT_MODE_ALL: True,
REPEAT_MODE_ONE: "ONE",
}
SONOS_TO_REPEAT = {meaning: mode for mode, meaning in REPEAT_TO_SONOS.items()}
ATTR_SONOS_GROUP = "sonos_group"
UPNP_ERRORS_TO_IGNORE = ["701", "711", "712"]
SERVICE_JOIN = "join"
SERVICE_UNJOIN = "unjoin"
SERVICE_SNAPSHOT = "snapshot"
SERVICE_RESTORE = "restore"
SERVICE_SET_TIMER = "set_sleep_timer"
SERVICE_CLEAR_TIMER = "clear_sleep_timer"
SERVICE_UPDATE_ALARM = "update_alarm"
SERVICE_SET_OPTION = "set_option"
SERVICE_PLAY_QUEUE = "play_queue"
SERVICE_REMOVE_FROM_QUEUE = "remove_from_queue"
ATTR_SLEEP_TIME = "sleep_time"
ATTR_ALARM_ID = "alarm_id"
ATTR_VOLUME = "volume"
ATTR_ENABLED = "enabled"
ATTR_INCLUDE_LINKED_ZONES = "include_linked_zones"
ATTR_MASTER = "master"
ATTR_WITH_GROUP = "with_group"
ATTR_NIGHT_SOUND = "night_sound"
ATTR_SPEECH_ENHANCE = "speech_enhance"
ATTR_QUEUE_POSITION = "queue_position"
ATTR_STATUS_LIGHT = "status_light"
UNAVAILABLE_VALUES = {"", "NOT_IMPLEMENTED", None}
class UnknownMediaType(BrowseError):
"""Unknown media type."""
class SonosData:
"""Storage class for platform global data."""
def __init__(self):
"""Initialize the data."""
self.entities = []
self.discovered = []
self.topology_condition = asyncio.Condition()
self.discovery_thread = None
self.hosts_heartbeat = None
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Sonos platform. Obsolete."""
_LOGGER.error(
"Loading Sonos by media_player platform configuration is no longer supported"
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Sonos from a config entry."""
if DATA_SONOS not in hass.data:
hass.data[DATA_SONOS] = SonosData()
config = hass.data[SONOS_DOMAIN].get("media_player", {})
_LOGGER.debug("Reached async_setup_entry, config=%s", config)
advertise_addr = config.get(CONF_ADVERTISE_ADDR)
if advertise_addr:
pysonos.config.EVENT_ADVERTISE_IP = advertise_addr
def _stop_discovery(event):
data = hass.data[DATA_SONOS]
if data.discovery_thread:
data.discovery_thread.stop()
data.discovery_thread = None
if data.hosts_heartbeat:
data.hosts_heartbeat()
data.hosts_heartbeat = None
def _discovery(now=None):
"""Discover players from network or configuration."""
hosts = config.get(CONF_HOSTS)
def _discovered_player(soco):
"""Handle a (re)discovered player."""
try:
_LOGGER.debug("Reached _discovered_player, soco=%s", soco)
if soco.uid not in hass.data[DATA_SONOS].discovered:
_LOGGER.debug("Adding new entity")
hass.data[DATA_SONOS].discovered.append(soco.uid)
hass.add_job(async_add_entities, [SonosEntity(soco)])
else:
entity = _get_entity_from_soco_uid(hass, soco.uid)
if entity and (entity.soco == soco or not entity.available):
_LOGGER.debug("Seen %s", entity)
hass.add_job(entity.async_seen(soco))
except SoCoException as ex:
_LOGGER.debug("SoCoException, ex=%s", ex)
if hosts:
for host in hosts:
try:
_LOGGER.debug("Testing %s", host)
player = pysonos.SoCo(socket.gethostbyname(host))
if player.is_visible:
# Make sure that the player is available
_ = player.volume
_discovered_player(player)
except (OSError, SoCoException) as ex:
_LOGGER.debug("Exception %s", ex)
if now is None:
_LOGGER.warning("Failed to initialize '%s'", host)
_LOGGER.debug("Tested all hosts")
hass.data[DATA_SONOS].hosts_heartbeat = hass.helpers.event.call_later(
DISCOVERY_INTERVAL, _discovery
)
else:
_LOGGER.debug("Starting discovery thread")
hass.data[DATA_SONOS].discovery_thread = pysonos.discover_thread(
_discovered_player,
interval=DISCOVERY_INTERVAL,
interface_addr=config.get(CONF_INTERFACE_ADDR),
)
_LOGGER.debug("Adding discovery job")
hass.async_add_executor_job(_discovery)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_discovery)
platform = entity_platform.current_platform.get()
@service.verify_domain_control(hass, SONOS_DOMAIN)
async def async_service_handle(service_call: ServiceCall):
"""Handle dispatched services."""
entities = await platform.async_extract_from_service(service_call)
if not entities:
return
if service_call.service == SERVICE_JOIN:
master = platform.entities.get(service_call.data[ATTR_MASTER])
if master:
await SonosEntity.join_multi(hass, master, entities)
else:
_LOGGER.error(
"Invalid master specified for join service: %s",
service_call.data[ATTR_MASTER],
)
elif service_call.service == SERVICE_UNJOIN:
await SonosEntity.unjoin_multi(hass, entities)
elif service_call.service == SERVICE_SNAPSHOT:
await SonosEntity.snapshot_multi(
hass, entities, service_call.data[ATTR_WITH_GROUP]
)
elif service_call.service == SERVICE_RESTORE:
await SonosEntity.restore_multi(
hass, entities, service_call.data[ATTR_WITH_GROUP]
)
hass.services.async_register(
SONOS_DOMAIN,
SERVICE_JOIN,
async_service_handle,
cv.make_entity_service_schema({vol.Required(ATTR_MASTER): cv.entity_id}),
)
hass.services.async_register(
SONOS_DOMAIN,
SERVICE_UNJOIN,
async_service_handle,
cv.make_entity_service_schema({}),
)
join_unjoin_schema = cv.make_entity_service_schema(
{vol.Optional(ATTR_WITH_GROUP, default=True): cv.boolean}
)
hass.services.async_register(
SONOS_DOMAIN, SERVICE_SNAPSHOT, async_service_handle, join_unjoin_schema
)
hass.services.async_register(
SONOS_DOMAIN, SERVICE_RESTORE, async_service_handle, join_unjoin_schema
)
platform.async_register_entity_service(
SERVICE_SET_TIMER,
{
vol.Required(ATTR_SLEEP_TIME): vol.All(
vol.Coerce(int), vol.Range(min=0, max=86399)
)
},
"set_sleep_timer",
)
platform.async_register_entity_service(SERVICE_CLEAR_TIMER, {}, "clear_sleep_timer")
platform.async_register_entity_service(
SERVICE_UPDATE_ALARM,
{
vol.Required(ATTR_ALARM_ID): cv.positive_int,
vol.Optional(ATTR_TIME): cv.time,
vol.Optional(ATTR_VOLUME): cv.small_float,
vol.Optional(ATTR_ENABLED): cv.boolean,
vol.Optional(ATTR_INCLUDE_LINKED_ZONES): cv.boolean,
},
"set_alarm",
)
platform.async_register_entity_service(
SERVICE_SET_OPTION,
{
vol.Optional(ATTR_NIGHT_SOUND): cv.boolean,
vol.Optional(ATTR_SPEECH_ENHANCE): cv.boolean,
vol.Optional(ATTR_STATUS_LIGHT): cv.boolean,
},
"set_option",
)
platform.async_register_entity_service(
SERVICE_PLAY_QUEUE,
{vol.Optional(ATTR_QUEUE_POSITION): cv.positive_int},
"play_queue",
)
platform.async_register_entity_service(
SERVICE_REMOVE_FROM_QUEUE,
{vol.Optional(ATTR_QUEUE_POSITION): cv.positive_int},
"remove_from_queue",
)
class _ProcessSonosEventQueue:
"""Queue like object for dispatching sonos events."""
def __init__(self, handler):
"""Initialize Sonos event queue."""
self._handler = handler
def put(self, item, block=True, timeout=None):
"""Process event."""
try:
self._handler(item)
except SoCoException as ex:
_LOGGER.warning("Error calling %s: %s", self._handler, ex)
def _get_entity_from_soco_uid(hass, uid):
"""Return SonosEntity from SoCo uid."""
for entity in hass.data[DATA_SONOS].entities:
if uid == entity.unique_id:
return entity
return None
def soco_error(errorcodes=None):
"""Filter out specified UPnP errors from logs and avoid exceptions."""
def decorator(funct):
"""Decorate functions."""
@ft.wraps(funct)
def wrapper(*args, **kwargs):
"""Wrap for all soco UPnP exception."""
try:
return funct(*args, **kwargs)
except SoCoUPnPException as err:
if not errorcodes or err.error_code not in errorcodes:
_LOGGER.error("Error on %s with %s", funct.__name__, err)
except SoCoException as err:
_LOGGER.error("Error on %s with %s", funct.__name__, err)
return wrapper
return decorator
def soco_coordinator(funct):
"""Call function on coordinator."""
@ft.wraps(funct)
def wrapper(entity, *args, **kwargs):
"""Wrap for call to coordinator."""
if entity.is_coordinator:
return funct(entity, *args, **kwargs)
return funct(entity.coordinator, *args, **kwargs)
return wrapper
def _timespan_secs(timespan):
"""Parse a time-span into number of seconds."""
if timespan in UNAVAILABLE_VALUES:
return None
return sum(60 ** x[0] * int(x[1]) for x in enumerate(reversed(timespan.split(":"))))
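# Illustrative example (editor's note, not in the original file): Sonos reports
# durations and positions as "H:MM:SS" strings, so _timespan_secs("0:03:27")
# evaluates to 0 * 3600 + 3 * 60 + 27 = 207 seconds, while values such as
# "NOT_IMPLEMENTED" map to None.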
class SonosEntity(MediaPlayerEntity):
"""Representation of a Sonos entity."""
def __init__(self, player):
"""Initialize the Sonos entity."""
self._subscriptions = []
self._poll_timer = None
self._seen_timer = None
self._volume_increment = 2
self._unique_id = player.uid
self._player = player
self._player_volume = None
self._player_muted = None
self._play_mode = None
self._coordinator = None
self._sonos_group = [self]
self._status = None
self._uri = None
self._media_library = pysonos.music_library.MusicLibrary(self.soco)
self._media_duration = None
self._media_position = None
self._media_position_updated_at = None
self._media_image_url = None
self._media_channel = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
self._queue_position = None
self._night_sound = None
self._speech_enhance = None
self._source_name = None
self._favorites = []
self._soco_snapshot = None
self._snapshot_group = None
# Set these early since device_info() needs them
speaker_info = self.soco.get_speaker_info(True)
self._name = speaker_info["zone_name"]
self._model = speaker_info["model_name"]
self._sw_version = speaker_info["software_version"]
self._mac_address = speaker_info["mac_address"]
async def async_added_to_hass(self):
"""Subscribe sonos events."""
await self.async_seen(self.soco)
self.hass.data[DATA_SONOS].entities.append(self)
def _rebuild_groups():
"""Build the current group topology."""
for entity in self.hass.data[DATA_SONOS].entities:
entity.update_groups()
self.hass.async_add_executor_job(_rebuild_groups)
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
def __hash__(self):
"""Return a hash of self."""
return hash(self.unique_id)
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def device_info(self):
"""Return information about the device."""
return {
"identifiers": {(SONOS_DOMAIN, self._unique_id)},
"name": self._name,
"model": self._model.replace("Sonos ", ""),
"sw_version": self._sw_version,
"connections": {(dr.CONNECTION_NETWORK_MAC, self._mac_address)},
"manufacturer": "Sonos",
}
@property
@soco_coordinator
def state(self):
"""Return the state of the entity."""
if self._status in (
"PAUSED_PLAYBACK",
"STOPPED",
):
# Sonos can consider itself "paused" but without having media loaded
            # (this happens when playing Spotify and another playback device is picked from the Spotify app)
if self.media_title is None:
return STATE_IDLE
return STATE_PAUSED
if self._status in ("PLAYING", "TRANSITIONING"):
return STATE_PLAYING
return STATE_IDLE
@property
def is_coordinator(self):
"""Return true if player is a coordinator."""
return self._coordinator is None
@property
def soco(self):
"""Return soco object."""
return self._player
@property
def coordinator(self):
"""Return coordinator of this player."""
return self._coordinator
async def async_seen(self, player):
"""Record that this player was seen right now."""
was_available = self.available
self._player = player
if self._seen_timer:
self._seen_timer()
self._seen_timer = self.hass.helpers.event.async_call_later(
2.5 * DISCOVERY_INTERVAL, self.async_unseen
)
if was_available:
return
self._poll_timer = self.hass.helpers.event.async_track_time_interval(
self.update, datetime.timedelta(seconds=SCAN_INTERVAL)
)
done = await self.hass.async_add_executor_job(self._attach_player)
if not done:
self._seen_timer()
self.async_unseen()
self.async_write_ha_state()
@callback
def async_unseen(self, now=None):
"""Make this player unavailable when it was not seen recently."""
self._seen_timer = None
if self._poll_timer:
self._poll_timer()
self._poll_timer = None
def _unsub(subscriptions):
for subscription in subscriptions:
subscription.unsubscribe()
self.hass.async_add_executor_job(_unsub, self._subscriptions)
self._subscriptions = []
self.async_write_ha_state()
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._seen_timer is not None
def _clear_media_position(self):
"""Clear the media_position."""
self._media_position = None
self._media_position_updated_at = None
def _set_favorites(self):
"""Set available favorites."""
self._favorites = []
for fav in self.soco.music_library.get_sonos_favorites():
try:
# Exclude non-playable favorites with no linked resources
if fav.reference.resources:
self._favorites.append(fav)
except SoCoException as ex:
# Skip unknown types
_LOGGER.error("Unhandled favorite '%s': %s", fav.title, ex)
def _attach_player(self):
"""Get basic information and add event subscriptions."""
try:
self._play_mode = self.soco.play_mode
self.update_volume()
self._set_favorites()
player = self.soco
def subscribe(sonos_service, action):
"""Add a subscription to a pysonos service."""
queue = _ProcessSonosEventQueue(action)
sub = sonos_service.subscribe(auto_renew=True, event_queue=queue)
self._subscriptions.append(sub)
subscribe(player.avTransport, self.update_media)
subscribe(player.renderingControl, self.update_volume)
subscribe(player.zoneGroupTopology, self.update_groups)
subscribe(player.contentDirectory, self.update_content)
return True
except SoCoException as ex:
_LOGGER.warning("Could not connect %s: %s", self.entity_id, ex)
return False
@property
def should_poll(self):
"""Return that we should not be polled (we handle that internally)."""
return False
def update(self, now=None):
"""Retrieve latest state."""
try:
self.update_groups()
self.update_volume()
if self.is_coordinator:
self.update_media()
except SoCoException:
pass
def update_media(self, event=None):
"""Update information about currently playing media."""
variables = event and event.variables
if variables:
new_status = variables["transport_state"]
else:
transport_info = self.soco.get_current_transport_info()
new_status = transport_info["current_transport_state"]
# Ignore transitions, we should get the target state soon
if new_status == "TRANSITIONING":
return
self._play_mode = event.current_play_mode if event else self.soco.play_mode
self._uri = None
self._media_duration = None
self._media_image_url = None
self._media_channel = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
self._queue_position = None
self._source_name = None
update_position = new_status != self._status
self._status = new_status
track_uri = variables["current_track_uri"] if variables else None
whats_playing = self.soco.whats_playing(track_uri)
if whats_playing == PLAYING_TV:
self.update_media_linein(SOURCE_TV)
elif whats_playing == PLAYING_LINE_IN:
self.update_media_linein(SOURCE_LINEIN)
else:
track_info = self.soco.get_current_track_info()
if not track_info["uri"]:
self._clear_media_position()
else:
self._uri = track_info["uri"]
self._media_artist = track_info.get("artist")
self._media_album_name = track_info.get("album")
self._media_title = track_info.get("title")
if whats_playing == PLAYING_RADIO:
self.update_media_radio(variables, track_info)
else:
self.update_media_music(update_position, track_info)
self.schedule_update_ha_state()
# Also update slaves
for entity in self.hass.data[DATA_SONOS].entities:
coordinator = entity.coordinator
if coordinator and coordinator.unique_id == self.unique_id:
entity.schedule_update_ha_state()
def update_media_linein(self, source):
"""Update state when playing from line-in/tv."""
self._clear_media_position()
self._media_title = source
self._source_name = source
def update_media_radio(self, variables, track_info):
"""Update state when streaming radio."""
self._clear_media_position()
try:
album_art_uri = variables["current_track_meta_data"].album_art_uri
self._media_image_url = self._media_library.build_album_art_full_uri(
album_art_uri
)
except (TypeError, KeyError, AttributeError):
pass
# Non-playing radios will not have a current title. Radios without tagging
# can have part of the radio URI as title. In these cases we try to use the
# radio name instead.
try:
uri_meta_data = variables["enqueued_transport_uri_meta_data"]
if isinstance(
uri_meta_data, pysonos.data_structures.DidlAudioBroadcast
) and (
self.state != STATE_PLAYING
or self.soco.is_radio_uri(self._media_title)
or self._media_title in self._uri
):
self._media_title = uri_meta_data.title
except (TypeError, KeyError, AttributeError):
pass
media_info = self.soco.get_current_media_info()
self._media_channel = media_info["channel"]
# Check if currently playing radio station is in favorites
for fav in self._favorites:
if fav.reference.get_uri() == media_info["uri"]:
self._source_name = fav.title
def update_media_music(self, update_media_position, track_info):
"""Update state when playing music tracks."""
self._media_duration = _timespan_secs(track_info.get("duration"))
current_position = _timespan_secs(track_info.get("position"))
# player started reporting position?
if current_position is not None and self._media_position is None:
update_media_position = True
# position jumped?
if current_position is not None and self._media_position is not None:
if self.state == STATE_PLAYING:
time_diff = utcnow() - self._media_position_updated_at
time_diff = time_diff.total_seconds()
else:
time_diff = 0
calculated_position = self._media_position + time_diff
if abs(calculated_position - current_position) > 1.5:
update_media_position = True
if current_position is None:
self._clear_media_position()
elif update_media_position:
self._media_position = current_position
self._media_position_updated_at = utcnow()
self._media_image_url = track_info.get("album_art")
playlist_position = int(track_info.get("playlist_position"))
if playlist_position > 0:
self._queue_position = playlist_position - 1
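    # Worked example of the drift check above (editor's note, hypothetical
    # numbers): if the stored position was 100 s, captured 10 s ago while
    # playing, the calculated position is 110 s. A newly reported position of
    # 180 s differs by 70 s (> 1.5 s), so it is treated as a seek and both the
    # position and its timestamp are refreshed.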
def update_volume(self, event=None):
"""Update information about currently volume settings."""
if event:
variables = event.variables
if "volume" in variables:
self._player_volume = int(variables["volume"]["Master"])
if "mute" in variables:
self._player_muted = variables["mute"]["Master"] == "1"
if "night_mode" in variables:
self._night_sound = variables["night_mode"] == "1"
if "dialog_level" in variables:
self._speech_enhance = variables["dialog_level"] == "1"
self.schedule_update_ha_state()
else:
self._player_volume = self.soco.volume
self._player_muted = self.soco.mute
self._night_sound = self.soco.night_mode
self._speech_enhance = self.soco.dialog_mode
def update_groups(self, event=None):
"""Handle callback for topology change event."""
def _get_soco_group():
"""Ask SoCo cache for existing topology."""
coordinator_uid = self.unique_id
slave_uids = []
try:
if self.soco.group and self.soco.group.coordinator:
coordinator_uid = self.soco.group.coordinator.uid
slave_uids = [
p.uid
for p in self.soco.group.members
if p.uid != coordinator_uid
]
except SoCoException:
pass
return [coordinator_uid] + slave_uids
async def _async_extract_group(event):
"""Extract group layout from a topology event."""
group = event and event.zone_player_uui_ds_in_group
if group:
return group.split(",")
return await self.hass.async_add_executor_job(_get_soco_group)
@callback
def _async_regroup(group):
"""Rebuild internal group layout."""
sonos_group = []
for uid in group:
entity = _get_entity_from_soco_uid(self.hass, uid)
if entity:
sonos_group.append(entity)
self._coordinator = None
self._sonos_group = sonos_group
self.async_write_ha_state()
for slave_uid in group[1:]:
slave = _get_entity_from_soco_uid(self.hass, slave_uid)
if slave:
# pylint: disable=protected-access
slave._coordinator = self
slave._sonos_group = sonos_group
slave.async_schedule_update_ha_state()
async def _async_handle_group_event(event):
"""Get async lock and handle event."""
if event and self._poll_timer:
# Cancel poll timer since we do receive events
self._poll_timer()
self._poll_timer = None
async with self.hass.data[DATA_SONOS].topology_condition:
group = await _async_extract_group(event)
if self.unique_id == group[0]:
_async_regroup(group)
self.hass.data[DATA_SONOS].topology_condition.notify_all()
if event and not hasattr(event, "zone_player_uui_ds_in_group"):
return
self.hass.add_job(_async_handle_group_event(event))
def update_content(self, event=None):
"""Update information about available content."""
if event and "favorites_update_id" in event.variables:
self._set_favorites()
self.schedule_update_ha_state()
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._player_volume is None:
return None
return self._player_volume / 100
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._player_muted
@property
@soco_coordinator
def shuffle(self):
"""Shuffling state."""
return PLAY_MODES[self._play_mode][0]
@property
@soco_coordinator
def repeat(self):
"""Return current repeat mode."""
sonos_repeat = PLAY_MODES[self._play_mode][1]
return SONOS_TO_REPEAT[sonos_repeat]
@property
@soco_coordinator
def media_content_id(self):
"""Content id of current playing media."""
return self._uri
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
@soco_coordinator
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._media_duration
@property
@soco_coordinator
def media_position(self):
"""Position of current playing media in seconds."""
return self._media_position
@property
@soco_coordinator
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._media_position_updated_at
@property
@soco_coordinator
def media_image_url(self):
"""Image url of current playing media."""
return self._media_image_url or None
@property
@soco_coordinator
def media_channel(self):
"""Channel currently playing."""
return self._media_channel or None
@property
@soco_coordinator
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._media_artist or None
@property
@soco_coordinator
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._media_album_name or None
@property
@soco_coordinator
def media_title(self):
"""Title of current playing media."""
return self._media_title or None
@property
@soco_coordinator
def queue_position(self):
"""If playing local queue return the position in the queue else None."""
return self._queue_position
@property
@soco_coordinator
def source(self):
"""Name of the current input source."""
return self._source_name or None
@property
@soco_coordinator
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SONOS
@soco_error()
def volume_up(self):
"""Volume up media player."""
self._player.volume += self._volume_increment
@soco_error()
def volume_down(self):
"""Volume down media player."""
self._player.volume -= self._volume_increment
@soco_error()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.soco.volume = str(int(volume * 100))
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def set_shuffle(self, shuffle):
"""Enable/Disable shuffle mode."""
sonos_shuffle = shuffle
sonos_repeat = PLAY_MODES[self._play_mode][1]
self.soco.play_mode = PLAY_MODE_BY_MEANING[(sonos_shuffle, sonos_repeat)]
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def set_repeat(self, repeat):
"""Set repeat mode."""
sonos_shuffle = PLAY_MODES[self._play_mode][0]
sonos_repeat = REPEAT_TO_SONOS[repeat]
self.soco.play_mode = PLAY_MODE_BY_MEANING[(sonos_shuffle, sonos_repeat)]
@soco_error()
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self.soco.mute = mute
@soco_error()
@soco_coordinator
def select_source(self, source):
"""Select input source."""
if source == SOURCE_LINEIN:
self.soco.switch_to_line_in()
elif source == SOURCE_TV:
self.soco.switch_to_tv()
else:
fav = [fav for fav in self._favorites if fav.title == source]
if len(fav) == 1:
src = fav.pop()
uri = src.reference.get_uri()
if self.soco.is_radio_uri(uri):
self.soco.play_uri(uri, title=source)
else:
self.soco.clear_queue()
self.soco.add_to_queue(src.reference)
self.soco.play_from_queue(0)
@property
@soco_coordinator
def source_list(self):
"""List of available input sources."""
sources = [fav.title for fav in self._favorites]
model = self._model.upper()
if "PLAY:5" in model or "CONNECT" in model:
sources += [SOURCE_LINEIN]
elif "PLAYBAR" in model:
sources += [SOURCE_LINEIN, SOURCE_TV]
elif "BEAM" in model or "PLAYBASE" in model:
sources += [SOURCE_TV]
return sources
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_play(self):
"""Send play command."""
self.soco.play()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_stop(self):
"""Send stop command."""
self.soco.stop()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_pause(self):
"""Send pause command."""
self.soco.pause()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_next_track(self):
"""Send next track command."""
self.soco.next()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_previous_track(self):
"""Send next track command."""
self.soco.previous()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_seek(self, position):
"""Send seek command."""
self.soco.seek(str(datetime.timedelta(seconds=int(position))))
@soco_error()
@soco_coordinator
def clear_playlist(self):
"""Clear players playlist."""
self.soco.clear_queue()
@soco_error()
@soco_coordinator
def play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If media_type is "playlist", media_id should be a Sonos
Playlist name. Otherwise, media_id should be a URI.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the queue.
"""
if media_type in (MEDIA_TYPE_MUSIC, MEDIA_TYPE_TRACK):
if kwargs.get(ATTR_MEDIA_ENQUEUE):
try:
if self.soco.is_spotify_uri(media_id):
self.soco.add_spotify_uri_to_queue(media_id)
else:
self.soco.add_uri_to_queue(media_id)
except SoCoUPnPException:
_LOGGER.error(
'Error parsing media uri "%s", '
"please check it's a valid media resource "
"supported by Sonos",
media_id,
)
else:
if self.soco.is_spotify_uri(media_id):
self.soco.clear_queue()
self.soco.add_spotify_uri_to_queue(media_id)
self.soco.play_from_queue(0)
else:
self.soco.play_uri(media_id)
elif media_type == MEDIA_TYPE_PLAYLIST:
if media_id.startswith("S:"):
item = get_media(self._media_library, media_id, media_type)
self.soco.play_uri(item.get_uri())
return
try:
playlists = self.soco.get_sonos_playlists()
playlist = next(p for p in playlists if p.title == media_id)
self.soco.clear_queue()
self.soco.add_to_queue(playlist)
self.soco.play_from_queue(0)
except StopIteration:
_LOGGER.error('Could not find a Sonos playlist named "%s"', media_id)
elif media_type in PLAYABLE_MEDIA_TYPES:
item = get_media(self._media_library, media_id, media_type)
if not item:
_LOGGER.error('Could not find "%s" in the library', media_id)
return
self.soco.play_uri(item.get_uri())
else:
_LOGGER.error('Sonos does not support a media type of "%s"', media_type)
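    # Illustrative calls (editor's sketch with hypothetical names/URIs): playing
    # a direct URI versus a saved Sonos playlist selected by title.
    #
    #   entity.play_media(MEDIA_TYPE_MUSIC, "x-file-cifs://nas/music/song.mp3")
    #   entity.play_media(MEDIA_TYPE_PLAYLIST, "Morning Jazz")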
@soco_error()
def join(self, slaves):
"""Form a group with other players."""
if self._coordinator:
self.unjoin()
group = [self]
else:
group = self._sonos_group.copy()
for slave in slaves:
if slave.unique_id != self.unique_id:
slave.soco.join(self.soco)
# pylint: disable=protected-access
slave._coordinator = self
if slave not in group:
group.append(slave)
return group
@staticmethod
async def join_multi(hass, master, entities):
"""Form a group with other players."""
async with hass.data[DATA_SONOS].topology_condition:
group = await hass.async_add_executor_job(master.join, entities)
await SonosEntity.wait_for_groups(hass, [group])
@soco_error()
def unjoin(self):
"""Unjoin the player from a group."""
self.soco.unjoin()
self._coordinator = None
@staticmethod
async def unjoin_multi(hass, entities):
"""Unjoin several players from their group."""
def _unjoin_all(entities):
"""Sync helper."""
# Unjoin slaves first to prevent inheritance of queues
coordinators = [e for e in entities if e.is_coordinator]
slaves = [e for e in entities if not e.is_coordinator]
for entity in slaves + coordinators:
entity.unjoin()
async with hass.data[DATA_SONOS].topology_condition:
await hass.async_add_executor_job(_unjoin_all, entities)
await SonosEntity.wait_for_groups(hass, [[e] for e in entities])
@soco_error()
def snapshot(self, with_group):
"""Snapshot the state of a player."""
self._soco_snapshot = pysonos.snapshot.Snapshot(self.soco)
self._soco_snapshot.snapshot()
if with_group:
self._snapshot_group = self._sonos_group.copy()
else:
self._snapshot_group = None
@staticmethod
async def snapshot_multi(hass, entities, with_group):
"""Snapshot all the entities and optionally their groups."""
# pylint: disable=protected-access
def _snapshot_all(entities):
"""Sync helper."""
for entity in entities:
entity.snapshot(with_group)
# Find all affected players
entities = set(entities)
if with_group:
for entity in list(entities):
entities.update(entity._sonos_group)
async with hass.data[DATA_SONOS].topology_condition:
await hass.async_add_executor_job(_snapshot_all, entities)
@soco_error()
def restore(self):
"""Restore a snapshotted state to a player."""
try:
self._soco_snapshot.restore()
except (TypeError, AttributeError, SoCoException) as ex:
# Can happen if restoring a coordinator onto a current slave
_LOGGER.warning("Error on restore %s: %s", self.entity_id, ex)
self._soco_snapshot = None
self._snapshot_group = None
@staticmethod
async def restore_multi(hass, entities, with_group):
"""Restore snapshots for all the entities."""
# pylint: disable=protected-access
def _restore_groups(entities, with_group):
"""Pause all current coordinators and restore groups."""
for entity in (e for e in entities if e.is_coordinator):
if entity.state == STATE_PLAYING:
entity.media_pause()
groups = []
if with_group:
# Unjoin slaves first to prevent inheritance of queues
for entity in [e for e in entities if not e.is_coordinator]:
if entity._snapshot_group != entity._sonos_group:
entity.unjoin()
# Bring back the original group topology
for entity in (e for e in entities if e._snapshot_group):
if entity._snapshot_group[0] == entity:
entity.join(entity._snapshot_group)
groups.append(entity._snapshot_group.copy())
return groups
def _restore_players(entities):
"""Restore state of all players."""
for entity in (e for e in entities if not e.is_coordinator):
entity.restore()
for entity in (e for e in entities if e.is_coordinator):
entity.restore()
# Find all affected players
entities = {e for e in entities if e._soco_snapshot}
if with_group:
for entity in [e for e in entities if e._snapshot_group]:
entities.update(entity._snapshot_group)
async with hass.data[DATA_SONOS].topology_condition:
groups = await hass.async_add_executor_job(
_restore_groups, entities, with_group
)
await SonosEntity.wait_for_groups(hass, groups)
await hass.async_add_executor_job(_restore_players, entities)
@staticmethod
async def wait_for_groups(hass, groups):
"""Wait until all groups are present, or timeout."""
# pylint: disable=protected-access
def _test_groups(groups):
"""Return whether all groups exist now."""
for group in groups:
coordinator = group[0]
# Test that coordinator is coordinating
current_group = coordinator._sonos_group
if coordinator != current_group[0]:
return False
# Test that slaves match
if set(group[1:]) != set(current_group[1:]):
return False
return True
try:
with async_timeout.timeout(5):
while not _test_groups(groups):
await hass.data[DATA_SONOS].topology_condition.wait()
except asyncio.TimeoutError:
_LOGGER.warning("Timeout waiting for target groups %s", groups)
for entity in hass.data[DATA_SONOS].entities:
entity.soco._zgs_cache.clear()
@soco_error()
@soco_coordinator
def set_sleep_timer(self, sleep_time):
"""Set the timer on the player."""
self.soco.set_sleep_timer(sleep_time)
@soco_error()
@soco_coordinator
def clear_sleep_timer(self):
"""Clear the timer on the player."""
self.soco.set_sleep_timer(None)
@soco_error()
@soco_coordinator
def set_alarm(
self, alarm_id, time=None, volume=None, enabled=None, include_linked_zones=None
):
"""Set the alarm clock on the player."""
alarm = None
for one_alarm in alarms.get_alarms(self.soco):
# pylint: disable=protected-access
if one_alarm._alarm_id == str(alarm_id):
alarm = one_alarm
if alarm is None:
_LOGGER.warning("did not find alarm with id %s", alarm_id)
return
if time is not None:
alarm.start_time = time
if volume is not None:
alarm.volume = int(volume * 100)
if enabled is not None:
alarm.enabled = enabled
if include_linked_zones is not None:
alarm.include_linked_zones = include_linked_zones
alarm.save()
@soco_error()
def set_option(self, night_sound=None, speech_enhance=None, status_light=None):
"""Modify playback options."""
if night_sound is not None and self._night_sound is not None:
self.soco.night_mode = night_sound
if speech_enhance is not None and self._speech_enhance is not None:
self.soco.dialog_mode = speech_enhance
if status_light is not None:
self.soco.status_light = status_light
@soco_error()
def play_queue(self, queue_position=0):
"""Start playing the queue."""
self.soco.play_from_queue(queue_position)
@soco_error()
@soco_coordinator
def remove_from_queue(self, queue_position=0):
"""Remove item from the queue."""
self.soco.remove_from_queue(queue_position)
@property
def device_state_attributes(self):
"""Return entity specific state attributes."""
attributes = {ATTR_SONOS_GROUP: [e.entity_id for e in self._sonos_group]}
if self._night_sound is not None:
attributes[ATTR_NIGHT_SOUND] = self._night_sound
if self._speech_enhance is not None:
attributes[ATTR_SPEECH_ENHANCE] = self._speech_enhance
if self.queue_position is not None:
attributes[ATTR_QUEUE_POSITION] = self.queue_position
return attributes
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
if media_content_type in [None, "library"]:
return await self.hass.async_add_executor_job(
library_payload, self._media_library
)
payload = {
"search_type": media_content_type,
"idstring": media_content_id,
}
response = await self.hass.async_add_executor_job(
build_item_response, self._media_library, payload
)
if response is None:
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
)
return response
def build_item_response(media_library, payload):
"""Create response payload for the provided media query."""
if payload["search_type"] == MEDIA_TYPE_ALBUM and payload["idstring"].startswith(
("A:GENRE", "A:COMPOSER")
):
payload["idstring"] = "A:ALBUMARTIST/" + "/".join(
payload["idstring"].split("/")[2:]
)
media = media_library.browse_by_idstring(
MEDIA_TYPES_TO_SONOS[payload["search_type"]],
payload["idstring"],
full_album_art_uri=True,
max_items=0,
)
if media is None:
return
thumbnail = None
title = None
# Fetch album info for titles and thumbnails
# Can't be extracted from track info
if (
payload["search_type"] == MEDIA_TYPE_ALBUM
and media[0].item_class == "object.item.audioItem.musicTrack"
):
item = get_media(media_library, payload["idstring"], SONOS_ALBUM_ARTIST)
title = getattr(item, "title", None)
thumbnail = getattr(item, "album_art_uri", media[0].album_art_uri)
if not title:
try:
title = urllib.parse.unquote(payload["idstring"].split("/")[1])
except IndexError:
title = LIBRARY_TITLES_MAPPING[payload["idstring"]]
try:
media_class = SONOS_TO_MEDIA_CLASSES[
MEDIA_TYPES_TO_SONOS[payload["search_type"]]
]
except KeyError:
_LOGGER.debug("Unknown media type received %s", payload["search_type"])
return None
children = []
for item in media:
try:
children.append(item_payload(item))
except UnknownMediaType:
pass
return BrowseMedia(
title=title,
thumbnail=thumbnail,
media_class=media_class,
media_content_id=payload["idstring"],
media_content_type=payload["search_type"],
children=children,
can_play=can_play(payload["search_type"]),
can_expand=can_expand(payload["search_type"]),
)
def item_payload(item):
"""
Create response payload for a single media item.
Used by async_browse_media.
"""
media_type = get_media_type(item)
try:
media_class = SONOS_TO_MEDIA_CLASSES[media_type]
except KeyError as err:
_LOGGER.debug("Unknown media type received %s", media_type)
raise UnknownMediaType from err
return BrowseMedia(
title=item.title,
thumbnail=getattr(item, "album_art_uri", None),
media_class=media_class,
media_content_id=get_content_id(item),
media_content_type=SONOS_TO_MEDIA_TYPES[media_type],
can_play=can_play(item.item_class),
can_expand=can_expand(item),
)
def library_payload(media_library):
"""
Create response payload to describe contents of a specific library.
Used by async_browse_media.
"""
if not media_library.browse_by_idstring(
"tracks",
"",
max_items=1,
):
raise BrowseError("Local library not found")
children = []
for item in media_library.browse():
try:
children.append(item_payload(item))
except UnknownMediaType:
pass
return BrowseMedia(
title="Music Library",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="library",
media_content_type="library",
can_play=False,
can_expand=True,
children=children,
)
def get_media_type(item):
"""Extract media type of item."""
if item.item_class == "object.item.audioItem.musicTrack":
return SONOS_TRACKS
if (
item.item_class == "object.container.album.musicAlbum"
and SONOS_TYPES_MAPPING.get(item.item_id.split("/")[0])
in [
SONOS_ALBUM_ARTIST,
SONOS_GENRE,
]
):
return SONOS_TYPES_MAPPING[item.item_class]
return SONOS_TYPES_MAPPING.get(item.item_id.split("/")[0], item.item_class)
def can_play(item):
"""
Test if playable.
Used by async_browse_media.
"""
return SONOS_TO_MEDIA_TYPES.get(item) in PLAYABLE_MEDIA_TYPES
def can_expand(item):
"""
Test if expandable.
Used by async_browse_media.
"""
if isinstance(item, str):
return SONOS_TYPES_MAPPING.get(item) in EXPANDABLE_MEDIA_TYPES
if SONOS_TO_MEDIA_TYPES.get(item.item_class) in EXPANDABLE_MEDIA_TYPES:
return True
return SONOS_TYPES_MAPPING.get(item.item_id) in EXPANDABLE_MEDIA_TYPES
def get_content_id(item):
"""Extract content id or uri."""
if item.item_class == "object.item.audioItem.musicTrack":
return item.get_uri()
return item.item_id
def get_media(media_library, item_id, search_type):
"""Fetch media/album."""
search_type = MEDIA_TYPES_TO_SONOS.get(search_type, search_type)
if not item_id.startswith("A:ALBUM") and search_type == SONOS_ALBUM:
item_id = "A:ALBUMARTIST/" + "/".join(item_id.split("/")[2:])
for item in media_library.browse_by_idstring(
search_type,
"/".join(item_id.split("/")[:-1]),
full_album_art_uri=True,
max_items=0,
):
if item.item_id == item_id:
return item
| apache-2.0 | 3,008,322,014,613,312,000 | 31.675948 | 97 | 0.593977 | false |
MediffRobotics/DeepRobotics | Experiments/ImitationDetection/retrain.py | 8 | 42455 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple transfer learning with an Inception v3 architecture model which
displays summaries in TensorBoard.
This example shows how to take an Inception v3 architecture model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector for each image. We
train a softmax layer on top of this representation. Assuming the softmax layer
contains N labels, this corresponds to learning N + 2048*N model parameters
corresponding to the learned biases and weights.
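With 5 flower classes, say, that works out to 5 + 2048*5 = 10,245 trainable
parameters in the new layer.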
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
bazel build third_party/tensorflow/examples/image_retraining:retrain && \
bazel-bin/third_party/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import hashlib
import os.path
import random
import re
import struct
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
FLAGS = None
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images split
into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
result = {}
sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
print("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(gfile.Glob(file_glob))
if not file_list:
print('No files found')
continue
if len(file_list) < 20:
print('WARNING: Folder has less than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
print('WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
      # We want to ignore anything after '_nohash_' in the file name when
      # deciding which set to put an image in, so that the data set creator
      # has a way of grouping photos that are close variations of each other.
      # For example, this is used in the plant disease data set to group
      # multiple pictures of the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
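# Illustrative shape of the returned structure (editor's sketch, hypothetical
# file names):
#
#   {'daisy': {'dir': 'daisy',
#              'training': ['photo1.jpg', 'photo2.jpg'],
#              'testing': ['photo77.jpg'],
#              'validation': ['somepicture.jpg']}}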
def get_image_path(image_lists, label_name, index, image_dir, category):
""""Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category):
""""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '.txt'
def create_inception_graph():
""""Creates a graph from saved GraphDef file and returns a Graph object.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Session() as sess:
model_filename = os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb')
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
tf.import_graph_def(graph_def, name='', return_elements=[
BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
RESIZED_INPUT_TENSOR_NAME]))
return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
bottleneck_values = sess.run(
bottleneck_tensor,
{image_data_tensor: image_data})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def maybe_download_and_extract():
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
"""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL,
filepath,
_progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def write_list_of_floats_to_file(list_of_floats, file_path):
"""Writes a given list of floats to a binary file.
Args:
list_of_floats: List of floats we want to write to a file.
file_path: Path to a file where list of floats will be stored.
"""
s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)
with open(file_path, 'wb') as f:
f.write(s)
def read_list_of_floats_from_file(file_path):
"""Reads list of floats from a given file.
Args:
file_path: Path to a file where list of floats was stored.
Returns:
Array of bottleneck values (list of floats).
"""
with open(file_path, 'rb') as f:
s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())
return list(s)
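# Illustrative round trip for the two helpers above (editor's sketch; the list
# must contain exactly BOTTLENECK_TENSOR_SIZE floats and the path is
# hypothetical):
#
#   write_list_of_floats_to_file([0.0] * BOTTLENECK_TENSOR_SIZE, '/tmp/b.bin')
#   values = read_list_of_floats_from_file('/tmp/b.bin')  # 2048 floats back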
bottleneck_path_2_bottleneck_values = {}
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
bottleneck_tensor: The output tensor for the bottleneck values.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category)
if not os.path.exists(bottleneck_path):
print('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
bottleneck_values = run_bottleneck_on_image(sess, image_data,
jpeg_data_tensor,
bottleneck_tensor)
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
bottleneck_tensor: The penultimate output layer of the graph.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(sess, image_lists, label_name, index,
image_dir, category, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
print(str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
bottleneck_tensor):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
    # Note that we materialize the distorted_image_data as a numpy array before
    # running inference on the image. This involves 2 memory copies and
    # might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck = run_bottleneck_on_image(sess, distorted_image_data,
resized_input_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
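# Worked example (sketch): should_distort_images(False, 0, 0, 0) evaluates to False,
# while should_distort_images(False, 10, 0, 0) evaluates to True - any single
# non-default distortion flag is enough to switch onto the slower distortion path.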
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
The jpeg input layer and the distorted result tensor.
"""
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)
precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
MODEL_INPUT_DEPTH])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
return jpeg_data, distort_result
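# Worked example (sketch, following the formulas above): with random_crop=50 and
# random_scale=10, margin_scale = 1.0 + 50/100.0 = 1.5 and resize_scale_value is
# drawn uniformly from [1.0, 1.1], so scale_value falls in [1.5, 1.65]. The decoded
# image is therefore resized to between 1.5x and 1.65x the model input size before a
# random MODEL_INPUT_HEIGHT x MODEL_INPUT_WIDTH window is cropped back out.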
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
"""Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
      layer_weights = tf.Variable(
          tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001),
          name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.summary.histogram('activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
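# Shape sketch for the layer added above (assuming the stock Inception v3 bottleneck
# size of 2048 and, as a hypothetical example, 5 classes):
#   bottleneck_input    : [None, 2048]
#   layer_weights       : [2048, 5]
#   logits/final_tensor : [None, 5]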
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(
prediction, tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
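# Arithmetic sketch for the evaluation above (values made up for illustration): if
# the predicted argmax over a batch is [2, 0, 1] and the ground-truth argmax is
# [2, 1, 1], correct_prediction becomes [True, False, True] and the reported
# accuracy is mean([1.0, 0.0, 1.0]), i.e. about 0.667.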
def main(_):
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
# Set up the pre-trained graph.
maybe_download_and_extract()
graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
create_inception_graph())
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
print('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
print('Only one valid folder of images found at ' + FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
sess = tf.Session()
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(len(image_lists.keys()),
FLAGS.final_tensor_name,
bottleneck_tensor)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, prediction = add_evaluation_step(
final_tensor, ground_truth_input)
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every time
# with distortions applied, or from the cache stored on disk.
if do_distort_images:
train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
train_bottlenecks, train_ground_truth, _ = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run([merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
train_accuracy * 100))
print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
cross_entropy_value))
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,
'testing', FLAGS.bottleneck_dir,
FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor))
test_accuracy, predictions = sess.run(
[evaluation_step, prediction],
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
print('Final test accuracy = %.1f%% (N=%d)' % (
test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
print('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
print('%70s %s' % (test_filename, image_lists.keys()[predictions[i]]))
# Write out the trained graph and labels with the weights stored as constants.
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
f.write(output_graph_def.SerializeToString())
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='/tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--output_labels',
type=str,
default='/tmp/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=4000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='/tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| gpl-3.0 | -3,726,489,025,901,537,000 | 39.356464 | 129 | 0.665293 | false |
iguzu/gae-django | tests/modeltests/ordering/models.py | 17 | 3537 | """
6. Specifying ordering
Specify default ordering for a model using the ``ordering`` attribute, which
should be a list or tuple of field names. This tells Django how to order
``QuerySet`` results.
If a field name in ``ordering`` starts with a hyphen, that field will be
ordered in descending order. Otherwise, it'll be ordered in ascending order.
The special-case field name ``"?"`` specifies random order.
The ordering attribute is not required. If you leave it off, ordering will be
undefined -- not random, just undefined.
"""
from django.db import models
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pub_date', 'headline')
def __unicode__(self):
return self.headline
__test__ = {'API_TESTS':"""
# Create a couple of Articles.
>>> from datetime import datetime
>>> a1 = Article(headline='Article 1', pub_date=datetime(2005, 7, 26))
>>> a1.save()
>>> a2 = Article(headline='Article 2', pub_date=datetime(2005, 7, 27))
>>> a2.save()
>>> a3 = Article(headline='Article 3', pub_date=datetime(2005, 7, 27))
>>> a3.save()
>>> a4 = Article(headline='Article 4', pub_date=datetime(2005, 7, 28))
>>> a4.save()
# By default, Article.objects.all() orders by pub_date descending, then
# headline ascending.
>>> Article.objects.all()
[<Article: Article 4>, <Article: Article 2>, <Article: Article 3>, <Article: Article 1>]
# Override ordering with order_by, which is in the same format as the ordering
# attribute in models.
>>> Article.objects.order_by('headline')
[<Article: Article 1>, <Article: Article 2>, <Article: Article 3>, <Article: Article 4>]
>>> Article.objects.order_by('pub_date', '-headline')
[<Article: Article 1>, <Article: Article 3>, <Article: Article 2>, <Article: Article 4>]
# Only the last order_by has any effect (since they each override any previous
# ordering).
>>> Article.objects.order_by('id')
[<Article: Article 1>, <Article: Article 2>, <Article: Article 3>, <Article: Article 4>]
>>> Article.objects.order_by('id').order_by('-headline')
[<Article: Article 4>, <Article: Article 3>, <Article: Article 2>, <Article: Article 1>]
# Use the 'stop' part of slicing notation to limit the results.
>>> Article.objects.order_by('headline')[:2]
[<Article: Article 1>, <Article: Article 2>]
# Use the 'stop' and 'start' parts of slicing notation to offset the result list.
>>> Article.objects.order_by('headline')[1:3]
[<Article: Article 2>, <Article: Article 3>]
# Getting a single item should work too:
>>> Article.objects.all()[0]
<Article: Article 4>
# Use '?' to order randomly. (We're using [...] in the output to indicate we
# don't know what order the output will be in.)
>>> Article.objects.order_by('?')
[...]
# Ordering can be reversed using the reverse() method on a queryset. This
# allows you to extract things like "the last two items" (reverse and then
# take the first two).
>>> Article.objects.all().reverse()[:2]
[<Article: Article 1>, <Article: Article 3>]
# Ordering can be based on fields included from an 'extra' clause
>>> Article.objects.extra(select={'foo': 'pub_date'}, order_by=['foo', 'headline'])
[<Article: Article 1>, <Article: Article 2>, <Article: Article 3>, <Article: Article 4>]
# If the extra clause uses an SQL keyword for a name, it will be protected by quoting.
>>> Article.objects.extra(select={'order': 'pub_date'}, order_by=['order', 'headline'])
[<Article: Article 1>, <Article: Article 2>, <Article: Article 3>, <Article: Article 4>]
"""}
| bsd-3-clause | -2,977,372,771,957,931,500 | 38.741573 | 88 | 0.690698 | false |
Acidburn0zzz/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wpttest.py | 1 | 20423 | import os
import subprocess
import sys
from six.moves.urllib.parse import urljoin
from collections import defaultdict
from six import iteritems, string_types
from .wptmanifest.parser import atoms
atom_reset = atoms["Reset"]
enabled_tests = {"testharness", "reftest", "wdspec", "crashtest"}
class Result(object):
def __init__(self,
status,
message,
expected=None,
extra=None,
stack=None,
known_intermittent=None):
if status not in self.statuses:
raise ValueError("Unrecognised status %s" % status)
self.status = status
self.message = message
self.expected = expected
self.known_intermittent = known_intermittent if known_intermittent is not None else []
self.extra = extra if extra is not None else {}
self.stack = stack
def __repr__(self):
return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.status)
class SubtestResult(object):
def __init__(self, name, status, message, stack=None, expected=None, known_intermittent=None):
self.name = name
if status not in self.statuses:
raise ValueError("Unrecognised status %s" % status)
self.status = status
self.message = message
self.stack = stack
self.expected = expected
self.known_intermittent = known_intermittent if known_intermittent is not None else []
def __repr__(self):
return "<%s.%s %s %s>" % (self.__module__, self.__class__.__name__, self.name, self.status)
class TestharnessResult(Result):
default_expected = "OK"
statuses = {"OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH", "PRECONDITION_FAILED"}
class TestharnessSubtestResult(SubtestResult):
default_expected = "PASS"
statuses = {"PASS", "FAIL", "TIMEOUT", "NOTRUN", "PRECONDITION_FAILED"}
class ReftestResult(Result):
default_expected = "PASS"
statuses = {"PASS", "FAIL", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT",
"CRASH"}
class WdspecResult(Result):
default_expected = "OK"
statuses = {"OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"}
class WdspecSubtestResult(SubtestResult):
default_expected = "PASS"
statuses = {"PASS", "FAIL", "ERROR"}
class CrashtestResult(Result):
default_expected = "PASS"
statuses = {"PASS", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT",
"CRASH"}
def get_run_info(metadata_root, product, **kwargs):
return RunInfo(metadata_root, product, **kwargs)
class RunInfo(dict):
def __init__(self, metadata_root, product, debug,
browser_version=None,
browser_channel=None,
verify=None,
extras=None,
enable_webrender=False):
import mozinfo
self._update_mozinfo(metadata_root)
self.update(mozinfo.info)
from .update.tree import GitTree
try:
# GitTree.__init__ throws if we are not in a git tree.
rev = GitTree(log_error=False).rev
except (OSError, subprocess.CalledProcessError):
rev = None
if rev:
self["revision"] = rev
self["python_version"] = sys.version_info.major
self["product"] = product
if debug is not None:
self["debug"] = debug
elif "debug" not in self:
# Default to release
self["debug"] = False
if browser_version:
self["browser_version"] = browser_version
if browser_channel:
self["browser_channel"] = browser_channel
self["verify"] = verify
if "wasm" not in self:
self["wasm"] = False
if extras is not None:
self.update(extras)
self["headless"] = extras.get("headless", False)
self["webrender"] = enable_webrender
def _update_mozinfo(self, metadata_root):
"""Add extra build information from a mozinfo.json file in a parent
directory"""
import mozinfo
path = metadata_root
dirs = set()
while path != os.path.expanduser('~'):
if path in dirs:
break
dirs.add(str(path))
path = os.path.split(path)[0]
mozinfo.find_and_update_from_json(*dirs)
def server_protocol(manifest_item):
if hasattr(manifest_item, "h2") and manifest_item.h2:
return "h2"
if hasattr(manifest_item, "https") and manifest_item.https:
return "https"
return "http"
class Test(object):
result_cls = None
subtest_result_cls = None
test_type = None
default_timeout = 10 # seconds
long_timeout = 60 # seconds
def __init__(self, tests_root, url, inherit_metadata, test_metadata,
timeout=None, path=None, protocol="http", quic=False):
self.tests_root = tests_root
self.url = url
self._inherit_metadata = inherit_metadata
self._test_metadata = test_metadata
self.timeout = timeout if timeout is not None else self.default_timeout
self.path = path
self.environment = {"protocol": protocol, "prefs": self.prefs, "quic": quic}
def __eq__(self, other):
if not isinstance(other, Test):
return False
return self.id == other.id
# Python 2 does not have this delegation, while Python 3 does.
def __ne__(self, other):
return not self.__eq__(other)
def update_metadata(self, metadata=None):
if metadata is None:
metadata = {}
return metadata
@classmethod
def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
return cls(manifest_file.tests_root,
manifest_item.url,
inherit_metadata,
test_metadata,
timeout=timeout,
path=os.path.join(manifest_file.tests_root, manifest_item.path),
protocol=server_protocol(manifest_item))
@property
def id(self):
return self.url
@property
def keys(self):
return tuple()
@property
def abs_path(self):
return os.path.join(self.tests_root, self.path)
def _get_metadata(self, subtest=None):
if self._test_metadata is not None and subtest is not None:
return self._test_metadata.get_subtest(subtest)
else:
return self._test_metadata
def itermeta(self, subtest=None):
if self._test_metadata is not None:
if subtest is not None:
subtest_meta = self._get_metadata(subtest)
if subtest_meta is not None:
yield subtest_meta
yield self._get_metadata()
for metadata in reversed(self._inherit_metadata):
yield metadata
def disabled(self, subtest=None):
for meta in self.itermeta(subtest):
disabled = meta.disabled
if disabled is not None:
return disabled
return None
@property
def restart_after(self):
for meta in self.itermeta(None):
restart_after = meta.restart_after
if restart_after is not None:
return True
return False
@property
def leaks(self):
for meta in self.itermeta(None):
leaks = meta.leaks
if leaks is not None:
return leaks
return False
@property
def min_assertion_count(self):
for meta in self.itermeta(None):
count = meta.min_assertion_count
if count is not None:
return count
return 0
@property
def max_assertion_count(self):
for meta in self.itermeta(None):
count = meta.max_assertion_count
if count is not None:
return count
return 0
@property
def lsan_disabled(self):
for meta in self.itermeta():
if meta.lsan_disabled is not None:
return meta.lsan_disabled
return False
@property
def lsan_allowed(self):
lsan_allowed = set()
for meta in self.itermeta():
lsan_allowed |= meta.lsan_allowed
if atom_reset in lsan_allowed:
lsan_allowed.remove(atom_reset)
break
return lsan_allowed
@property
def lsan_max_stack_depth(self):
for meta in self.itermeta(None):
depth = meta.lsan_max_stack_depth
if depth is not None:
return depth
return None
@property
def mozleak_allowed(self):
mozleak_allowed = set()
for meta in self.itermeta():
mozleak_allowed |= meta.leak_allowed
if atom_reset in mozleak_allowed:
mozleak_allowed.remove(atom_reset)
break
return mozleak_allowed
@property
def mozleak_threshold(self):
rv = {}
for meta in self.itermeta(None):
threshold = meta.leak_threshold
for key, value in iteritems(threshold):
if key not in rv:
rv[key] = value
return rv
@property
def tags(self):
tags = set()
for meta in self.itermeta():
meta_tags = meta.tags
tags |= meta_tags
if atom_reset in meta_tags:
tags.remove(atom_reset)
break
tags.add("dir:%s" % self.id.lstrip("/").split("/")[0])
return tags
@property
def prefs(self):
prefs = {}
for meta in reversed(list(self.itermeta())):
meta_prefs = meta.prefs
if atom_reset in meta_prefs:
del meta_prefs[atom_reset]
prefs = {}
prefs.update(meta_prefs)
return prefs
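    # Merge sketch for the prefs property above (hypothetical metadata values,
    # assuming the usual root-to-test inheritance order): with directory-level prefs
    # {"a": "1", "b": "2"} and test-level prefs {"@Reset": True, "b": "3"}, the Reset
    # atom discards everything inherited so far and the final result is {"b": "3"}.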
def expected(self, subtest=None):
if subtest is None:
default = self.result_cls.default_expected
else:
default = self.subtest_result_cls.default_expected
metadata = self._get_metadata(subtest)
if metadata is None:
return default
try:
expected = metadata.get("expected")
if isinstance(expected, string_types):
return expected
elif isinstance(expected, list):
return expected[0]
elif expected is None:
return default
except KeyError:
return default
def implementation_status(self):
implementation_status = None
for meta in self.itermeta():
implementation_status = meta.implementation_status
if implementation_status:
return implementation_status
# assuming no specific case, we are implementing it
return "implementing"
def known_intermittent(self, subtest=None):
metadata = self._get_metadata(subtest)
if metadata is None:
return []
try:
expected = metadata.get("expected")
if isinstance(expected, list):
return expected[1:]
return []
except KeyError:
return []
def expect_any_subtest_status(self):
metadata = self._get_metadata()
if metadata is None:
return False
try:
# This key is used by the Blink CI to ignore subtest statuses
metadata.get("blink_expect_any_subtest_status")
return True
except KeyError:
return False
def __repr__(self):
return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.id)
class TestharnessTest(Test):
result_cls = TestharnessResult
subtest_result_cls = TestharnessSubtestResult
test_type = "testharness"
def __init__(self, tests_root, url, inherit_metadata, test_metadata,
timeout=None, path=None, protocol="http", testdriver=False,
jsshell=False, scripts=None, quic=False):
Test.__init__(self, tests_root, url, inherit_metadata, test_metadata, timeout,
path, protocol, quic)
self.testdriver = testdriver
self.jsshell = jsshell
self.scripts = scripts or []
@classmethod
def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
testdriver = manifest_item.testdriver if hasattr(manifest_item, "testdriver") else False
jsshell = manifest_item.jsshell if hasattr(manifest_item, "jsshell") else False
quic = manifest_item.quic if hasattr(manifest_item, "quic") else False
script_metadata = manifest_item.script_metadata or []
scripts = [v for (k, v) in script_metadata
if k in (b"script", "script")]
return cls(manifest_file.tests_root,
manifest_item.url,
inherit_metadata,
test_metadata,
timeout=timeout,
path=os.path.join(manifest_file.tests_root, manifest_item.path),
protocol=server_protocol(manifest_item),
testdriver=testdriver,
jsshell=jsshell,
scripts=scripts,
quic=quic)
@property
def id(self):
return self.url
class ManualTest(Test):
test_type = "manual"
@property
def id(self):
return self.url
class ReftestTest(Test):
"""A reftest
    A reftest should be considered to pass if one of its references matches (see below) *and*,
    if that reference itself has references, the reference also passes recursively.
Attributes:
references (List[Tuple[str, str]]): a list of alternate references, where one must match for the test to pass
viewport_size (Optional[Tuple[int, int]]): size of the viewport for this test, if not default
dpi (Optional[int]): dpi to use when rendering this test, if not default
"""
result_cls = ReftestResult
test_type = "reftest"
def __init__(self, tests_root, url, inherit_metadata, test_metadata, references,
timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None, protocol="http", quic=False):
Test.__init__(self, tests_root, url, inherit_metadata, test_metadata, timeout,
path, protocol, quic)
for _, ref_type in references:
if ref_type not in ("==", "!="):
raise ValueError
self.references = references
self.viewport_size = viewport_size
self.dpi = dpi
self._fuzzy = fuzzy or {}
@classmethod
def from_manifest(cls,
manifest_file,
manifest_test,
inherit_metadata,
test_metadata):
timeout = cls.long_timeout if manifest_test.timeout == "long" else cls.default_timeout
quic = manifest_test.quic if hasattr(manifest_test, "quic") else False
url = manifest_test.url
node = cls(manifest_file.tests_root,
manifest_test.url,
inherit_metadata,
test_metadata,
[],
timeout=timeout,
path=manifest_test.path,
viewport_size=manifest_test.viewport_size,
dpi=manifest_test.dpi,
protocol=server_protocol(manifest_test),
fuzzy=manifest_test.fuzzy,
quic=quic)
refs_by_type = defaultdict(list)
for ref_url, ref_type in manifest_test.references:
refs_by_type[ref_type].append(ref_url)
# Construct a list of all the mismatches, where we end up with mismatch_1 != url !=
# mismatch_2 != url != mismatch_3 etc.
#
        # Per the logic documented above, this means that none of the provided
        # mismatches may match for the test to pass.
mismatch_walk = None
if refs_by_type["!="]:
mismatch_walk = ReftestTest(manifest_file.tests_root,
refs_by_type["!="][0],
[],
None,
[])
cmp_ref = mismatch_walk
for ref_url in refs_by_type["!="][1:]:
cmp_self = ReftestTest(manifest_file.tests_root,
url,
[],
None,
[])
cmp_ref.references.append((cmp_self, "!="))
cmp_ref = ReftestTest(manifest_file.tests_root,
ref_url,
[],
None,
[])
cmp_self.references.append((cmp_ref, "!="))
if mismatch_walk is None:
mismatch_refs = []
else:
mismatch_refs = [(mismatch_walk, "!=")]
if refs_by_type["=="]:
# For each == ref, add a reference to this node whose tail is the mismatch list.
# Per the logic documented above, this means any one of the matches must pass plus all the mismatches.
for ref_url in refs_by_type["=="]:
ref = ReftestTest(manifest_file.tests_root,
ref_url,
[],
None,
mismatch_refs)
node.references.append((ref, "=="))
else:
# Otherwise, we just add the mismatches directly as we are immediately into the
# mismatch chain with no alternates.
node.references.extend(mismatch_refs)
return node
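    # Illustrative sketch of the chain built above (hypothetical file names): for a
    # test t.html with references [("ref.html", "=="), ("mis1.html", "!="),
    # ("mis2.html", "!=")], the resulting structure is roughly
    #   t.html == ref.html, ref.html != mis1.html, mis1.html != t.html, t.html != mis2.html
    # i.e. the single match must pass and every mismatch must render differently.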
def update_metadata(self, metadata):
if "url_count" not in metadata:
metadata["url_count"] = defaultdict(int)
for reference, _ in self.references:
# We assume a naive implementation in which a url with multiple
# possible screenshots will need to take both the lhs and rhs screenshots
# for each possible match
metadata["url_count"][(self.environment["protocol"], reference.url)] += 1
reference.update_metadata(metadata)
return metadata
@property
def id(self):
return self.url
@property
def keys(self):
return ("reftype", "refurl")
@property
def fuzzy(self):
return self._fuzzy
@property
def fuzzy_override(self):
values = {}
for meta in reversed(list(self.itermeta(None))):
value = meta.fuzzy
if not value:
continue
if atom_reset in value:
value.remove(atom_reset)
values = {}
for key, data in value:
if isinstance(key, (tuple, list)):
key = list(key)
key[0] = urljoin(self.url, key[0])
key[1] = urljoin(self.url, key[1])
key = tuple(key)
elif key:
# Key is just a relative url to a ref
key = urljoin(self.url, key)
values[key] = data
return values
class WdspecTest(Test):
result_cls = WdspecResult
subtest_result_cls = WdspecSubtestResult
test_type = "wdspec"
default_timeout = 25
long_timeout = 180 # 3 minutes
class CrashTest(Test):
result_cls = CrashtestResult
test_type = "crashtest"
manifest_test_cls = {"reftest": ReftestTest,
"testharness": TestharnessTest,
"manual": ManualTest,
"wdspec": WdspecTest,
"crashtest": CrashTest}
def from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata):
test_cls = manifest_test_cls[manifest_test.item_type]
return test_cls.from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata)
| mpl-2.0 | 3,108,549,741,736,874,500 | 32.154221 | 117 | 0.554424 | false |
andmarios/ansible-modules-core | packaging/os/apt.py | 10 | 27073 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Flowroute LLC
# Written by Matthew Williams <[email protected]>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: apt
short_description: Manages apt-packages
description:
- Manages I(apt) packages (such as for Debian/Ubuntu).
version_added: "0.0.2"
options:
name:
description:
- A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported. Note that the apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user. Since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding)
required: false
default: null
aliases: [ 'pkg', 'package' ]
state:
description:
- Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed.
required: false
default: present
choices: [ "latest", "absent", "present", "build-dep" ]
update_cache:
description:
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
required: false
default: no
choices: [ "yes", "no" ]
cache_valid_time:
description:
- If C(update_cache) is specified and the last run is less or equal than I(cache_valid_time) seconds ago, the C(update_cache) gets skipped.
required: false
default: no
purge:
description:
- Will force purging of configuration files if the module state is set to I(absent).
required: false
default: no
choices: [ "yes", "no" ]
default_release:
description:
- Corresponds to the C(-t) option for I(apt) and sets pin priorities
required: false
default: null
install_recommends:
description:
- Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
required: false
default: null
choices: [ "yes", "no" ]
force:
description:
- If C(yes), force installs/removes.
required: false
default: "no"
choices: [ "yes", "no" ]
upgrade:
description:
- 'If yes or safe, performs an aptitude safe-upgrade.'
- 'If full, performs an aptitude full-upgrade.'
- 'If dist, performs an apt-get dist-upgrade.'
- 'Note: This does not upgrade a specific package, use state=latest for that.'
version_added: "1.1"
required: false
default: "no"
choices: [ "no", "yes", "safe", "full", "dist"]
dpkg_options:
description:
- Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
- Options should be supplied as comma separated list
required: false
default: 'force-confdef,force-confold'
deb:
description:
- Path to a .deb package on the remote machine.
required: false
version_added: "1.6"
requirements: [ python-apt, aptitude ]
author: "Matthew Williams (@mgwilliams)"
notes:
- Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise
C(apt-get) suffices.
'''
EXAMPLES = '''
# Update repositories cache and install "foo" package
- apt: name=foo update_cache=yes
# Remove "foo" package
- apt: name=foo state=absent
# Install the package "foo"
- apt: name=foo state=present
# Install the version '1.00' of package "foo"
- apt: name=foo=1.00 state=present
# Update the repository cache and update package "nginx" to latest version using default release squeeze-backport
- apt: name=nginx state=latest default_release=squeeze-backports update_cache=yes
# Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
- apt: name=openjdk-6-jdk state=latest install_recommends=no
# Update all packages to the latest version
- apt: upgrade=dist
# Run the equivalent of "apt-get update" as a separate step
- apt: update_cache=yes
# Only run "update_cache=yes" if the last one is more than 3600 seconds ago
- apt: update_cache=yes cache_valid_time=3600
# Pass options to dpkg on run
- apt: upgrade=dist update_cache=yes dpkg_options='force-confold,force-confdef'
# Install a .deb package
- apt: deb=/tmp/mypackage.deb
# Install the build dependencies for package "foo"
- apt: pkg=foo state=build-dep
'''
RETURN = '''
cache_updated:
description: if the cache was updated or not
returned: success, in some cases
type: boolean
sample: True
cache_update_time:
description: time of the last cache update (0 if unknown)
returned: success, in some cases
type: datetime
sample: 1425828348000
stdout:
description: output from apt
returned: success, when needed
type: string
sample: "Reading package lists...\nBuilding dependency tree...\nReading state information...\nThe following extra packages will be installed:\n apache2-bin ..."
stderr:
description: error output from apt
returned: success, when needed
type: string
sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
'''
import traceback
# added to stave off future warnings about apt api
import warnings
warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
import os
import datetime
import fnmatch
import itertools
# APT related constants
APT_ENV_VARS = dict(
DEBIAN_FRONTEND = 'noninteractive',
DEBIAN_PRIORITY = 'critical',
LANG = 'C'
)
DPKG_OPTIONS = 'force-confdef,force-confold'
APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
HAS_PYTHON_APT = True
try:
import apt
import apt.debfile
import apt_pkg
except ImportError:
HAS_PYTHON_APT = False
def package_split(pkgspec):
parts = pkgspec.split('=', 1)
if len(parts) > 1:
return parts[0], parts[1]
else:
return parts[0], None
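# Example sketch: package_split('foo=1.0') returns ('foo', '1.0') and
# package_split('foo') returns ('foo', None).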
def package_versions(pkgname, pkg, pkg_cache):
try:
versions = set(p.version for p in pkg.versions)
except AttributeError:
# assume older version of python-apt is installed
# apt.package.Package#versions require python-apt >= 0.7.9.
pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname)
pkg_versions = (p.VersionList for p in pkg_cache_list)
versions = set(p.VerStr for p in itertools.chain(*pkg_versions))
return versions
def package_version_compare(version, other_version):
try:
return apt_pkg.version_compare(version, other_version)
except AttributeError:
return apt_pkg.VersionCompare(version, other_version)
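# Example sketch: package_version_compare('1.0-2', '1.0-10') is negative, because
# Debian version comparison treats the revision part numerically, where a plain
# string comparison would not.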
def package_status(m, pkgname, version, cache, state):
try:
        # get the package from the cache, as well as
        # the low-level apt_pkg.Package object which contains
        # state fields not directly accessible from the
        # higher-level apt.package.Package object.
pkg = cache[pkgname]
ll_pkg = cache._cache[pkgname] # the low-level package object
except KeyError:
if state == 'install':
try:
provided_packages = cache.get_providing_packages(pkgname)
if provided_packages:
is_installed = False
                    # when a virtual package provides only one package, look up the status of the target package
if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
package = provided_packages[0]
installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install')
if installed:
is_installed = True
return is_installed, True, False
m.fail_json(msg="No package matching '%s' is available" % pkgname)
except AttributeError:
# python-apt version too old to detect virtual packages
# mark as upgradable and let apt-get install deal with it
return False, True, False
else:
return False, False, False
try:
has_files = len(pkg.installed_files) > 0
except UnicodeDecodeError:
has_files = True
except AttributeError:
has_files = False # older python-apt cannot be used to determine non-purged
try:
package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
except AttributeError: # python-apt 0.7.X has very weak low-level object
try:
# might not be necessary as python-apt post-0.7.X should have current_state property
package_is_installed = pkg.is_installed
except AttributeError:
# assume older version of python-apt is installed
package_is_installed = pkg.isInstalled
if version:
versions = package_versions(pkgname, pkg, cache._cache)
avail_upgrades = fnmatch.filter(versions, version)
if package_is_installed:
try:
installed_version = pkg.installed.version
except AttributeError:
installed_version = pkg.installedVersion
# Only claim the package is installed if the version is matched as well
package_is_installed = fnmatch.fnmatch(installed_version, version)
# Only claim the package is upgradable if a candidate matches the version
package_is_upgradable = False
for candidate in avail_upgrades:
if package_version_compare(candidate, installed_version) > 0:
package_is_upgradable = True
break
else:
package_is_upgradable = bool(avail_upgrades)
else:
try:
package_is_upgradable = pkg.is_upgradable
except AttributeError:
# assume older version of python-apt is installed
package_is_upgradable = pkg.isUpgradable
return package_is_installed, package_is_upgradable, has_files
def expand_dpkg_options(dpkg_options_compressed):
options_list = dpkg_options_compressed.split(',')
dpkg_options = ""
for dpkg_option in options_list:
dpkg_options = '%s -o "Dpkg::Options::=--%s"' \
% (dpkg_options, dpkg_option)
return dpkg_options.strip()
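# Example sketch: expand_dpkg_options('force-confdef,force-confold') returns
# '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"',
# which is the form spliced into the apt-get command lines below.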
def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
# Note: apt-get does implicit regex matching when an exact package name
# match is not found. Something like this:
# matches = [pkg.name for pkg in cache if re.match(pkgspec, pkg.name)]
# (Should also deal with the ':' for multiarch like the fnmatch code below)
#
# We have decided not to do similar implicit regex matching but might take
# a PR to add some sort of explicit regex matching:
# https://github.com/ansible/ansible-modules-core/issues/1258
new_pkgspec = []
for pkgspec_pattern in pkgspec:
pkgname_pattern, version = package_split(pkgspec_pattern)
# note that none of these chars is allowed in a (debian) pkgname
if frozenset('*?[]!').intersection(pkgname_pattern):
# handle multiarch pkgnames, the idea is that "apt*" should
# only select native packages. But "apt*:i386" should still work
if not ":" in pkgname_pattern:
try:
pkg_name_cache = _non_multiarch
except NameError:
pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if not ':' in pkg.name]
else:
try:
pkg_name_cache = _all_pkg_names
except NameError:
pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache]
matches = fnmatch.filter(pkg_name_cache, pkgname_pattern)
if len(matches) == 0:
m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_pattern))
else:
new_pkgspec.extend(matches)
else:
# No wildcards in name
new_pkgspec.append(pkgspec_pattern)
return new_pkgspec
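# Example sketch (hypothetical cache contents): if the cache knows about 'apt',
# 'apt-utils' and 'apt-transport-https:i386', then expand_pkgspec_from_fnmatches(m,
# ['apt*'], cache) returns ['apt', 'apt-utils']; the multiarch name is only picked
# up by an explicit pattern such as 'apt*:i386'.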
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False):
pkg_list = []
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
name, version = package_split(package)
installed, upgradable, has_files = package_status(m, name, version, cache, state='install')
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
if not installed or (upgrade and upgradable):
pkg_list.append("'%s'" % package)
if installed and upgradable and version:
# This happens when the package is installed, a newer version is
# available, and the version is a wildcard that matches both
#
# We do not apply the upgrade flag because we cannot specify both
# a version and state=latest. (This behaviour mirrors how apt
# treats a version with wildcard in the package)
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if len(packages) != 0:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
for (k,v) in APT_ENV_VARS.iteritems():
os.environ[k] = v
if build_dep:
cmd = "%s -y %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages)
else:
cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
if install_recommends is False:
cmd += " -o APT::Install-Recommends=no"
elif install_recommends is True:
cmd += " -o APT::Install-Recommends=yes"
# install_recommends is None uses the OS default
rc, out, err = m.run_command(cmd)
if rc:
return (False, dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err))
else:
return (True, dict(changed=True, stdout=out, stderr=err))
else:
return (True, dict(changed=False))
def install_deb(m, debs, cache, force, install_recommends, dpkg_options):
changed=False
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
try:
pkg = apt.debfile.DebPackage(deb_file)
# Check if it's already installed
if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME:
continue
# Check if package is installable
if not pkg.check() and not force:
m.fail_json(msg=pkg._failure_string)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
except Exception, e:
m.fail_json(msg="Unable to install package: %s" % str(e))
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
# install the deps through apt
retvals = {}
if len(deps_to_install) > 0:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
m.fail_json(**retvals)
changed = retvals.get('changed', False)
if len(pkgs_to_install) > 0:
options = ' '.join(["--%s"% x for x in dpkg_options.split(",")])
if m.check_mode:
options += " --simulate"
if force:
options += " --force-all"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
rc, out, err = m.run_command(cmd)
if "stdout" in retvals:
stdout = retvals["stdout"] + out
else:
stdout = out
if "stderr" in retvals:
stderr = retvals["stderr"] + err
else:
stderr = err
if rc == 0:
m.exit_json(changed=True, stdout=stdout, stderr=stderr)
else:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout',''), stderr=retvals.get('stderr',''))
def remove(m, pkgspec, cache, purge=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
pkg_list = []
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
name, version = package_split(package)
installed, upgradable, has_files = package_status(m, name, version, cache, state='remove')
if installed or (has_files and purge):
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if len(packages) == 0:
m.exit_json(changed=False)
else:
if purge:
purge = '--purge'
else:
purge = ''
for (k,v) in APT_ENV_VARS.iteritems():
os.environ[k] = v
cmd = "%s -q -y %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, packages)
if m.check_mode:
m.exit_json(changed=True)
rc, out, err = m.run_command(cmd)
if rc:
m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err)
m.exit_json(changed=True, stdout=out, stderr=err)
def upgrade(m, mode="yes", force=False, default_release=None,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
apt_cmd = None
prompt_regex = None
if mode == "dist":
# apt-get dist-upgrade
apt_cmd = APT_GET_CMD
upgrade_command = "dist-upgrade"
elif mode == "full":
# aptitude full-upgrade
apt_cmd = APTITUDE_CMD
upgrade_command = "full-upgrade"
else:
# aptitude safe-upgrade # mode=yes # default
apt_cmd = APTITUDE_CMD
upgrade_command = "safe-upgrade"
prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
if force:
if apt_cmd == APT_GET_CMD:
force_yes = '--force-yes'
else:
force_yes = '--assume-yes --allow-untrusted'
else:
force_yes = ''
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
for (k,v) in APT_ENV_VARS.iteritems():
os.environ[k] = v
cmd = '%s -y %s %s %s %s' % (apt_cmd_path, dpkg_options,
force_yes, check_arg, upgrade_command)
if default_release:
cmd += " -t '%s'" % (default_release,)
rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
if rc:
m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out)
if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present', 'build-dep']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
cache_valid_time = dict(type='int'),
purge = dict(default=False, type='bool'),
package = dict(default=None, aliases=['pkg', 'name'], type='list'),
deb = dict(default=None),
default_release = dict(default=None, aliases=['default-release']),
install_recommends = dict(default=None, aliases=['install-recommends'], type='bool'),
force = dict(default='no', type='bool'),
upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']),
dpkg_options = dict(default=DPKG_OPTIONS)
),
mutually_exclusive = [['package', 'upgrade', 'deb']],
required_one_of = [['package', 'upgrade', 'update_cache', 'deb']],
supports_check_mode = True
)
if not HAS_PYTHON_APT:
try:
module.run_command('apt-get update && apt-get install python-apt -y -q --force-yes', use_unsafe_shell=True, check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. Please install python-apt package.")
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]:
module.fail_json(msg="Could not find aptitude. Please ensure it is installed.")
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
# Deal with deprecated aliases
if p['state'] == 'installed':
p['state'] = 'present'
if p['state'] == 'removed':
p['state'] = 'absent'
try:
cache = apt.Cache()
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
if p['update_cache']:
# Default is: always update the cache
cache_valid = False
now = datetime.datetime.now()
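            # cache_valid_time is honored by comparing "now" with the mtime of
            # apt's update-success stamp (falling back to the lists directory);
            # if the last successful update is recent enough, the expensive
            # cache.update() below is skipped.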
if p.get('cache_valid_time', False):
try:
mtime = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
except:
# Looks like the update-success-stamp is not available
# Fallback: Checking the mtime of the lists
try:
mtime = os.stat(APT_LISTS_PATH).st_mtime
except:
# No mtime could be read. We update the cache to be safe
mtime = False
if mtime:
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
mtimestamp = datetime.datetime.fromtimestamp(mtime)
if mtimestamp + tdelta >= now:
cache_valid = True
updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
if cache_valid is not True:
cache.update()
cache.open(progress=None)
updated_cache = True
updated_cache_time = int(time.mktime(now.timetuple()))
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(changed=False, cache_updated=updated_cache, cache_update_time=updated_cache_time)
else:
updated_cache = False
updated_cache_time = 0
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], dpkg_options)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
force=force_yes, dpkg_options=p['dpkg_options'])
packages = p['package']
latest = p['state'] == 'latest'
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if p['state'] in ('latest', 'present', 'build-dep'):
state_upgrade = False
state_builddep = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
result = install(module, packages, cache, upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes, dpkg_options=dpkg_options,
build_dep=state_builddep)
(success, retvals) = result
retvals['cache_updated']=updated_cache
retvals['cache_update_time']=updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], dpkg_options)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
# import module snippets
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
| gpl-3.0 | -3,446,414,351,826,634,000 | 38.010086 | 578 | 0.606582 | false |
opfeifle/SU2 | SU2_PY/SU2/run/geometry.py | 3 | 3841 | #!/usr/bin/env python
## \file geometry.py
# \brief python package for running geometry analyses
# \author T. Lukaczyk, F. Palacios
# \version 5.0.0 "Raven"
#
# SU2 Lead Developers: Dr. Francisco Palacios ([email protected]).
# Dr. Thomas D. Economon ([email protected]).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
# Prof. Edwin van der Weide's group at the University of Twente.
# Prof. Vincent Terrapon's group at the University of Liege.
#
# Copyright (C) 2012-2017 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
from .. import io as su2io
from interface import GEO as SU2_GEO
from ..util import ordered_bunch
# ----------------------------------------------------------------------
# Direct Simulation
# ----------------------------------------------------------------------
def geometry ( config , step = 1e-3 ):
""" info = SU2.run.geometry(config)
Runs an geometry analysis with:
SU2.run.decomp()
SU2.run.GEO()
Assumptions:
Performs both function and gradient analysis
Inputs:
config - an SU2 configuration
step - gradient finite difference step if config.GEO_MODE=GRADIENT
Outputs:
info - SU2 State with keys:
FUNCTIONS
GRADIENTS
Updates:
Executes in:
./
"""
# local copy
konfig = copy.deepcopy(config)
# unpack
function_name = konfig['GEO_PARAM']
func_filename = konfig['VALUE_OBJFUNC_FILENAME']
grad_filename = konfig['GRAD_OBJFUNC_FILENAME']
# choose dv values
Definition_DV = konfig['DEFINITION_DV']
n_DV = len(Definition_DV['KIND'])
if isinstance(step,list):
assert len(step) == n_DV , 'unexpected step vector length'
else:
step = [step]*n_DV
dv_old = [0.0]*n_DV # SU2_DOT input requirement, assumes linear superposition of design variables
dv_new = step
konfig.unpack_dvs(dv_new,dv_old)
# Run Solution
SU2_GEO(konfig)
# info out
info = su2io.State()
# get function values
if konfig.GEO_MODE == 'FUNCTION':
functions = su2io.tools.read_plot(func_filename)
for key,value in functions.items():
functions[key] = value[0]
info.FUNCTIONS.update( functions )
# get gradient_values
if konfig.GEO_MODE == 'GRADIENT':
gradients = su2io.tools.read_plot(grad_filename)
info.GRADIENTS.update( gradients )
return info
| lgpl-2.1 | 751,786,724,826,876,000 | 33.294643 | 101 | 0.576152 | false |
trbngr/beets | beetsplug/embedart.py | 9 | 6666 | # This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows beets to embed album art into file metadata."""
import logging
import imghdr
from beets.plugins import BeetsPlugin
from beets import mediafile
from beets import ui
from beets.ui import decargs
from beets.util import syspath, normpath, displayable_path
from beets.util.artresizer import ArtResizer
from beets import config
log = logging.getLogger('beets')
def _embed(path, items, maxwidth=0):
"""Embed an image file, located at `path`, into each item.
"""
if maxwidth:
path = ArtResizer.shared.resize(maxwidth, syspath(path))
data = open(syspath(path), 'rb').read()
kindstr = imghdr.what(None, data)
if kindstr is None:
        log.error(u'Could not embed art of unknown type: {0}'.format(
displayable_path(path)
))
return
elif kindstr not in ('jpeg', 'png'):
log.error(u'Image type {0} is not allowed as cover art: {1}'.format(
kindstr, displayable_path(path)
))
return
# Add art to each file.
log.debug('Embedding album art.')
for item in items:
try:
f = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.warn('Could not embed art in {0}: {1}'.format(
displayable_path(item.path), exc
))
continue
f.art = data
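        # Write the tags back, honoring the global id3v23 option so embedding
        # art does not silently change the ID3 version the user asked for.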
f.save(config['id3v23'].get(bool))
class EmbedCoverArtPlugin(BeetsPlugin):
"""Allows albumart to be embedded into the actual files.
"""
def __init__(self):
super(EmbedCoverArtPlugin, self).__init__()
self.config.add({
'maxwidth': 0,
'auto': True,
})
if self.config['maxwidth'].get(int) and \
not ArtResizer.shared.local:
self.config['maxwidth'] = 0
log.warn("embedart: ImageMagick or PIL not found; "
"'maxwidth' option ignored")
def commands(self):
# Embed command.
embed_cmd = ui.Subcommand('embedart',
help='embed image files into file metadata')
embed_cmd.parser.add_option('-f', '--file', metavar='PATH',
help='the image file to embed')
def embed_func(lib, opts, args):
if opts.file:
imagepath = normpath(opts.file)
embed(lib, imagepath, decargs(args))
else:
embed_current(lib, decargs(args))
embed_cmd.func = embed_func
# Extract command.
extract_cmd = ui.Subcommand('extractart',
help='extract an image from file metadata')
extract_cmd.parser.add_option('-o', dest='outpath',
help='image output file')
def extract_func(lib, opts, args):
outpath = normpath(opts.outpath or 'cover')
extract(lib, outpath, decargs(args))
extract_cmd.func = extract_func
# Clear command.
clear_cmd = ui.Subcommand('clearart',
help='remove images from file metadata')
def clear_func(lib, opts, args):
clear(lib, decargs(args))
clear_cmd.func = clear_func
return [embed_cmd, extract_cmd, clear_cmd]
# "embedart" command with --file argument.
def embed(lib, imagepath, query):
albums = lib.albums(query)
for i_album in albums:
album = i_album
break
else:
log.error('No album matches query.')
return
log.info(u'Embedding album art into {0.albumartist} - {0.album}.'.format(
album
))
_embed(imagepath, album.items(),
config['embedart']['maxwidth'].get(int))
# "embedart" command without explicit file.
def embed_current(lib, query):
albums = lib.albums(query)
for album in albums:
if not album.artpath:
log.info(u'No album art present: {0} - {1}'.
format(album.albumartist, album.album))
continue
log.info(u'Embedding album art into {0} - {1}'.
format(album.albumartist, album.album))
_embed(album.artpath, album.items(),
config['embedart']['maxwidth'].get(int))
# "extractart" command.
def extract(lib, outpath, query):
items = lib.items(query)
for i_item in items:
item = i_item
break
else:
log.error('No item matches query.')
return
# Extract the art.
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.error(u'Could not extract art from {0}: {1}'.format(
displayable_path(item.path), exc
))
return
art = mf.art
if not art:
log.error('No album art present in %s - %s.' %
(item.artist, item.title))
return
# Add an extension to the filename.
ext = imghdr.what(None, h=art)
if not ext:
log.error('Unknown image type.')
return
outpath += '.' + ext
log.info(u'Extracting album art from: {0.artist} - {0.title}\n'
u'To: {1}'.format(item, displayable_path(outpath)))
with open(syspath(outpath), 'wb') as f:
f.write(art)
# "clearart" command.
def clear(lib, query):
log.info('Clearing album art from items:')
for item in lib.items(query):
log.info(u'%s - %s' % (item.artist, item.title))
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.error(u'Could not clear art from {0}: {1}'.format(
displayable_path(item.path), exc
))
continue
mf.art = None
mf.save(config['id3v23'].get(bool))
# Automatically embed art into imported albums.
@EmbedCoverArtPlugin.listen('album_imported')
def album_imported(lib, album):
if album.artpath and config['embedart']['auto']:
_embed(album.artpath, album.items(),
config['embedart']['maxwidth'].get(int))
| mit | -4,384,050,846,492,064,000 | 32.837563 | 79 | 0.59721 | false |
vied12/photoautomat-circus | sources/webassets/test.py | 19 | 4687 | """Helpers for testing webassets.
This is included in the webassets package because it is useful for testing
external libraries that use webassets (like the flask-assets wrapper).
"""
from __future__ import print_function
import tempfile
import shutil
import os
from os import path
import time
from webassets import Environment, Bundle
from webassets.six.moves import map
from webassets.six.moves import zip
__all__ = ('TempDirHelper', 'TempEnvironmentHelper',)
class TempDirHelper(object):
"""Base-class for tests which provides a temporary directory
(which is properly deleted after the test is done), and various
helper methods to do filesystem operations within that directory.
"""
default_files = {}
def setup(self):
self._tempdir_created = tempfile.mkdtemp()
self.create_files(self.default_files)
def teardown(self):
shutil.rmtree(self._tempdir_created)
def __enter__(self):
self.setup()
return self
def __exit__(self, type, value, traceback):
self.teardown()
@property
def tempdir(self):
# Use a read-only property here, so the user is
# less likely to modify the attribute, and have
# his data deleted on teardown.
return self._tempdir_created
def create_files(self, files):
"""Helper that allows to quickly create a bunch of files in
the media directory of the current test run.
"""
import codecs
# Allow passing a list of filenames to create empty files
if not hasattr(files, 'items'):
files = dict(map(lambda n: (n, ''), files))
for name, data in files.items():
dirs = path.dirname(self.path(name))
if not path.exists(dirs):
os.makedirs(dirs)
f = codecs.open(self.path(name), 'w', 'utf-8')
f.write(data)
f.close()
def create_directories(self, *dirs):
"""Helper to create directories within the media directory
of the current test's environment.
"""
result = []
for dir in dirs:
full_path = self.path(dir)
result.append(full_path)
os.makedirs(full_path)
return result
def exists(self, name):
"""Ensure the given file exists within the current test run's
media directory.
"""
return path.exists(self.path(name))
def get(self, name):
"""Return the given file's contents.
"""
with open(self.path(name)) as f:
r = f.read()
print(repr(r))
return r
def unlink(self, name):
os.unlink(self.path(name))
def path(self, name):
"""Return the given file's full path."""
return path.join(self._tempdir_created, name)
def setmtime(self, *files, **kwargs):
"""Set the mtime of the given files. Useful helper when
needing to test things like the timestamp updater.
Specify ``mtime`` as a keyword argument, or time.time()
will automatically be used. Returns the mtime used.
Specify ``mod`` as a keyword argument, and the modifier
will be added to the ``mtime`` used.
"""
mtime = kwargs.pop('mtime', time.time())
mtime += kwargs.pop('mod', 0)
assert not kwargs, "Unsupported kwargs: %s" % ', '.join(kwargs.keys())
for f in files:
os.utime(self.path(f), (mtime, mtime))
return mtime
def p(self, *files):
"""Print the contents of the given files to stdout; useful
for some quick debugging.
"""
if not files:
            files = ['out']  # This is an often-used output filename
for f in files:
content = self.get(f)
print(f)
print("-" * len(f))
print(repr(content))
print(content)
print()
class TempEnvironmentHelper(TempDirHelper):
"""Base-class for tests which provides a pre-created
environment, based in a temporary directory, and utility
methods to do filesystem operations within that directory.
"""
default_files = {'in1': 'A', 'in2': 'B', 'in3': 'C', 'in4': 'D'}
def setup(self):
TempDirHelper.setup(self)
self.env = self._create_environment()
# Unless we explicitly test it, we don't want to use the cache
# during testing.
self.env.cache = False
self.env.manifest = False
def _create_environment(self):
return Environment(self._tempdir_created, '')
def mkbundle(self, *a, **kw):
b = Bundle(*a, **kw)
b.env = self.env
return b
| gpl-3.0 | -4,954,814,504,524,542,000 | 29.435065 | 79 | 0.597397 | false |
google/eclipse2017 | system-test-container/app/tests/site_test.py | 1 | 4431 | #
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import socket
import time
import unittest2
import requests
from google.cloud import datastore
from google.oauth2 import id_token
from common import constants
from common import git_tag
from common.id_token import get_id_token
from common import users
from common import roles
from common import util
from google.auth.transport import requests as gat_requests
logging.basicConfig(level=logging.INFO, format=constants.LOG_FMT_M_THREADED)
PROJECT_ID = 'eclipse-2017-dev-dek'
class SiteTest(unittest2.TestCase):
"""
Basic site test
"""
READINESS_PROBE_PATH = '/'
NUM_WORKERS = 7
NUM_REQUESTS = 1
def __init__(self, *args, **kwargs):
super(SiteTest, self).__init__(*args, **kwargs)
def setUp(self):
self.client = datastore.Client(PROJECT_ID)
def _get_uri(self, host, port, path, request_headers = {'content-type': 'text/plain'}, expected_data = None, accepted_status_codes = (constants.HTTP_OK,), timeout = 60):
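        # Fetch http://<host>:<port>/<path> and check the response: the status
        # code must be in accepted_status_codes and, when expected_data is
        # given, the body must match it exactly. Returns True/False and logs
        # failures instead of raising.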
try:
r = requests.get("http://%s:%d/%s" % (host, port, path))
except requests.exceptions.ConnectionError:
msg = 'Cannot contact server: {0}'.format(host)
logging.error(msg)
return False
if r.status_code not in accepted_status_codes:
            msg = 'Unexpected status code: {0}'.format(r.status_code)
logging.error(msg)
return False
elif r.status_code != constants.HTTP_OK:
            msg = 'Server returned error: {0}'.format(r.status_code)
logging.error(msg)
return False
if expected_data is not None:
if r.text != expected_data:
logging.error("Expected data: %s was not matched by received data: %s" % (expected_data, data))
return False
return True
def test_get_static(self):
STATIC_NGINX_HOST = 'static-nginx'
ADMIN_NGINX_HOST = 'admin-nginx'
PROFILE_NGINX_HOST = 'profile-nginx'
NGINX_PORT = 80
SERVER_PORT = 8080
for case in [
{ 'host': STATIC_NGINX_HOST, 'port': NGINX_PORT, 'path': '/' },
{ 'host': STATIC_NGINX_HOST, 'port': NGINX_PORT, 'path': '/hash.html', 'expected_data': git_tag.GIT_TAG},
{ 'host': ADMIN_NGINX_HOST, 'port': NGINX_PORT, 'path': '/', 'expected_data': 'OK' },
{ 'host': PROFILE_NGINX_HOST, 'port': NGINX_PORT, 'path': '/', 'expected_data': 'OK' },
]:
logging.info("Test: %s" % case)
self.assertTrue(self._get_uri(**case))
def _delete_user_via_datastore_if_exists(self, userid_hash):
user = users.get_user(self.client, userid_hash)
if user is not None:
users.delete_user(self.client, userid_hash)
def _get_user_via_api(self, userid_hash, token):
headers = { 'x-idtoken': token }
r = requests.get('http://profile-nginx/services/user/profile/%s' % userid_hash,
headers = headers)
print r
print r.text
def _create_user_via_datastore(self, userid_hash):
user = datastore.Entity(key = self.client.key("User", userid_hash))
user['name'] = u"Test User " + userid_hash
user['email'] = u"test" + userid_hash + u"@example.com"
users.create_or_update_user(self.client, user)
roles.create_user_role(self.client, userid_hash)
def test_profile_disabled(self):
token = get_id_token()
r = gat_requests.Request()
idinfo = util._validate_id_token(token)
userid = users.get_userid(idinfo)
userid_hash = users.get_userid_hash(userid)
self._delete_user_via_datastore_if_exists(userid_hash)
self._get_user_via_api(userid_hash, token)
self._create_user_via_datastore(userid_hash)
self._get_user_via_api(userid_hash, token)
| apache-2.0 | 1,497,879,265,293,816,600 | 35.619835 | 173 | 0.627624 | false |
jor-/scipy | scipy/signal/tests/test_signaltools.py | 2 | 103529 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import sys
from decimal import Decimal
from itertools import product
import warnings
import pytest
from pytest import raises as assert_raises
from numpy.testing import (
assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns, assert_array_less)
from scipy._lib._numpy_compat import suppress_warnings
from numpy import array, arange
import numpy as np
from scipy.ndimage.filters import correlate1d
from scipy.optimize import fmin
from scipy import signal
from scipy.signal import (
correlate, convolve, convolve2d, fftconvolve, choose_conv_method,
hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos,
invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt,
sosfilt_zi, tf2zpk, BadCoefficients, detrend)
from scipy.signal.windows import hann
from scipy.signal.signaltools import _filtfilt_gust
if sys.version_info >= (3, 5):
from math import gcd
else:
from fractions import gcd
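# gcd moved from the fractions module to math in Python 3.5; it is used below
# to reduce the resampling ratio to the up/down factors for resample_poly.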
class _TestConvolve(object):
def test_basic(self):
a = [3, 4, 5, 6, 5, 4]
b = [1, 2, 3]
c = convolve(a, b)
assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12]))
def test_same(self):
a = [3, 4, 5]
b = [1, 2, 3, 4]
c = convolve(a, b, mode="same")
assert_array_equal(c, array([10, 22, 34]))
def test_same_eq(self):
a = [3, 4, 5]
b = [1, 2, 3]
c = convolve(a, b, mode="same")
assert_array_equal(c, array([10, 22, 22]))
def test_complex(self):
x = array([1 + 1j, 2 + 1j, 3 + 1j])
y = array([1 + 1j, 2 + 1j])
z = convolve(x, y)
assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j]))
def test_zero_rank(self):
a = 1289
b = 4567
c = convolve(a, b)
assert_equal(c, a * b)
def test_broadcastable(self):
a = np.arange(27).reshape(3, 3, 3)
b = np.arange(3)
for i in range(3):
b_shape = [1]*3
b_shape[i] = 3
x = convolve(a, b.reshape(b_shape), method='direct')
y = convolve(a, b.reshape(b_shape), method='fft')
assert_allclose(x, y)
def test_single_element(self):
a = array([4967])
b = array([3920])
c = convolve(a, b)
assert_equal(c, a * b)
def test_2d_arrays(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve(a, b)
d = array([[2, 7, 16, 17, 12],
[10, 30, 62, 58, 38],
[12, 31, 58, 49, 30]])
assert_array_equal(c, d)
def test_input_swapping(self):
small = arange(8).reshape(2, 2, 2)
big = 1j * arange(27).reshape(3, 3, 3)
big += arange(27)[::-1].reshape(3, 3, 3)
out_array = array(
[[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j],
[52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j],
[46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j],
[40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]],
[[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j],
[282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j],
[246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j],
[142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]],
[[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j],
[174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j],
[138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j],
[70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]],
[[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j],
[68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j],
[38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j],
[12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]])
assert_array_equal(convolve(small, big, 'full'), out_array)
assert_array_equal(convolve(big, small, 'full'), out_array)
assert_array_equal(convolve(small, big, 'same'),
out_array[1:3, 1:3, 1:3])
assert_array_equal(convolve(big, small, 'same'),
out_array[0:3, 0:3, 0:3])
assert_array_equal(convolve(small, big, 'valid'),
out_array[1:3, 1:3, 1:3])
assert_array_equal(convolve(big, small, 'valid'),
out_array[1:3, 1:3, 1:3])
def test_invalid_params(self):
a = [3, 4, 5]
b = [1, 2, 3]
assert_raises(ValueError, convolve, a, b, mode='spam')
assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft')
assert_raises(ValueError, convolve, a, b, mode='ham', method='direct')
assert_raises(ValueError, convolve, a, b, mode='full', method='bacon')
assert_raises(ValueError, convolve, a, b, mode='same', method='bacon')
class TestConvolve(_TestConvolve):
def test_valid_mode2(self):
# See gh-5897
a = [1, 2, 3, 6, 5, 3]
b = [2, 3, 4, 5, 3, 4, 2, 2, 1]
expected = [70, 78, 73, 65]
out = convolve(a, b, 'valid')
assert_array_equal(out, expected)
out = convolve(b, a, 'valid')
assert_array_equal(out, expected)
a = [1 + 5j, 2 - 1j, 3 + 0j]
b = [2 - 3j, 1 + 0j]
expected = [2 - 3j, 8 - 10j]
out = convolve(a, b, 'valid')
assert_array_equal(out, expected)
out = convolve(b, a, 'valid')
assert_array_equal(out, expected)
def test_same_mode(self):
a = [1, 2, 3, 3, 1, 2]
b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3]
c = convolve(a, b, 'same')
d = array([57, 61, 63, 57, 45, 36])
assert_array_equal(c, d)
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'})
def test_convolve_method(self, n=100):
types = sum([t for _, t in np.sctypes.items()], [])
types = {np.dtype(t).name for t in types}
# These types include 'bool' and all precisions (int8, float32, etc)
# The removed types throw errors in correlate or fftconvolve
for dtype in ['complex256', 'complex192', 'float128', 'float96',
'str', 'void', 'bytes', 'object', 'unicode', 'string']:
if dtype in types:
types.remove(dtype)
args = [(t1, t2, mode) for t1 in types for t2 in types
for mode in ['valid', 'full', 'same']]
        # These are random arrays, which makes the test much stronger than
        # simply convolving two np.ones arrays
np.random.seed(42)
array_types = {'i': np.random.choice([0, 1], size=n),
'f': np.random.randn(n)}
array_types['b'] = array_types['u'] = array_types['i']
array_types['c'] = array_types['f'] + 0.5j*array_types['f']
for t1, t2, mode in args:
x1 = array_types[np.dtype(t1).kind].astype(t1)
x2 = array_types[np.dtype(t2).kind].astype(t2)
results = {key: convolve(x1, x2, method=key, mode=mode)
for key in ['fft', 'direct']}
assert_equal(results['fft'].dtype, results['direct'].dtype)
if 'bool' in t1 and 'bool' in t2:
assert_equal(choose_conv_method(x1, x2), 'direct')
continue
            # Tolerances found by experiment: approximately the smallest
            # (rtol, atol) for which these comparisons pass.
if any([t in {'complex64', 'float32'} for t in [t1, t2]]):
kwargs = {'rtol': 1.0e-4, 'atol': 1e-6}
elif 'float16' in [t1, t2]:
# atol is default for np.allclose
kwargs = {'rtol': 1e-3, 'atol': 1e-3}
else:
# defaults for np.allclose (different from assert_allclose)
kwargs = {'rtol': 1e-5, 'atol': 1e-8}
assert_allclose(results['fft'], results['direct'], **kwargs)
def test_convolve_method_large_input(self):
        # This is really a test that convolving two large integers should use
        # the direct method even when the fft method would otherwise be chosen.
for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]:
z = np.array([2**n], dtype=np.int64)
fft = convolve(z, z, method='fft')
direct = convolve(z, z, method='direct')
            # for larger n, limited floating-point precision makes the fft
            # result inexact, so only check exact equality below that size;
            # issue #6076 has more detail, hopefully more tests once resolved
if n < 50:
assert_equal(fft, direct)
assert_equal(fft, 2**(2*n))
assert_equal(direct, 2**(2*n))
def test_mismatched_dims(self):
# Input arrays should have the same number of dimensions
assert_raises(ValueError, convolve, [1], 2, method='direct')
assert_raises(ValueError, convolve, 1, [2], method='direct')
assert_raises(ValueError, convolve, [1], 2, method='fft')
assert_raises(ValueError, convolve, 1, [2], method='fft')
assert_raises(ValueError, convolve, [1], [[2]])
assert_raises(ValueError, convolve, [3], 2)
class _TestConvolve2d(object):
def test_2d_arrays(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
d = array([[2, 7, 16, 17, 12],
[10, 30, 62, 58, 38],
[12, 31, 58, 49, 30]])
e = convolve2d(a, b)
assert_array_equal(e, d)
def test_valid_mode(self):
e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
f = [[1, 2, 3], [3, 4, 5]]
h = array([[62, 80, 98, 116, 134]])
g = convolve2d(e, f, 'valid')
assert_array_equal(g, h)
# See gh-5897
g = convolve2d(f, e, 'valid')
assert_array_equal(g, h)
def test_valid_mode_complx(self):
e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j
h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]])
g = convolve2d(e, f, 'valid')
assert_array_almost_equal(g, h)
# See gh-5897
g = convolve2d(f, e, 'valid')
assert_array_equal(g, h)
def test_fillvalue(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
fillval = 1
c = convolve2d(a, b, 'full', 'fill', fillval)
d = array([[24, 26, 31, 34, 32],
[28, 40, 62, 64, 52],
[32, 46, 67, 62, 48]])
assert_array_equal(c, d)
def test_fillvalue_deprecations(self):
# Deprecated 2017-07, scipy version 1.0.0
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, "Casting complex values to real")
r = sup.record(DeprecationWarning, "could not cast `fillvalue`")
convolve2d([[1]], [[1, 2]], fillvalue=1j)
assert_(len(r) == 1)
warnings.filterwarnings(
"error", message="could not cast `fillvalue`",
category=DeprecationWarning)
assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=1j)
with suppress_warnings():
warnings.filterwarnings(
"always", message="`fillvalue` must be scalar or an array ",
category=DeprecationWarning)
assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=[1, 2])
warnings.filterwarnings(
"error", message="`fillvalue` must be scalar or an array ",
category=DeprecationWarning)
assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=[1, 2])
def test_fillvalue_empty(self):
# Check that fillvalue being empty raises an error:
assert_raises(ValueError, convolve2d, [[1]], [[1, 2]],
fillvalue=[])
def test_wrap_boundary(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve2d(a, b, 'full', 'wrap')
d = array([[80, 80, 74, 80, 80],
[68, 68, 62, 68, 68],
[80, 80, 74, 80, 80]])
assert_array_equal(c, d)
def test_sym_boundary(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve2d(a, b, 'full', 'symm')
d = array([[34, 30, 44, 62, 66],
[52, 48, 62, 80, 84],
[82, 78, 92, 110, 114]])
assert_array_equal(c, d)
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'})
class TestConvolve2d(_TestConvolve2d):
def test_same_mode(self):
e = [[1, 2, 3], [3, 4, 5]]
f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
g = convolve2d(e, f, 'same')
h = array([[22, 28, 34],
[80, 98, 116]])
assert_array_equal(g, h)
def test_valid_mode2(self):
# See gh-5897
e = [[1, 2, 3], [3, 4, 5]]
f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
expected = [[62, 80, 98, 116, 134]]
out = convolve2d(e, f, 'valid')
assert_array_equal(out, expected)
out = convolve2d(f, e, 'valid')
assert_array_equal(out, expected)
e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]]
f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]]
expected = [[27 - 1j, 46. + 2j]]
out = convolve2d(e, f, 'valid')
assert_array_equal(out, expected)
# See gh-5897
out = convolve2d(f, e, 'valid')
assert_array_equal(out, expected)
def test_consistency_convolve_funcs(self):
# Compare np.convolve, signal.convolve, signal.convolve2d
a = np.arange(5)
b = np.array([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
assert_almost_equal(np.convolve(a, b, mode=mode),
signal.convolve(a, b, mode=mode))
assert_almost_equal(np.squeeze(
signal.convolve2d([a], [b], mode=mode)),
signal.convolve(a, b, mode=mode))
def test_invalid_dims(self):
assert_raises(ValueError, convolve2d, 3, 4)
assert_raises(ValueError, convolve2d, [3], [4])
assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]])
class TestFFTConvolve(object):
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_real(self, axes):
a = array([1, 2, 3])
expected = array([1, 4, 10, 12, 9.])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_real_axes(self, axes):
a = array([1, 2, 3])
expected = array([1, 4, 10, 12, 9.])
a = np.tile(a, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_complex(self, axes):
a = array([1 + 1j, 2 + 2j, 3 + 3j])
expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_complex_axes(self, axes):
a = array([1 + 1j, 2 + 2j, 3 + 3j])
expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
a = np.tile(a, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['',
None,
[0, 1],
[1, 0],
[0, -1],
[-1, 0],
[-2, 1],
[1, -2],
[-2, -1],
[-1, -2]])
def test_2d_real_same(self, axes):
a = array([[1, 2, 3],
[4, 5, 6]])
expected = array([[1, 4, 10, 12, 9],
[8, 26, 56, 54, 36],
[16, 40, 73, 60, 36]])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [[1, 2],
[2, 1],
[1, -1],
[-1, 1],
[-2, 2],
[2, -2],
[-2, -1],
[-1, -2]])
def test_2d_real_same_axes(self, axes):
a = array([[1, 2, 3],
[4, 5, 6]])
expected = array([[1, 4, 10, 12, 9],
[8, 26, 56, 54, 36],
[16, 40, 73, 60, 36]])
a = np.tile(a, [2, 1, 1])
expected = np.tile(expected, [2, 1, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['',
None,
[0, 1],
[1, 0],
[0, -1],
[-1, 0],
[-2, 1],
[1, -2],
[-2, -1],
[-1, -2]])
def test_2d_complex_same(self, axes):
a = array([[1 + 2j, 3 + 4j, 5 + 6j],
[2 + 1j, 4 + 3j, 6 + 5j]])
expected = array([
[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
[10j, 44j, 118j, 156j, 122j],
[3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]
])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [[1, 2],
[2, 1],
[1, -1],
[-1, 1],
[-2, 2],
[2, -2],
[-2, -1],
[-1, -2]])
def test_2d_complex_same_axes(self, axes):
a = array([[1 + 2j, 3 + 4j, 5 + 6j],
[2 + 1j, 4 + 3j, 6 + 5j]])
expected = array([
[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
[10j, 44j, 118j, 156j, 122j],
[3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]
])
a = np.tile(a, [2, 1, 1])
expected = np.tile(expected, [2, 1, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_real_same_mode(self, axes):
a = array([1, 2, 3])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected_1 = array([35., 41., 47.])
expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])
if axes == '':
out = fftconvolve(a, b, 'same')
else:
out = fftconvolve(a, b, 'same', axes=axes)
assert_array_almost_equal(out, expected_1)
if axes == '':
out = fftconvolve(b, a, 'same')
else:
out = fftconvolve(b, a, 'same', axes=axes)
assert_array_almost_equal(out, expected_2)
@pytest.mark.parametrize('axes', [1, -1, [1], [-1]])
def test_real_same_mode_axes(self, axes):
a = array([1, 2, 3])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected_1 = array([35., 41., 47.])
expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected_1 = np.tile(expected_1, [2, 1])
expected_2 = np.tile(expected_2, [2, 1])
out = fftconvolve(a, b, 'same', axes=axes)
assert_array_almost_equal(out, expected_1)
out = fftconvolve(b, a, 'same', axes=axes)
assert_array_almost_equal(out, expected_2)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_valid_mode_real(self, axes):
# See gh-5897
a = array([3, 2, 1])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = array([24., 31., 41., 43., 49., 25., 12.])
if axes == '':
out = fftconvolve(a, b, 'valid')
else:
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
if axes == '':
out = fftconvolve(b, a, 'valid')
else:
out = fftconvolve(b, a, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1]])
def test_valid_mode_real_axes(self, axes):
# See gh-5897
a = array([3, 2, 1])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = array([24., 31., 41., 43., 49., 25., 12.])
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_valid_mode_complex(self, axes):
a = array([3 - 1j, 2 + 7j, 1 + 0j])
b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j])
if axes == '':
out = fftconvolve(a, b, 'valid')
else:
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
if axes == '':
out = fftconvolve(b, a, 'valid')
else:
out = fftconvolve(b, a, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_valid_mode_complex_axes(self, axes):
a = array([3 - 1j, 2 + 7j, 1 + 0j])
b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j])
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
out = fftconvolve(b, a, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
def test_empty(self):
# Regression test for #1745: crashes with 0-length input.
assert_(fftconvolve([], []).size == 0)
assert_(fftconvolve([5, 6], []).size == 0)
assert_(fftconvolve([], [7]).size == 0)
def test_zero_rank(self):
a = array(4967)
b = array(3920)
out = fftconvolve(a, b)
assert_equal(out, a * b)
def test_single_element(self):
a = array([4967])
b = array([3920])
out = fftconvolve(a, b)
assert_equal(out, a * b)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_random_data(self, axes):
np.random.seed(1234)
a = np.random.rand(1233) + 1j * np.random.rand(1233)
b = np.random.rand(1321) + 1j * np.random.rand(1321)
expected = np.convolve(a, b, 'full')
if axes == '':
out = fftconvolve(a, b, 'full')
else:
out = fftconvolve(a, b, 'full', axes=axes)
assert_(np.allclose(out, expected, rtol=1e-10))
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_random_data_axes(self, axes):
np.random.seed(1234)
a = np.random.rand(1233) + 1j * np.random.rand(1233)
b = np.random.rand(1321) + 1j * np.random.rand(1321)
expected = np.convolve(a, b, 'full')
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'full', axes=axes)
assert_(np.allclose(out, expected, rtol=1e-10))
@pytest.mark.parametrize('axes', [[1, 4],
[4, 1],
[1, -1],
[-1, 1],
[-4, 4],
[4, -4],
[-4, -1],
[-1, -4]])
def test_random_data_multidim_axes(self, axes):
a_shape, b_shape = (123, 22), (132, 11)
np.random.seed(1234)
a = np.random.rand(*a_shape) + 1j * np.random.rand(*a_shape)
b = np.random.rand(*b_shape) + 1j * np.random.rand(*b_shape)
expected = convolve2d(a, b, 'full')
a = a[:, :, None, None, None]
b = b[:, :, None, None, None]
expected = expected[:, :, None, None, None]
a = np.rollaxis(a.swapaxes(0, 2), 1, 5)
b = np.rollaxis(b.swapaxes(0, 2), 1, 5)
expected = np.rollaxis(expected.swapaxes(0, 2), 1, 5)
# use 1 for dimension 2 in a and 3 in b to test broadcasting
a = np.tile(a, [2, 1, 3, 1, 1])
b = np.tile(b, [2, 1, 1, 4, 1])
expected = np.tile(expected, [2, 1, 3, 4, 1])
out = fftconvolve(a, b, 'full', axes=axes)
assert_allclose(out, expected, rtol=1e-10, atol=1e-10)
@pytest.mark.slow
@pytest.mark.parametrize(
'n',
list(range(1, 100)) +
list(range(1000, 1500)) +
np.random.RandomState(1234).randint(1001, 10000, 5).tolist())
def test_many_sizes(self, n):
a = np.random.rand(n) + 1j * np.random.rand(n)
b = np.random.rand(n) + 1j * np.random.rand(n)
expected = np.convolve(a, b, 'full')
out = fftconvolve(a, b, 'full')
assert_allclose(out, expected, atol=1e-10)
out = fftconvolve(a, b, 'full', axes=[0])
assert_allclose(out, expected, atol=1e-10)
def test_invalid_shapes(self):
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
with assert_raises(ValueError,
match="For 'valid' mode, one must be at least "
"as large as the other in every dimension"):
fftconvolve(a, b, mode='valid')
def test_invalid_shapes_axes(self):
a = np.zeros([5, 6, 2, 1])
b = np.zeros([5, 6, 3, 1])
with assert_raises(ValueError,
match=r"incompatible shapes for in1 and in2:"
r" \(5L?, 6L?, 2L?, 1L?\) and"
r" \(5L?, 6L?, 3L?, 1L?\)"):
fftconvolve(a, b, axes=[0, 1])
@pytest.mark.parametrize('a,b',
[([1], 2),
(1, [2]),
([3], [[2]])])
def test_mismatched_dims(self, a, b):
with assert_raises(ValueError,
match="in1 and in2 should have the same"
" dimensionality"):
fftconvolve(a, b)
def test_invalid_flags(self):
with assert_raises(ValueError,
match="acceptable mode flags are 'valid',"
" 'same', or 'full'"):
fftconvolve([1], [2], mode='chips')
with assert_raises(ValueError,
match="when provided, axes cannot be empty"):
fftconvolve([1], [2], axes=[])
with assert_raises(ValueError, match="axes must be a scalar or "
"iterable of integers"):
fftconvolve([1], [2], axes=[[1, 2], [3, 4]])
with assert_raises(ValueError, match="axes must be a scalar or "
"iterable of integers"):
fftconvolve([1], [2], axes=[1., 2., 3., 4.])
with assert_raises(ValueError,
match="axes exceeds dimensionality of input"):
fftconvolve([1], [2], axes=[1])
with assert_raises(ValueError,
match="axes exceeds dimensionality of input"):
fftconvolve([1], [2], axes=[-2])
with assert_raises(ValueError,
match="all axes must be unique"):
fftconvolve([1], [2], axes=[0, 0])
class TestMedFilt(object):
def test_basic(self):
f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46],
[50, 50, 50, 50, 50, 0, 72, 77, 68, 66],
[50, 50, 50, 50, 50, 46, 47, 19, 64, 77],
[50, 50, 50, 50, 50, 42, 15, 29, 95, 35],
[50, 50, 50, 50, 50, 46, 34, 9, 21, 66],
[70, 97, 28, 68, 78, 77, 61, 58, 71, 42],
[64, 53, 44, 29, 68, 32, 19, 68, 24, 84],
[3, 33, 53, 67, 1, 78, 74, 55, 12, 83],
[7, 11, 46, 70, 60, 47, 24, 43, 61, 26],
[32, 61, 88, 7, 39, 4, 92, 64, 45, 61]]
d = signal.medfilt(f, [7, 3])
e = signal.medfilt2d(np.array(f, float), [7, 3])
assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0],
[0, 50, 50, 50, 50, 42, 19, 21, 29, 0],
[50, 50, 50, 50, 50, 47, 34, 34, 46, 35],
[50, 50, 50, 50, 50, 50, 42, 47, 64, 42],
[50, 50, 50, 50, 50, 50, 46, 55, 64, 35],
[33, 50, 50, 50, 50, 47, 46, 43, 55, 26],
[32, 50, 50, 50, 50, 47, 46, 45, 55, 26],
[7, 46, 50, 50, 47, 46, 46, 43, 45, 21],
[0, 32, 33, 39, 32, 32, 43, 43, 43, 0],
[0, 7, 11, 7, 4, 4, 19, 19, 24, 0]])
assert_array_equal(d, e)
def test_none(self):
# Ticket #1124. Ensure this does not segfault.
signal.medfilt(None)
# Expand on this test to avoid a regression with possible contiguous
# numpy arrays that have odd strides. The stride value below gets
# us into wrong memory if used (but it does not need to be used)
dummy = np.arange(10, dtype=np.float64)
a = dummy[5:6]
a.strides = 16
assert_(signal.medfilt(a, 1) == 5.)
def test_refcounting(self):
# Check a refcounting-related crash
a = Decimal(123)
x = np.array([a, a], dtype=object)
if hasattr(sys, 'getrefcount'):
n = 2 * sys.getrefcount(a)
else:
n = 10
# Shouldn't segfault:
for j in range(n):
signal.medfilt(x)
if hasattr(sys, 'getrefcount'):
assert_(sys.getrefcount(a) < n)
assert_equal(x, [a, a])
class TestWiener(object):
def test_basic(self):
g = array([[5, 6, 4, 3],
[3, 5, 6, 2],
[2, 3, 5, 6],
[1, 6, 9, 7]], 'd')
h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667],
[2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],
[2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],
[1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]])
assert_array_almost_equal(signal.wiener(g), h, decimal=6)
assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6)
padtype_options = ["constant", "mean", "median", "minimum", "maximum", "line"]
class TestResample(object):
def test_basic(self):
# Some basic tests
# Regression test for issue #3603.
# window.shape must equal to sig.shape[0]
sig = np.arange(128)
num = 256
win = signal.get_window(('kaiser', 8.0), 160)
assert_raises(ValueError, signal.resample, sig, num, window=win)
# Other degenerate conditions
assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1)
assert_raises(ValueError, signal.resample_poly, sig, 1, 0)
assert_raises(ValueError, signal.resample_poly, sig, 2, 1, padtype='')
assert_raises(ValueError, signal.resample_poly, sig, 2, 1,
padtype='mean', cval=10)
# test for issue #6505 - should not modify window.shape when axis ≠ 0
sig2 = np.tile(np.arange(160), (2, 1))
signal.resample(sig2, num, axis=-1, window=win)
assert_(win.shape == (160,))
@pytest.mark.parametrize('window', (None, 'hamming'))
@pytest.mark.parametrize('N', (20, 19))
@pytest.mark.parametrize('num', (100, 101, 10, 11))
def test_rfft(self, N, num, window):
# Make sure the speed up using rfft gives the same result as the normal
# way using fft
x = np.linspace(0, 10, N, endpoint=False)
y = np.cos(-x**2/6.0)
assert_allclose(signal.resample(y, num, window=window),
signal.resample(y + 0j, num, window=window).real)
y = np.array([np.cos(-x**2/6.0), np.sin(-x**2/6.0)])
y_complex = y + 0j
assert_allclose(
signal.resample(y, num, axis=1, window=window),
signal.resample(y_complex, num, axis=1, window=window).real,
atol=1e-9)
@pytest.mark.parametrize('nx', (1, 2, 3, 5, 8))
@pytest.mark.parametrize('ny', (1, 2, 3, 5, 8))
@pytest.mark.parametrize('dtype', ('float', 'complex'))
def test_dc(self, nx, ny, dtype):
x = np.array([1] * nx, dtype)
y = signal.resample(x, ny)
assert_allclose(y, [1] * ny)
@pytest.mark.parametrize('padtype', padtype_options)
def test_mutable_window(self, padtype):
# Test that a mutable window is not modified
impulse = np.zeros(3)
window = np.random.RandomState(0).randn(2)
window_orig = window.copy()
signal.resample_poly(impulse, 5, 1, window=window, padtype=padtype)
assert_array_equal(window, window_orig)
@pytest.mark.parametrize('padtype', padtype_options)
def test_output_float32(self, padtype):
# Test that float32 inputs yield a float32 output
x = np.arange(10, dtype=np.float32)
h = np.array([1, 1, 1], dtype=np.float32)
y = signal.resample_poly(x, 1, 2, window=h, padtype=padtype)
assert(y.dtype == np.float32)
@pytest.mark.parametrize(
"method, ext, padtype",
[("fft", False, None)]
+ list(
product(
["polyphase"], [False, True], padtype_options,
)
),
)
def test_resample_methods(self, method, ext, padtype):
# Test resampling of sinusoids and random noise (1-sec)
rate = 100
rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201]
# Sinusoids, windowed to avoid edge artifacts
t = np.arange(rate) / float(rate)
freqs = np.array((1., 10., 40.))[:, np.newaxis]
x = np.sin(2 * np.pi * freqs * t) * hann(rate)
for rate_to in rates_to:
t_to = np.arange(rate_to) / float(rate_to)
y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to)
if method == 'fft':
y_resamps = signal.resample(x, rate_to, axis=-1)
else:
if ext and rate_to != rate:
# Match default window design
g = gcd(rate_to, rate)
up = rate_to // g
down = rate // g
max_rate = max(up, down)
f_c = 1. / max_rate
half_len = 10 * max_rate
window = signal.firwin(2 * half_len + 1, f_c,
window=('kaiser', 5.0))
polyargs = {'window': window, 'padtype': padtype}
else:
polyargs = {'padtype': padtype}
y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1,
**polyargs)
for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs):
if freq >= 0.5 * rate_to:
y_to.fill(0.) # mostly low-passed away
if padtype in ['minimum', 'maximum']:
assert_allclose(y_resamp, y_to, atol=3e-1)
else:
assert_allclose(y_resamp, y_to, atol=1e-3)
else:
assert_array_equal(y_to.shape, y_resamp.shape)
corr = np.corrcoef(y_to, y_resamp)[0, 1]
assert_(corr > 0.99, msg=(corr, rate, rate_to))
# Random data
rng = np.random.RandomState(0)
        x = hann(rate) * np.cumsum(rng.randn(rate))  # low-pass, windowed
for rate_to in rates_to:
# random data
t_to = np.arange(rate_to) / float(rate_to)
y_to = np.interp(t_to, t, x)
if method == 'fft':
y_resamp = signal.resample(x, rate_to)
else:
y_resamp = signal.resample_poly(x, rate_to, rate,
padtype=padtype)
assert_array_equal(y_to.shape, y_resamp.shape)
corr = np.corrcoef(y_to, y_resamp)[0, 1]
assert_(corr > 0.99, msg=corr)
# More tests of fft method (Master 0.18.1 fails these)
if method == 'fft':
x1 = np.array([1.+0.j, 0.+0.j])
y1_test = signal.resample(x1, 4)
# upsampling a complex array
y1_true = np.array([1.+0.j, 0.5+0.j, 0.+0.j, 0.5+0.j])
assert_allclose(y1_test, y1_true, atol=1e-12)
x2 = np.array([1., 0.5, 0., 0.5])
y2_test = signal.resample(x2, 2) # downsampling a real array
y2_true = np.array([1., 0.])
assert_allclose(y2_test, y2_true, atol=1e-12)
def test_poly_vs_filtfilt(self):
# Check that up=1.0 gives same answer as filtfilt + slicing
random_state = np.random.RandomState(17)
try_types = (int, np.float32, np.complex64, float, complex)
size = 10000
down_factors = [2, 11, 79]
for dtype in try_types:
x = random_state.randn(size).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * random_state.randn(size)
            # resample_poly assumes zeros outside of the signal, whereas filtfilt
# can only constant-pad. Make them equivalent:
x[0] = 0
x[-1] = 0
for down in down_factors:
h = signal.firwin(31, 1. / down, window='hamming')
yf = filtfilt(h, 1.0, x, padtype='constant')[::down]
# Need to pass convolved version of filter to resample_poly,
# since filtfilt does forward and backward, but resample_poly
# only goes forward
hc = convolve(h, h[::-1])
y = signal.resample_poly(x, 1, down, window=hc)
assert_allclose(yf, y, atol=1e-7, rtol=1e-7)
def test_correlate1d(self):
for down in [2, 4]:
for nx in range(1, 40, down):
for nweights in (32, 33):
x = np.random.random((nx,))
weights = np.random.random((nweights,))
y_g = correlate1d(x, weights[::-1], mode='constant')
y_s = signal.resample_poly(
x, up=1, down=down, window=weights)
assert_allclose(y_g[::down], y_s)
class TestCSpline1DEval(object):
def test_basic(self):
y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
x = arange(len(y))
dx = x[1] - x[0]
cj = signal.cspline1d(y)
x2 = arange(len(y) * 10.0) / 10.0
y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0])
# make sure interpolated values are on knot points
assert_array_almost_equal(y2[::10], y, decimal=5)
def test_complex(self):
# create some smoothly varying complex signal to interpolate
x = np.arange(2)
y = np.zeros(x.shape, dtype=np.complex64)
T = 10.0
f = 1.0 / T
y = np.exp(2.0J * np.pi * f * x)
# get the cspline transform
cy = signal.cspline1d(y)
# determine new test x value and interpolate
xnew = np.array([0.5])
ynew = signal.cspline1d_eval(cy, xnew)
assert_equal(ynew.dtype, y.dtype)
class TestOrderFilt(object):
def test_basic(self):
assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1),
[2, 3, 2])
class _TestLinearFilter(object):
def generate(self, shape):
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
return self.convert_dtype(x)
def convert_dtype(self, arr):
if self.dtype == np.dtype('O'):
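            # Object arrays are filled element by element so each value goes
            # through self.type (subclasses that test an object dtype set this
            # to the scalar type they exercise); a plain astype() would not
            # call that constructor.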
arr = np.asarray(arr)
out = np.empty(arr.shape, self.dtype)
iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'],
[['readonly'],['writeonly']])
for x, y in iter:
y[...] = self.type(x[()])
return out
else:
return np.array(arr, self.dtype, copy=False)
def test_rank_1_IIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, -0.5])
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
assert_array_almost_equal(lfilter(b, a, x), y_r)
def test_rank_1_FIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, 1])
a = self.convert_dtype([1])
y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.])
assert_array_almost_equal(lfilter(b, a, x), y_r)
def test_rank_1_IIR_init_cond(self):
x = self.generate((6,))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([0.5, -0.5])
zi = self.convert_dtype([1, 2])
y_r = self.convert_dtype([1, 5, 9, 13, 17, 21])
zf_r = self.convert_dtype([13, -10])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_1_FIR_init_cond(self):
x = self.generate((6,))
b = self.convert_dtype([1, 1, 1])
a = self.convert_dtype([1])
zi = self.convert_dtype([1, 1])
y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.])
zf_r = self.convert_dtype([9, 5])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_2_IIR_axis_0(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4],
[6, 4, 2]])
y = lfilter(b, a, x, axis=0)
assert_array_almost_equal(y_r2_a0, y)
def test_rank_2_IIR_axis_1(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12],
[18, -16, 18]])
y = lfilter(b, a, x, axis=1)
assert_array_almost_equal(y_r2_a1, y)
def test_rank_2_IIR_axis_0_init_cond(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
zi = self.convert_dtype(np.ones((4,1)))
y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13],
[19, -17, 19]])
zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis]
y, zf = lfilter(b, a, x, axis=1, zi=zi)
assert_array_almost_equal(y_r2_a0_1, y)
assert_array_almost_equal(zf, zf_r)
def test_rank_2_IIR_axis_1_init_cond(self):
x = self.generate((4,3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
zi = self.convert_dtype(np.ones((1,3)))
y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1],
[1, 3, 5], [5, 3, 1]])
zf_r = self.convert_dtype([[-23, -23, -23]])
y, zf = lfilter(b, a, x, axis=0, zi=zi)
assert_array_almost_equal(y_r2_a0_0, y)
assert_array_almost_equal(zf, zf_r)
def test_rank_3_IIR(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
for axis in range(x.ndim):
y = lfilter(b, a, x, axis)
y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
assert_array_almost_equal(y, y_r)
def test_rank_3_IIR_init_cond(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
for axis in range(x.ndim):
zi_shape = list(x.shape)
zi_shape[axis] = 1
zi = self.convert_dtype(np.ones(zi_shape))
zi1 = self.convert_dtype([1])
y, zf = lfilter(b, a, x, axis, zi)
lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
y_r = np.apply_along_axis(lf0, axis, x)
zf_r = np.apply_along_axis(lf1, axis, x)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_3_FIR(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
for axis in range(x.ndim):
y = lfilter(b, a, x, axis)
y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
assert_array_almost_equal(y, y_r)
def test_rank_3_FIR_init_cond(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
for axis in range(x.ndim):
zi_shape = list(x.shape)
zi_shape[axis] = 2
zi = self.convert_dtype(np.ones(zi_shape))
zi1 = self.convert_dtype([1, 1])
y, zf = lfilter(b, a, x, axis, zi)
lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
y_r = np.apply_along_axis(lf0, axis, x)
zf_r = np.apply_along_axis(lf1, axis, x)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_zi_pseudobroadcast(self):
x = self.generate((4, 5, 20))
b,a = signal.butter(8, 0.2, output='ba')
b = self.convert_dtype(b)
a = self.convert_dtype(a)
zi_size = b.shape[0] - 1
# lfilter requires x.ndim == zi.ndim exactly. However, zi can have
# length 1 dimensions.
zi_full = self.convert_dtype(np.ones((4, 5, zi_size)))
zi_sing = self.convert_dtype(np.ones((1, 1, zi_size)))
y_full, zf_full = lfilter(b, a, x, zi=zi_full)
y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing)
assert_array_almost_equal(y_sing, y_full)
assert_array_almost_equal(zf_full, zf_sing)
# lfilter does not prepend ones
assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size))
def test_scalar_a(self):
# a can be a scalar.
x = self.generate(6)
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
y_r = self.convert_dtype([0, 1, 2, 2, 2, 2])
y = lfilter(b, a[0], x)
assert_array_almost_equal(y, y_r)
def test_zi_some_singleton_dims(self):
# lfilter doesn't really broadcast (no prepending of 1's). But does
# do singleton expansion if x and zi have the same ndim. This was
# broken only if a subset of the axes were singletons (gh-4681).
x = self.convert_dtype(np.zeros((3,2,5), 'l'))
b = self.convert_dtype(np.ones(5, 'l'))
a = self.convert_dtype(np.array([1,0,0]))
zi = np.ones((3,1,4), 'l')
zi[1,:,:] *= 2
zi[2,:,:] *= 3
zi = self.convert_dtype(zi)
zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l'))
y_expected = np.zeros((3,2,5), 'l')
y_expected[:,:,:4] = [[[1]], [[2]], [[3]]]
y_expected = self.convert_dtype(y_expected)
# IIR
y_iir, zf_iir = lfilter(b, a, x, -1, zi)
assert_array_almost_equal(y_iir, y_expected)
assert_array_almost_equal(zf_iir, zf_expected)
# FIR
y_fir, zf_fir = lfilter(b, a[0], x, -1, zi)
assert_array_almost_equal(y_fir, y_expected)
assert_array_almost_equal(zf_fir, zf_expected)
def base_bad_size_zi(self, b, a, x, axis, zi):
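        # Helper: every (b, a, x, axis, zi) combination routed through here
        # has a mis-shaped zi and must make lfilter raise ValueError.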
b = self.convert_dtype(b)
a = self.convert_dtype(a)
x = self.convert_dtype(x)
zi = self.convert_dtype(zi)
assert_raises(ValueError, lfilter, b, a, x, axis, zi)
def test_bad_size_zi(self):
# rank 1
x1 = np.arange(6)
self.base_bad_size_zi([1], [1], x1, -1, [1])
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1])
self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]])
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]])
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1])
self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]])
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3])
# rank 2
x2 = np.arange(12).reshape((4,3))
# for axis=0 zi.shape should == (max(len(a),len(b))-1, 3)
self.base_bad_size_zi([1], [1], x2, 0, [0])
# for each of these there are 5 cases tested (in this order):
# 1. not deep enough, right # elements
# 2. too deep, right # elements
# 3. right depth, right # elements, transposed
# 4. right depth, too few elements
# 5. right depth, too many elements
self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
# for axis=1 zi.shape should == (4, max(len(a),len(b))-1)
self.base_bad_size_zi([1], [1], x2, 1, [0])
self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
def test_empty_zi(self):
# Regression test for #880: empty array for zi crashes.
x = self.generate((5,))
a = self.convert_dtype([1])
b = self.convert_dtype([1])
zi = self.convert_dtype([])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, x)
assert_equal(zf.dtype, self.dtype)
assert_equal(zf.size, 0)
def test_lfiltic_bad_zi(self):
# Regression test for #3699: bad initial conditions
a = self.convert_dtype([1])
b = self.convert_dtype([1])
# "y" sets the datatype of zi, so it truncates if int
zi = lfiltic(b, a, [1., 0])
zi_1 = lfiltic(b, a, [1, 0])
zi_2 = lfiltic(b, a, [True, False])
assert_array_equal(zi, zi_1)
assert_array_equal(zi, zi_2)
def test_short_x_FIR(self):
# regression test for #5116
# x shorter than b, with non None zi fails
a = self.convert_dtype([1])
b = self.convert_dtype([1, 0, -1])
zi = self.convert_dtype([2, 7])
x = self.convert_dtype([72])
ye = self.convert_dtype([74])
zfe = self.convert_dtype([7, -72])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_short_x_IIR(self):
# regression test for #5116
# x shorter than b, with non None zi fails
a = self.convert_dtype([1, 1])
b = self.convert_dtype([1, 0, -1])
zi = self.convert_dtype([2, 7])
x = self.convert_dtype([72])
ye = self.convert_dtype([74])
zfe = self.convert_dtype([-67, -72])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_do_not_modify_a_b_IIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, -1])
b0 = b.copy()
a = self.convert_dtype([0.5, -0.5])
a0 = a.copy()
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
assert_equal(b, b0)
assert_equal(a, a0)
def test_do_not_modify_a_b_FIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, 0, 1])
b0 = b.copy()
a = self.convert_dtype([2])
a0 = a.copy()
y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.])
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
assert_equal(b, b0)
assert_equal(a, a0)
class TestLinearFilterFloat32(_TestLinearFilter):
dtype = np.dtype('f')
class TestLinearFilterFloat64(_TestLinearFilter):
dtype = np.dtype('d')
class TestLinearFilterFloatExtended(_TestLinearFilter):
dtype = np.dtype('g')
class TestLinearFilterComplex64(_TestLinearFilter):
dtype = np.dtype('F')
class TestLinearFilterComplex128(_TestLinearFilter):
dtype = np.dtype('D')
class TestLinearFilterComplexExtended(_TestLinearFilter):
dtype = np.dtype('G')
class TestLinearFilterDecimal(_TestLinearFilter):
dtype = np.dtype('O')
def type(self, x):
return Decimal(str(x))
class TestLinearFilterObject(_TestLinearFilter):
dtype = np.dtype('O')
type = float
def test_lfilter_bad_object():
# lfilter: object arrays with non-numeric objects raise TypeError.
# Regression test for ticket #1452.
assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0])
assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0])
assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0])
with assert_raises(ValueError, match='common type'):
lfilter([1.], [1., 1.], ['a', 'b', 'c'])
def test_lfilter_notimplemented_input():
# Should not crash, gh-7991
assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5])
@pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short,
                                np.uint, int, np.ulonglong, np.longlong,
                                np.float32, np.float64, np.longdouble,
                                Decimal])
class TestCorrelateReal(object):
def _setup_rank1(self, dt):
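        # y_r is the reference 'full' correlation; the individual tests slice
        # it down for the 'valid' and 'same' modes.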
a = np.linspace(0, 3, 4).astype(dt)
b = np.linspace(1, 2, 2).astype(dt)
y_r = np.array([0, 2, 5, 8, 3]).astype(dt)
return a, b, y_r
def equal_tolerance(self, res_dt):
# default value of keyword
decimal = 6
try:
dt_info = np.finfo(res_dt)
if hasattr(dt_info, 'resolution'):
decimal = int(-0.5*np.log10(dt_info.resolution))
except Exception:
pass
return decimal
def equal_tolerance_fft(self, res_dt):
# FFT implementations convert longdouble arguments down to
# double so don't expect better precision, see gh-9520
if res_dt == np.longdouble:
return self.equal_tolerance(np.double)
else:
return self.equal_tolerance(res_dt)
def test_method(self, dt):
if dt == Decimal:
method = choose_conv_method([Decimal(4)], [Decimal(3)])
assert_equal(method, 'direct')
else:
a, b, y_r = self._setup_rank3(dt)
y_fft = correlate(a, b, method='fft')
y_direct = correlate(a, b, method='direct')
assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype))
assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype))
assert_equal(y_fft.dtype, dt)
assert_equal(y_direct.dtype, dt)
def test_rank1_valid(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'valid')
assert_array_almost_equal(y, y_r[1:4])
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, 'valid')
assert_array_almost_equal(y, y_r[1:4][::-1])
assert_equal(y.dtype, dt)
def test_rank1_same(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'same')
assert_array_almost_equal(y, y_r[:-1])
assert_equal(y.dtype, dt)
def test_rank1_full(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r)
assert_equal(y.dtype, dt)
def _setup_rank3(self, dt):
a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(
dt)
b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(
dt)
y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.],
[46., 432., 1062., 1840., 2672., 1698., 864., 266.],
[134., 736., 1662., 2768., 3920., 2418., 1168., 314.],
[260., 952., 1932., 3056., 4208., 2580., 1240., 332.],
[202., 664., 1290., 1984., 2688., 1590., 712., 150.],
[114., 344., 642., 960., 1280., 726., 296., 38.]],
[[23., 400., 1035., 1832., 2696., 1737., 904., 293.],
[134., 920., 2166., 3680., 5280., 3306., 1640., 474.],
[325., 1544., 3369., 5512., 7720., 4683., 2192., 535.],
[571., 1964., 3891., 6064., 8272., 4989., 2324., 565.],
[434., 1360., 2586., 3920., 5264., 3054., 1312., 230.],
[241., 700., 1281., 1888., 2496., 1383., 532., 39.]],
[[22., 214., 528., 916., 1332., 846., 430., 132.],
[86., 484., 1098., 1832., 2600., 1602., 772., 206.],
[188., 802., 1698., 2732., 3788., 2256., 1018., 218.],
[308., 1006., 1950., 2996., 4052., 2400., 1078., 230.],
[230., 692., 1290., 1928., 2568., 1458., 596., 78.],
[126., 354., 636., 924., 1212., 654., 234., 0.]]],
dtype=dt)
return a, b, y_r
def test_rank3_valid(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b, "valid")
assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, "valid")
assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1])
assert_equal(y.dtype, dt)
def test_rank3_same(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b, "same")
assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])
assert_equal(y.dtype, dt)
def test_rank3_all(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b)
assert_array_almost_equal(y, y_r)
assert_equal(y.dtype, dt)
class TestCorrelate(object):
# Tests that don't depend on dtype
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'})
def test_invalid_params(self):
a = [3, 4, 5]
b = [1, 2, 3]
assert_raises(ValueError, correlate, a, b, mode='spam')
assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft')
assert_raises(ValueError, correlate, a, b, mode='ham', method='direct')
assert_raises(ValueError, correlate, a, b, mode='full', method='bacon')
assert_raises(ValueError, correlate, a, b, mode='same', method='bacon')
def test_mismatched_dims(self):
# Input arrays should have the same number of dimensions
assert_raises(ValueError, correlate, [1], 2, method='direct')
assert_raises(ValueError, correlate, 1, [2], method='direct')
assert_raises(ValueError, correlate, [1], 2, method='fft')
assert_raises(ValueError, correlate, 1, [2], method='fft')
assert_raises(ValueError, correlate, [1], [[2]])
assert_raises(ValueError, correlate, [3], 2)
def test_numpy_fastpath(self):
a = [1, 2, 3]
b = [4, 5]
assert_allclose(correlate(a, b, mode='same'), [5, 14, 23])
a = [1, 2, 3]
b = [4, 5, 6]
assert_allclose(correlate(a, b, mode='same'), [17, 32, 23])
assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12])
assert_allclose(correlate(a, b, mode='valid'), [32])
@pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble])
class TestCorrelateComplex(object):
# The decimal precision to be used for comparing results.
# This value will be passed as the 'decimal' keyword argument of
# assert_array_almost_equal().
    # Since correlate may choose to use the FFT method, which converts
    # longdoubles to doubles internally, don't expect better precision
# for longdouble than for double (see gh-9520).
def decimal(self, dt):
if dt == np.clongdouble:
dt = np.cdouble
return int(2 * np.finfo(dt).precision / 3)
def _setup_rank1(self, dt, mode):
np.random.seed(9)
a = np.random.randn(10).astype(dt)
a += 1j * np.random.randn(10).astype(dt)
b = np.random.randn(8).astype(dt)
b += 1j * np.random.randn(8).astype(dt)
y_r = (correlate(a.real, b.real, mode=mode) +
correlate(a.imag, b.imag, mode=mode)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag, mode=mode) +
correlate(a.imag, b.real, mode=mode))
return a, b, y_r
def test_rank1_valid(self, dt):
a, b, y_r = self._setup_rank1(dt, 'valid')
y = correlate(a, b, 'valid')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, 'valid')
assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_rank1_same(self, dt):
a, b, y_r = self._setup_rank1(dt, 'same')
y = correlate(a, b, 'same')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_rank1_full(self, dt):
a, b, y_r = self._setup_rank1(dt, 'full')
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_swap_full(self, dt):
d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt)
k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt)
y = correlate(d, k)
assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j])
def test_swap_same(self, dt):
d = [0.+0.j, 1.+1.j, 2.+2.j]
k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j]
y = correlate(d, k, mode="same")
assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j])
def test_rank3(self, dt):
a = np.random.randn(10, 8, 6).astype(dt)
a += 1j * np.random.randn(10, 8, 6).astype(dt)
b = np.random.randn(8, 6, 4).astype(dt)
b += 1j * np.random.randn(8, 6, 4).astype(dt)
y_r = (correlate(a.real, b.real)
+ correlate(a.imag, b.imag)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)
assert_equal(y.dtype, dt)
def test_rank0(self, dt):
a = np.array(np.random.randn()).astype(dt)
a += 1j * np.array(np.random.randn()).astype(dt)
b = np.array(np.random.randn()).astype(dt)
b += 1j * np.array(np.random.randn()).astype(dt)
y_r = (correlate(a.real, b.real)
+ correlate(a.imag, b.imag)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)
assert_equal(y.dtype, dt)
assert_equal(correlate([1], [2j]), correlate(1, 2j))
assert_equal(correlate([2j], [3j]), correlate(2j, 3j))
assert_equal(correlate([3j], [4]), correlate(3j, 4))
class TestCorrelate2d(object):
def test_consistency_correlate_funcs(self):
# Compare np.correlate, signal.correlate, signal.correlate2d
a = np.arange(5)
b = np.array([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
assert_almost_equal(np.correlate(a, b, mode=mode),
signal.correlate(a, b, mode=mode))
assert_almost_equal(np.squeeze(signal.correlate2d([a], [b],
mode=mode)),
signal.correlate(a, b, mode=mode))
# See gh-5897
if mode == 'valid':
assert_almost_equal(np.correlate(b, a, mode=mode),
signal.correlate(b, a, mode=mode))
assert_almost_equal(np.squeeze(signal.correlate2d([b], [a],
mode=mode)),
signal.correlate(b, a, mode=mode))
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'})
def test_complex_input(self):
assert_equal(signal.correlate2d([[1]], [[2j]]), -2j)
assert_equal(signal.correlate2d([[2j]], [[3j]]), 6)
assert_equal(signal.correlate2d([[3j]], [[4]]), 12j)
class TestLFilterZI(object):
def test_basic(self):
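        # lfilter_zi returns the internal filter state for which a unit-step
        # input yields the steady-state output from the very first sample.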
a = np.array([1.0, -1.0, 0.5])
b = np.array([1.0, 0.0, 2.0])
zi_expected = np.array([5.0, -1.0])
zi = lfilter_zi(b, a)
assert_array_almost_equal(zi, zi_expected)
def test_scale_invariance(self):
# Regression test. There was a bug in which b was not correctly
# rescaled when a[0] was nonzero.
b = np.array([2, 8, 5])
a = np.array([1, 1, 8])
zi1 = lfilter_zi(b, a)
zi2 = lfilter_zi(2*b, 2*a)
assert_allclose(zi2, zi1, rtol=1e-12)
class TestFiltFilt(object):
filtfilt_kind = 'tf'
def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None,
method='pad', irlen=None):
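        # Dispatch on filtfilt_kind so the same test bodies exercise both the
        # transfer-function (filtfilt) and second-order-sections (sosfiltfilt)
        # implementations.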
if self.filtfilt_kind == 'tf':
b, a = zpk2tf(*zpk)
return filtfilt(b, a, x, axis, padtype, padlen, method, irlen)
elif self.filtfilt_kind == 'sos':
sos = zpk2sos(*zpk)
return sosfiltfilt(sos, x, axis, padtype, padlen)
def test_basic(self):
zpk = tf2zpk([1, 2, 3], [1, 2, 3])
out = self.filtfilt(zpk, np.arange(12))
assert_allclose(out, arange(12), atol=1e-11)
def test_sine(self):
rate = 2000
t = np.linspace(0, 1.0, rate + 1)
# A signal with low frequency and a high frequency.
xlow = np.sin(5 * 2 * np.pi * t)
xhigh = np.sin(250 * 2 * np.pi * t)
x = xlow + xhigh
zpk = butter(8, 0.125, output='zpk')
# r is the magnitude of the largest pole.
r = np.abs(zpk[1]).max()
eps = 1e-5
# n estimates the number of steps for the
# transient to decay by a factor of eps.
n = int(np.ceil(np.log(eps) / np.log(r)))
# High order lowpass filter...
y = self.filtfilt(zpk, x, padlen=n)
# Result should be just xlow.
err = np.abs(y - xlow).max()
assert_(err < 1e-4)
# A 2D case.
x2d = np.vstack([xlow, xlow + xhigh])
y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1)
assert_equal(y2d.shape, x2d.shape)
err = np.abs(y2d - xlow).max()
assert_(err < 1e-4)
# Use the previous result to check the use of the axis keyword.
# (Regression test for ticket #1620)
y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0)
assert_equal(y2d, y2dt.T)
def test_axis(self):
# Test the 'axis' keyword on a 3D array.
x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12)
zpk = butter(3, 0.125, output='zpk')
y0 = self.filtfilt(zpk, x, padlen=0, axis=0)
y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1)
assert_array_equal(y0, np.swapaxes(y1, 0, 1))
y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2)
assert_array_equal(y0, np.swapaxes(y2, 0, 2))
def test_acoeff(self):
if self.filtfilt_kind != 'tf':
return # only necessary for TF
# test for 'a' coefficient as single number
out = signal.filtfilt([.5, .5], 1, np.arange(10))
assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14)
def test_gust_simple(self):
if self.filtfilt_kind != 'tf':
pytest.skip('gust only implemented for TF systems')
# The input array has length 2. The exact solution for this case
# was computed "by hand".
x = np.array([1.0, 2.0])
b = np.array([0.5])
a = np.array([1.0, -0.5])
y, z1, z2 = _filtfilt_gust(b, a, x)
assert_allclose([z1[0], z2[0]],
[0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]])
assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1],
0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]])
def test_gust_scalars(self):
if self.filtfilt_kind != 'tf':
pytest.skip('gust only implemented for TF systems')
# The filter coefficients are both scalars, so the filter simply
# multiplies its input by b/a. When it is used in filtfilt, the
# factor is (b/a)**2.
x = np.arange(12)
b = 3.0
a = 2.0
y = filtfilt(b, a, x, method="gust")
expected = (b/a)**2 * x
assert_allclose(y, expected)
class TestSOSFiltFilt(TestFiltFilt):
filtfilt_kind = 'sos'
def test_equivalence(self):
"""Test equivalence between sosfiltfilt and filtfilt"""
x = np.random.RandomState(0).randn(1000)
for order in range(1, 6):
zpk = signal.butter(order, 0.35, output='zpk')
b, a = zpk2tf(*zpk)
sos = zpk2sos(*zpk)
y = filtfilt(b, a, x)
y_sos = sosfiltfilt(sos, x)
assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order)
def filtfilt_gust_opt(b, a, x):
"""
An alternative implementation of filtfilt with Gustafsson edges.
This function computes the same result as
`scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays
are accepted. The problem is solved using `fmin` from `scipy.optimize`.
    `_filtfilt_gust` is significantly faster than this implementation.
"""
def filtfilt_gust_opt_func(ics, b, a, x):
"""Objective function used in filtfilt_gust_opt."""
m = max(len(a), len(b)) - 1
z0f = ics[:m]
z0b = ics[m:]
y_f = lfilter(b, a, x, zi=z0f)[0]
y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1]
y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
y_bf = lfilter(b, a, y_b, zi=z0f)[0]
value = np.sum((y_fb - y_bf)**2)
return value
m = max(len(a), len(b)) - 1
zi = lfilter_zi(b, a)
ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi))
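    # Minimize the Gustafsson criterion (the mismatch between
    # forward-then-backward and backward-then-forward filtering) over the
    # stacked initial conditions z0f and z0b.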
result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x),
xtol=1e-10, ftol=1e-12,
maxfun=10000, maxiter=10000,
full_output=True, disp=False)
opt, fopt, niter, funcalls, warnflag = result
if warnflag > 0:
raise RuntimeError("minimization failed in filtfilt_gust_opt: "
"warnflag=%d" % warnflag)
z0f = opt[:m]
z0b = opt[m:]
# Apply the forward-backward filter using the computed initial
# conditions.
y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
y = lfilter(b, a, y_b, zi=z0f)[0]
return y, z0f, z0b
def check_filtfilt_gust(b, a, shape, axis, irlen=None):
# Generate x, the data to be filtered.
np.random.seed(123)
x = np.random.randn(*shape)
# Apply filtfilt to x. This is the main calculation to be checked.
y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen)
# Also call the private function so we can test the ICs.
yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
# filtfilt_gust_opt is an independent implementation that gives the
# expected result, but it only handles 1-d arrays, so use some looping
# and reshaping shenanigans to create the expected output arrays.
xx = np.swapaxes(x, axis, -1)
out_shape = xx.shape[:-1]
yo = np.empty_like(xx)
m = max(len(a), len(b)) - 1
zo1 = np.empty(out_shape + (m,))
zo2 = np.empty(out_shape + (m,))
for indx in product(*[range(d) for d in out_shape]):
yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx])
yo = np.swapaxes(yo, -1, axis)
zo1 = np.swapaxes(zo1, -1, axis)
zo2 = np.swapaxes(zo2, -1, axis)
assert_allclose(y, yo, rtol=1e-9, atol=1e-10)
assert_allclose(yg, yo, rtol=1e-9, atol=1e-10)
assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10)
assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10)
def test_choose_conv_method():
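    # Small inputs, and dtypes that rule out exact FFT convolution
    # (extended-precision complex, very large int64 values, Decimal), must
    # select the 'direct' method; measure=True additionally reports timings
    # for both methods.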
for mode in ['valid', 'same', 'full']:
for ndims in [1, 2]:
n, k, true_method = 8, 6, 'direct'
x = np.random.randn(*((n,) * ndims))
h = np.random.randn(*((k,) * ndims))
method = choose_conv_method(x, h, mode=mode)
assert_equal(method, true_method)
method_try, times = choose_conv_method(x, h, mode=mode, measure=True)
assert_(method_try in {'fft', 'direct'})
assert_(type(times) is dict)
assert_('fft' in times.keys() and 'direct' in times.keys())
n = 10
for not_fft_conv_supp in ["complex256", "complex192"]:
if hasattr(np, not_fft_conv_supp):
x = np.ones(n, dtype=not_fft_conv_supp)
h = x.copy()
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
x = np.array([2**51], dtype=np.int64)
h = x.copy()
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
x = [Decimal(3), Decimal(2)]
h = [Decimal(1), Decimal(4)]
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
def test_filtfilt_gust():
# Design a filter.
z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk')
# Find the approximate impulse response length of the filter.
eps = 1e-10
r = np.max(np.abs(p))
approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
np.random.seed(123)
b, a = zpk2tf(z, p, k)
for irlen in [None, approx_impulse_len]:
signal_len = 5 * approx_impulse_len
# 1-d test case
check_filtfilt_gust(b, a, (signal_len,), 0, irlen)
# 3-d test case; test each axis.
for axis in range(3):
shape = [2, 2, 2]
shape[axis] = signal_len
check_filtfilt_gust(b, a, shape, axis, irlen)
# Test case with length less than 2*approx_impulse_len.
# In this case, `filtfilt_gust` should behave the same as if
# `irlen=None` was given.
length = 2*approx_impulse_len - 50
check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len)
class TestDecimate(object):
def test_bad_args(self):
x = np.arange(12)
assert_raises(TypeError, signal.decimate, x, q=0.5, n=1)
assert_raises(TypeError, signal.decimate, x, q=2, n=0.5)
def test_basic_IIR(self):
x = np.arange(12)
y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round()
assert_array_equal(y, x[::2])
def test_basic_FIR(self):
x = np.arange(12)
y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round()
assert_array_equal(y, x[::2])
def test_shape(self):
# Regression test for ticket #1480.
z = np.zeros((30, 30))
d0 = signal.decimate(z, 2, axis=0, zero_phase=False)
assert_equal(d0.shape, (15, 30))
d1 = signal.decimate(z, 2, axis=1, zero_phase=False)
assert_equal(d1.shape, (30, 15))
def test_phaseshift_FIR(self):
with suppress_warnings() as sup:
sup.filter(BadCoefficients, "Badly conditioned filter")
self._test_phaseshift(method='fir', zero_phase=False)
def test_zero_phase_FIR(self):
with suppress_warnings() as sup:
sup.filter(BadCoefficients, "Badly conditioned filter")
self._test_phaseshift(method='fir', zero_phase=True)
def test_phaseshift_IIR(self):
self._test_phaseshift(method='iir', zero_phase=False)
def test_zero_phase_IIR(self):
self._test_phaseshift(method='iir', zero_phase=True)
def _test_phaseshift(self, method, zero_phase):
rate = 120
rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3
t_tot = int(100) # Need to let antialiasing filters settle
t = np.arange(rate*t_tot+1) / float(rate)
# Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts
freqs = np.array(rates_to) * 0.8 / 2
d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t)
* signal.windows.tukey(t.size, 0.1))
for rate_to in rates_to:
q = rate // rate_to
t_to = np.arange(rate_to*t_tot+1) / float(rate_to)
d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to)
* signal.windows.tukey(t_to.size, 0.1))
# Set up downsampling filters, match v0.17 defaults
if method == 'fir':
n = 30
system = signal.dlti(signal.firwin(n + 1, 1. / q,
window='hamming'), 1.)
elif method == 'iir':
n = 8
wc = 0.8*np.pi/q
system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi))
# Calculate expected phase response, as unit complex vector
if zero_phase is False:
_, h_resps = signal.freqz(system.num, system.den,
freqs/rate*2*np.pi)
h_resps /= np.abs(h_resps)
else:
h_resps = np.ones_like(freqs)
y_resamps = signal.decimate(d.real, q, n, ftype=system,
zero_phase=zero_phase)
# Get phase from complex inner product, like CSD
h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1)
h_resamps /= np.abs(h_resamps)
subnyq = freqs < 0.5*rate_to
# Complex vectors should be aligned, only compare below nyquist
assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0,
atol=1e-3, rtol=1e-3)
def test_auto_n(self):
# Test that our value of n is a reasonable choice (depends on
# the downsampling factor)
sfreq = 100.
n = 1000
t = np.arange(n) / sfreq
# will alias for decimations (>= 15)
x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) * t)
assert_allclose(np.linalg.norm(x), 1., rtol=1e-3)
x_out = signal.decimate(x, 30, ftype='fir')
assert_array_less(np.linalg.norm(x_out), 0.01)
class TestHilbert(object):
def test_bad_args(self):
x = np.array([1.0 + 0.0j])
assert_raises(ValueError, hilbert, x)
x = np.arange(8.0)
assert_raises(ValueError, hilbert, x, N=0)
def test_hilbert_theoretical(self):
# test cases by Ariel Rokem
decimal = 14
pi = np.pi
t = np.arange(0, 2 * pi, pi / 256)
a0 = np.sin(t)
a1 = np.cos(t)
a2 = np.sin(2 * t)
a3 = np.cos(2 * t)
a = np.vstack([a0, a1, a2, a3])
h = hilbert(a)
h_abs = np.abs(h)
h_angle = np.angle(h)
h_real = np.real(h)
# The real part should be equal to the original signals:
assert_almost_equal(h_real, a, decimal)
# The absolute value should be one everywhere, for this input:
assert_almost_equal(h_abs, np.ones(a.shape), decimal)
# For the 'slow' sine - the phase should go from -pi/2 to pi/2 in
# the first 256 bins:
assert_almost_equal(h_angle[0, :256],
np.arange(-pi / 2, pi / 2, pi / 256),
decimal)
# For the 'slow' cosine - the phase should go from 0 to pi in the
# same interval:
assert_almost_equal(
h_angle[1, :256], np.arange(0, pi, pi / 256), decimal)
# The 'fast' sine should make this phase transition in half the time:
assert_almost_equal(h_angle[2, :128],
np.arange(-pi / 2, pi / 2, pi / 128),
decimal)
# Ditto for the 'fast' cosine:
assert_almost_equal(
h_angle[3, :128], np.arange(0, pi, pi / 128), decimal)
# The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia
assert_almost_equal(h[1].imag, a0, decimal)
def test_hilbert_axisN(self):
# tests for axis and N arguments
a = np.arange(18).reshape(3, 6)
# test axis
aa = hilbert(a, axis=-1)
assert_equal(hilbert(a.T, axis=0), aa.T)
# test 1d
assert_almost_equal(hilbert(a[0]), aa[0], 14)
# test N
aan = hilbert(a, N=20, axis=-1)
assert_equal(aan.shape, [3, 20])
assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3])
# the next test is just a regression test,
# no idea whether numbers make sense
a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j,
1.000000000000000e+00 - 2.047794505137069j,
1.999999999999999e+00 - 2.244055555687583j,
3.000000000000000e+00 - 1.262750302935009j,
4.000000000000000e+00 - 1.066489252384493j,
5.000000000000000e+00 + 2.918022706971047j,
8.881784197001253e-17 + 3.845658908989067j,
-9.444121133484362e-17 + 0.985044202202061j,
-1.776356839400251e-16 + 1.332257797702019j,
-3.996802888650564e-16 + 0.501905089898885j,
1.332267629550188e-16 + 0.668696078880782j,
-1.192678053963799e-16 + 0.235487067862679j,
-1.776356839400251e-16 + 0.286439612812121j,
3.108624468950438e-16 + 0.031676888064907j,
1.332267629550188e-16 - 0.019275656884536j,
-2.360035624836702e-16 - 0.1652588660287j,
0.000000000000000e+00 - 0.332049855010597j,
3.552713678800501e-16 - 0.403810179797771j,
8.881784197001253e-17 - 0.751023775297729j,
9.444121133484362e-17 - 0.79252210110103j])
assert_almost_equal(aan[0], a0hilb, 14, 'N regression')
class TestHilbert2(object):
def test_bad_args(self):
# x must be real.
x = np.array([[1.0 + 0.0j]])
assert_raises(ValueError, hilbert2, x)
# x must be rank 2.
x = np.arange(24).reshape(2, 3, 4)
assert_raises(ValueError, hilbert2, x)
# Bad value for N.
x = np.arange(16).reshape(4, 4)
assert_raises(ValueError, hilbert2, x, N=0)
assert_raises(ValueError, hilbert2, x, N=(2, 0))
assert_raises(ValueError, hilbert2, x, N=(2,))
class TestPartialFractionExpansion(object):
def test_invresz_one_coefficient_bug(self):
# Regression test for issue in gh-4646.
r = [1]
p = [2]
k = [0]
a_expected = [1.0, 0.0]
b_expected = [1.0, -2.0]
a_observed, b_observed = invresz(r, p, k)
assert_allclose(a_observed, a_expected)
assert_allclose(b_observed, b_expected)
def test_invres_distinct_roots(self):
# This test was inspired by github issue 2496.
r = [3 / 10, -1 / 6, -2 / 15]
p = [0, -2, -5]
k = []
a_expected = [1, 3]
b_expected = [1, 7, 10, 0]
a_observed, b_observed = invres(r, p, k)
assert_allclose(a_observed, a_expected)
assert_allclose(b_observed, b_expected)
rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum')
# With the default tolerance, the rtype does not matter
# for this example.
for rtype in rtypes:
a_observed, b_observed = invres(r, p, k, rtype=rtype)
assert_allclose(a_observed, a_expected)
assert_allclose(b_observed, b_expected)
# With unrealistically large tolerances, repeated roots may be inferred
# and the rtype comes into play.
ridiculous_tolerance = 1e10
for rtype in rtypes:
a, b = invres(r, p, k, tol=ridiculous_tolerance, rtype=rtype)
def test_invres_repeated_roots(self):
r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]
p = [0, -2, -2, -5]
k = []
a_expected = [1, 3]
b_expected = [1, 9, 24, 20, 0]
rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum')
for rtype in rtypes:
a_observed, b_observed = invres(r, p, k, rtype=rtype)
assert_allclose(a_observed, a_expected)
assert_allclose(b_observed, b_expected)
def test_invres_bad_rtype(self):
r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]
p = [0, -2, -2, -5]
k = []
assert_raises(ValueError, invres, r, p, k, rtype='median')
class TestVectorstrength(object):
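    # vectorstrength(events, period) returns the resultant-vector length
    # (strength) and mean phase of the event times folded onto each period;
    # a scalar period gives 0-d outputs, a sequence of periods gives 1-d.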
def test_single_1dperiod(self):
events = np.array([.5])
period = 5.
targ_strength = 1.
targ_phase = .1
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_single_2dperiod(self):
events = np.array([.5])
period = [1, 2, 5.]
targ_strength = [1.] * 3
targ_phase = np.array([.5, .25, .1])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_array_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_equal_1dperiod(self):
events = np.array([.25, .25, .25, .25, .25, .25])
period = 2
targ_strength = 1.
targ_phase = .125
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_equal_2dperiod(self):
events = np.array([.25, .25, .25, .25, .25, .25])
period = [1, 2, ]
targ_strength = [1.] * 2
targ_phase = np.array([.25, .125])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_spaced_1dperiod(self):
events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
period = 1
targ_strength = 1.
targ_phase = .1
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_spaced_2dperiod(self):
events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
period = [1, .5]
targ_strength = [1.] * 2
targ_phase = np.array([.1, .2])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_partial_1dperiod(self):
events = np.array([.25, .5, .75])
period = 1
targ_strength = 1. / 3.
targ_phase = .5
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_partial_2dperiod(self):
events = np.array([.25, .5, .75])
period = [1., 1., 1., 1.]
targ_strength = [1. / 3.] * 4
targ_phase = np.array([.5, .5, .5, .5])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_opposite_1dperiod(self):
events = np.array([0, .25, .5, .75])
period = 1.
targ_strength = 0
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
def test_opposite_2dperiod(self):
events = np.array([0, .25, .5, .75])
period = [1.] * 10
targ_strength = [0.] * 10
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
def test_2d_events_ValueError(self):
events = np.array([[1, 2]])
period = 1.
assert_raises(ValueError, vectorstrength, events, period)
def test_2d_period_ValueError(self):
events = 1.
period = np.array([[1]])
assert_raises(ValueError, vectorstrength, events, period)
def test_zero_period_ValueError(self):
events = 1.
period = 0
assert_raises(ValueError, vectorstrength, events, period)
def test_negative_period_ValueError(self):
events = 1.
period = -1
assert_raises(ValueError, vectorstrength, events, period)
class TestSOSFilt(object):
# For sosfilt we only test a single datatype. Since sosfilt wraps
# to lfilter under the hood, it's hopefully good enough to ensure
# lfilter is extensively tested.
dt = np.float64
# The test_rank* tests are pulled from _TestLinearFilter
def test_rank1(self):
x = np.linspace(0, 5, 6).astype(self.dt)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, -0.5]).astype(self.dt)
# Test simple IIR
y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt)
assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r)
# Test simple FIR
b = np.array([1, 1]).astype(self.dt)
# NOTE: This was changed (rel. to TestLinear...) to add a pole @zero:
a = np.array([1, 0]).astype(self.dt)
y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt)
assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r)
b = [1, 1, 0]
a = [1, 0, 0]
x = np.ones(8)
sos = np.concatenate((b, a))
sos.shape = (1, 6)
y = sosfilt(sos, x)
assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2])
def test_rank2(self):
shape = (4, 3)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
x = x.astype(self.dt)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, 0.5]).astype(self.dt)
y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]],
dtype=self.dt)
y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12],
[18, -16, 18]], dtype=self.dt)
y = sosfilt(tf2sos(b, a), x, axis=0)
assert_array_almost_equal(y_r2_a0, y)
y = sosfilt(tf2sos(b, a), x, axis=1)
assert_array_almost_equal(y_r2_a1, y)
def test_rank3(self):
shape = (4, 3, 2)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, 0.5]).astype(self.dt)
# Test last axis
y = sosfilt(tf2sos(b, a), x)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j]))
def test_initial_conditions(self):
b1, a1 = signal.butter(2, 0.25, 'low')
b2, a2 = signal.butter(2, 0.75, 'low')
b3, a3 = signal.butter(2, 0.75, 'low')
b = np.convolve(np.convolve(b1, b2), b3)
a = np.convolve(np.convolve(a1, a2), a3)
sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3]))
x = np.random.rand(50)
# Stopping filtering and continuing
y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6))
y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]]
assert_allclose(y_true, lfilter(b, a, x))
y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2)))
y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]]
assert_allclose(y_true, y_sos)
# Use a step function
zi = sosfilt_zi(sos)
x = np.ones(8)
y, zf = sosfilt(sos, x, zi=zi)
assert_allclose(y, np.ones(8))
assert_allclose(zf, zi)
# Initial condition shape matching
x.shape = (1, 1) + x.shape # 3D
assert_raises(ValueError, sosfilt, sos, x, zi=zi)
zi_nd = zi.copy()
zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1])
assert_raises(ValueError, sosfilt, sos, x,
zi=zi_nd[:, :, :, [0, 1, 1]])
y, zf = sosfilt(sos, x, zi=zi_nd)
assert_allclose(y[0, 0], np.ones(8))
assert_allclose(zf[:, 0, 0, :], zi)
def test_initial_conditions_3d_axis1(self):
# Test the use of zi when sosfilt is applied to axis 1 of a 3-d input.
# Input array is x.
x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3))
# Design a filter in ZPK format and convert to SOS
zpk = signal.butter(6, 0.35, output='zpk')
sos = zpk2sos(*zpk)
nsections = sos.shape[0]
# Filter along this axis.
axis = 1
# Initial conditions, all zeros.
shp = list(x.shape)
shp[axis] = 2
shp = [nsections] + shp
z0 = np.zeros(shp)
# Apply the filter to x.
yf, zf = sosfilt(sos, x, axis=axis, zi=z0)
# Apply the filter to x in two stages.
y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0)
y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1)
# y should equal yf, and z2 should equal zf.
y = np.concatenate((y1, y2), axis=axis)
assert_allclose(y, yf, rtol=1e-10, atol=1e-13)
assert_allclose(z2, zf, rtol=1e-10, atol=1e-13)
# let's try the "step" initial condition
zi = sosfilt_zi(sos)
zi.shape = [nsections, 1, 2, 1]
zi = zi * x[:, 0:1, :]
y = sosfilt(sos, x, axis=axis, zi=zi)[0]
# check it against the TF form
b, a = zpk2tf(*zpk)
zi = lfilter_zi(b, a)
zi.shape = [1, zi.size, 1]
zi = zi * x[:, 0:1, :]
y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0]
assert_allclose(y, y_tf, rtol=1e-10, atol=1e-13)
def test_bad_zi_shape(self):
# The shape of zi is checked before using any values in the
# arguments, so np.empty is fine for creating the arguments.
x = np.empty((3, 15, 3))
sos = np.empty((4, 6))
zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3)
assert_raises(ValueError, sosfilt, sos, x, zi=zi, axis=1)
def test_sosfilt_zi(self):
sos = signal.butter(6, 0.2, output='sos')
zi = sosfilt_zi(sos)
y, zf = sosfilt(sos, np.ones(40), zi=zi)
assert_allclose(zf, zi, rtol=1e-13)
# Expected steady state value of the step response of this filter:
ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1))
assert_allclose(y, ss, rtol=1e-13)
class TestDeconvolve(object):
def test_basic(self):
# From docstring example
original = [0, 1, 0, 0, 1, 1, 0, 0]
impulse_response = [2, 1]
recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0]
recovered, remainder = signal.deconvolve(recorded, impulse_response)
assert_allclose(recovered, original)
class TestDetrend(object):
def test_basic(self):
detrended = detrend(array([1, 2, 3]))
detrended_exact = array([0, 0, 0])
assert_array_almost_equal(detrended, detrended_exact)
def test_copy(self):
x = array([1, 1.2, 1.5, 1.6, 2.4])
copy_array = detrend(x, overwrite_data=False)
inplace = detrend(x, overwrite_data=True)
assert_array_almost_equal(copy_array, inplace)
| bsd-3-clause | -3,161,383,540,988,734,500 | 37.089404 | 98 | 0.508061 | false |
jacobmorzinski/mailusage | gssapisasl.py | 1 | 5798 | #!/usr/bin/env python
# https://tools.ietf.org/html/rfc4752
import base64
import kerberos
AUTH_GSS_CONTINUE = kerberos.AUTH_GSS_CONTINUE # 0
AUTH_GSS_COMPLETE = kerberos.AUTH_GSS_COMPLETE # 1
# Bit-masks for SASL security layers
GSS_AUTH_P_NONE = 1
GSS_AUTH_P_INTEGRITY = 2
GSS_AUTH_P_PRIVACY = 4
STATE_ONE = 1 # initialize context
STATE_TWO = 2 # decide protection+username
class GSSAPI_SASL(object):
    '''Use it like this:
    host = 'imap.server.example.com'
    user = 'username'
    m = imaplib.IMAP4_SSL(host)
    with gssapisasl.GSSAPI_SASL('imap@{}'.format(host), user) as gs:
        tag,data = m.authenticate('gssapi', gs.callback)
    '''
service = ''
username = ''
context = None
state = STATE_ONE
def __init__(self, service, username):
'''service is in the form service@host
for example [email protected]
username will be passed to the server'''
self.service = service
self.username = username
def __enter__(self):
'''Initialize the GSS context.'''
rc,ctx = kerberos.authGSSClientInit(self.service)
if (rc != AUTH_GSS_CONTINUE and rc != AUTH_GSS_COMPLETE):
raise Exception("Bad GSSAPI return code: {}".format(rc))
self.context = ctx
return self
def __exit__(self, exc_type, exc_value, traceback):
'''Clean up the GSS context.'''
rc = kerberos.authGSSClientClean(self.context)
if (rc != AUTH_GSS_CONTINUE and rc != AUTH_GSS_COMPLETE):
raise Exception("Bad GSSAPI return code: {}".format(rc))
self.context = None
def callback(self,response):
'''Callback for use with imaplib.authenticate(mech,callback)
Will be repeatedly called with data from server,
and its return data passed back to server.
'''
response = "".join(str(response).encode('base64').splitlines())
## print "Entering callback with response={}".format(response)
ctx = self.context
if ctx is None:
raise Exception("GSS context is None.")
# GSSAPI SASL: two states
# First negotiate GSS security context
# Then negotiate security protection layer
if (self.state == STATE_ONE):
# Negotiating security context
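            # RFC 4752: keep feeding the server's challenge into
            # authGSSClientStep until the GSS context reports completion.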
rc = kerberos.authGSSClientStep(ctx, response)
if (rc != AUTH_GSS_CONTINUE and rc != AUTH_GSS_COMPLETE):
raise Exception("Bad GSSAPI return code: {}".format(rc))
elif (rc == AUTH_GSS_COMPLETE):
# -> State transition
self.state = STATE_TWO
payload = kerberos.authGSSClientResponse(ctx)
elif (self.state == STATE_TWO):
# Negotiating protection layer
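            # RFC 4752: the server's final token wraps its supported security
            # layers and maximum buffer size; unwrap it, choose a layer, and
            # reply with a wrapped token that also carries the username.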
rc = kerberos.authGSSClientUnwrap(ctx, response)
if (rc != AUTH_GSS_CONTINUE and rc != AUTH_GSS_COMPLETE):
raise Exception("Bad GSSAPI return code: {}".format(rc))
data = kerberos.authGSSClientResponse(ctx)
# At this point, the protocol says we should unwrap a
# security mask from the leading bytes of the decoded
# data. However we can't, because the C code in
# kerberosgss.c forces GSS_AUTH_P_NONE and also does
# not allow setting conf_flag in the wrap.
### Stuff we should do, but can't ###
# bytestring = base64.b64decode(data)
# bytes = struct.unpack('4B', bytestring)
# bufsiz = ((bytes[1] << 8) + bytes[2] << 8) + bytes[3]
# security_mask = bytes[0]
# for layer in 4,2,1:
# then choose a desired_security layer from security_mask
# bytestring = struct.pack('4B', desired_security, *bytes[1:])
# then wrap with conf_flag suitable for the desired_security
### End stuff ###
# So instead of unwrapping a security mask, we just
# assert that we use GSS_AUTH_P_NONE ('\x01')
bytestring = base64.b64decode(data)
newdata = '\x01' + bytestring[1:]
newdata = str(newdata).encode('base64')
rc = kerberos.authGSSClientWrap(ctx, newdata, self.username)
if (rc != AUTH_GSS_CONTINUE and rc != AUTH_GSS_COMPLETE):
raise Exception("Bad GSSAPI return code: {}".format(rc))
payload = kerberos.authGSSClientResponse(ctx)
else:
raise Exception("Unexpected state: {}".format(self.state))
if payload is None:
payload = ''
## print "Leaving callback with payload={}".format(payload)
payload = str(payload).decode('base64')
return payload
# If you were doing it by hand in a REPL, it might look like:
#
# m = imaplib.IMAP4_SSL('imap.exchange.mit.edu')
# service = '[email protected]'
# m.sslobj.send('x authenticate gssapi\r\n')
# ps = m.sslobj.recv() ; ps
# rc,vc = kerberos.authGSSClientInit(service)
# rc = kerberos.authGSSClientStep(vc,"") # is it 1 or 0 ? it is 0
# pc = kerberos.authGSSClientResponse(vc) ; pc
# m.sslobj.send((pc or '') + '\r\n')
# ps = m.sslobj.recv() ; ps
# ps = ps[2:].strip()
# rc = kerberos.authGSSClientStep(vc,ps) ; rc # is it 1 or 0 ? it is 1
# pc = kerberos.authGSSClientResponse(vc) ; pc
# m.sslobj.send((pc or '') + '\r\n')
# ps = m.sslobj.recv() ; ps
# ps = ps[2:].strip()
# rc = kerberos.authGSSClientUnwrap(vc,ps) ; rc
# pc = kerberos.authGSSClientResponse(vc) ; pc
### flags = base64.b64decode(pc)
# rc = kerberos.authGSSClientWrap(vc, pc, 'jmorzins') ; rc
# pc = kerberos.authGSSClientResponse(vc) ; pc
# m.sslobj.send((pc or '') + '\r\n')
# ps = m.sslobj.recv() ; ps
# rc = authGSSClientClean(vc)
# m.logout()
| mit | -6,011,563,831,559,920,000 | 35.465409 | 75 | 0.593825 | false |
TomBaxter/waterbutler | tests/core/test_metadata.py | 3 | 4228 | import hashlib
from tests import utils
class TestBaseMetadata:
def test_file_metadata(self):
file_metadata = utils.MockFileMetadata()
assert file_metadata.is_file is True
assert file_metadata.is_folder is False
def test_folder_metadata(self):
folder_metadata = utils.MockFolderMetadata()
assert folder_metadata.is_file is False
assert folder_metadata.is_folder is True
def test_file_json_api_serialize(self):
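        # The serialized id and links embed the provider name and path, and
        # the etag attribute is sha256("<provider>::<raw etag>").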
file_metadata = utils.MockFileMetadata()
serialized = file_metadata.json_api_serialized('n0d3z')
link_suffix = '/v1/resources/n0d3z/providers/MockProvider/Foo.name'
etag = hashlib.sha256('{}::{}'.format('MockProvider', 'etag').encode('utf-8')).hexdigest()
assert serialized['id'] == 'MockProvider/Foo.name'
assert serialized['type'] == 'files'
assert serialized['attributes'] == {
'extra': {},
'kind': 'file',
'name': 'Foo.name',
'path': '/Foo.name',
'provider': 'MockProvider',
'materialized': '/Foo.name',
'etag': etag,
'contentType': 'application/octet-stream',
'modified': 'never',
'modified_utc': 'never',
'created_utc': 'always',
'size': 1337,
'resource': 'n0d3z',
}
assert 'new_folder' not in serialized['links']
assert serialized['links']['move'].endswith(link_suffix)
assert serialized['links']['upload'].endswith(link_suffix + '?kind=file')
assert serialized['links']['download'].endswith(link_suffix)
assert serialized['links']['delete'].endswith(link_suffix)
def test_folder_json_api_serialize(self):
folder_metadata = utils.MockFolderMetadata()
serialized = folder_metadata.json_api_serialized('n0d3z')
link_suffix = '/v1/resources/n0d3z/providers/MockProvider/Bar/'
etag = hashlib.sha256('{}::{}'.format('MockProvider', 'etag').encode('utf-8')).hexdigest()
assert serialized['id'] == 'MockProvider/Bar/'
assert serialized['type'] == 'files'
assert serialized['attributes'] == {
'extra': {},
'kind': 'folder',
'name': 'Bar',
'path': '/Bar/',
'provider': 'MockProvider',
'materialized': '/Bar/',
'etag': etag,
'size': None,
'resource': 'n0d3z',
}
assert serialized['links']['new_folder'].endswith(link_suffix + '?kind=folder')
assert serialized['links']['move'].endswith(link_suffix)
assert serialized['links']['upload'].endswith(link_suffix + '?kind=file')
assert 'download' not in serialized['links']
assert serialized['links']['delete'].endswith(link_suffix)
def test_folder_json_api_size_serialize(self):
folder_metadata = utils.MockFolderMetadata()
folder_metadata.children = [utils.MockFileMetadata()]
serialized = folder_metadata.json_api_serialized('n0d3z')
child = serialized['attributes']['children'][0]
etag = hashlib.sha256('{}::{}'.format('MockProvider', 'etag').encode('utf-8')).hexdigest()
assert len(serialized['attributes']['children']) == 1
assert child == {
'extra': {},
'kind': 'file',
'name': 'Foo.name',
'path': '/Foo.name',
'provider': 'MockProvider',
'materialized': '/Foo.name',
'etag': etag,
'contentType': 'application/octet-stream',
'modified': 'never',
'modified_utc': 'never',
'created_utc': 'always',
'size': 1337,
}
def test_file_revision_json_api_serialize(self):
file_revision_metadata = utils.MockFileRevisionMetadata()
serialized = file_revision_metadata.json_api_serialized()
assert serialized['id'] == 1
assert serialized['type'] == 'file_versions'
assert serialized['attributes'] == {
'extra': {},
'version': 1,
'modified': 'never',
'modified_utc': 'never',
'versionIdentifier': 'versions',
}
| apache-2.0 | 2,801,363,890,221,615,000 | 37.788991 | 98 | 0.567408 | false |
briancoutinho0905/2dsampling | src/arch/x86/isa/insts/x87/data_transfer_and_conversion/convert_and_load_or_store_integer.py | 29 | 2493 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# Copyright (c) 2015 Advanced Micro Devices, Inc.
#
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Steve Reinhardt
microcode = '''
# fild common case
def macroop FILD_M {
ldifp87 ufp1, seg, sib, disp
movfp st(-1), ufp1, spm=-1
};
# fild with RIP-relative addressing
def macroop FILD_P {
rdip t7
ldifp87 ufp1, seg, riprel, disp
movfp st(-1), ufp1, spm=-1
};
# FIST
# FISTP
# FISTTP
'''
| bsd-3-clause | -2,986,842,188,052,121,600 | 41.982759 | 72 | 0.771761 | false |
jeremyfix/pylearn2 | pylearn2/datasets/preprocessing.py | 5 | 58496 | """
Functionality for preprocessing Datasets.
"""
__authors__ = "Ian Goodfellow, David Warde-Farley, Guillaume Desjardins, " \
"and Mehdi Mirza"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "David Warde-Farley", "Guillaume Desjardins",
"Mehdi Mirza"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import copy
import logging
import time
import warnings
import os
import numpy
from theano.compat.six.moves import xrange
import scipy
try:
from scipy import linalg
except ImportError:
warnings.warn("Could not import scipy.linalg")
import theano
from theano import function, tensor
from pylearn2.blocks import Block
from pylearn2.linear.conv2d import Conv2D
from pylearn2.space import Conv2DSpace, VectorSpace
from pylearn2.expr.preprocessing import global_contrast_normalize
from pylearn2.utils.insert_along_axis import insert_columns
from pylearn2.utils import sharedX
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.rng import make_np_rng
from pylearn2.utils import contains_nan
log = logging.getLogger(__name__)
convert_axes = Conv2DSpace.convert_numpy
class Preprocessor(object):
"""
Abstract class.
An object that can preprocess a dataset.
Preprocessing a dataset implies changing the data that
a dataset actually stores. This can be useful to save
memory--if you know you are always going to access only
the same processed version of the dataset, it is better
to process it once and discard the original.
Preprocessors are capable of modifying many aspects of
a dataset. For example, they can change the way that it
converts between different formats of data. They can
change the number of examples that a dataset stores.
In other words, preprocessors can do a lot more than
just example-wise transformations of the examples stored
in the dataset.
"""
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
Parameters
----------
dataset : Dataset
The dataset to act on.
can_fit : bool
If True, the Preprocessor can adapt internal parameters
based on the contents of dataset. Otherwise it must not
fit any parameters, or must re-use old ones.
Subclasses should still have this default to False, so
that the behavior of the preprocessors is uniform.
Notes
-----
Typical usage:
.. code-block:: python
# Learn PCA preprocessing and apply it to the training set
my_pca_preprocessor.apply(training_set, can_fit = True)
# Now apply the same transformation to the test set
my_pca_preprocessor.apply(test_set, can_fit = False)
This method must take a dataset, rather than a numpy ndarray, for a
variety of reasons:
- Preprocessors should work on any dataset, and not all
datasets will store their data as ndarrays.
- Preprocessors often need to change a dataset's
metadata. For example, suppose you have a
DenseDesignMatrix dataset of images. If you implement
a fovea Preprocessor that reduces the dimensionality
of images by sampling them finely near the center and
coarsely with blurring at the edges, then your
preprocessor will need to change the way that the
dataset converts example vectors to images for
visualization.
"""
raise NotImplementedError(str(type(self)) +
" does not implement an apply method.")
def invert(self):
"""
Do any necessary prep work to be able to support the "inverse" method
later. Default implementation is no-op.
"""
pass
class ExamplewisePreprocessor(Preprocessor):
"""
Abstract class.
A Preprocessor that restricts the actions it can do in its
apply method so that it could be implemented as a Block's
perform method.
In other words, this Preprocessor can't modify the Dataset's
metadata, etc.
TODO: can these things fit themselves in their apply method?
That seems like a difference from Block.
"""
def as_block(self):
raise NotImplementedError(str(type(self)) +
" does not implement as_block.")
class BlockPreprocessor(ExamplewisePreprocessor):
"""
An ExamplewisePreprocessor implemented by a Block.
Parameters
----------
block : WRITEME
"""
def __init__(self, block):
self.block = block
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
assert not can_fit
dataset.X = self.block.perform(dataset.X)
class Pipeline(Preprocessor):
"""
A Preprocessor that sequentially applies a list
of other Preprocessors.
Parameters
----------
items : WRITEME
"""
def __init__(self, items=None):
self.items = items if items is not None else []
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
for item in self.items:
item.apply(dataset, can_fit)
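# Usage sketch (illustrative only; the dataset and preprocessor objects below
# are hypothetical DenseDesignMatrix-style instances):
#
#   pipeline = Pipeline(items=[GlobalContrastNormalization(), ZCA()])
#   pipeline.apply(train_set, can_fit=True)   # fit any statistics on the training set
#   pipeline.apply(test_set, can_fit=False)   # reuse the fitted statistics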
class ExtractGridPatches(Preprocessor):
"""
Converts a dataset of images into a dataset of patches extracted along a
regular grid from each image. The order of the images is
preserved.
Parameters
----------
patch_shape : WRITEME
patch_stride : WRITEME
"""
def __init__(self, patch_shape, patch_stride):
self.patch_shape = patch_shape
self.patch_stride = patch_stride
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
X = dataset.get_topological_view()
num_topological_dimensions = len(X.shape) - 2
if num_topological_dimensions != len(self.patch_shape):
raise ValueError("ExtractGridPatches with "
+ str(len(self.patch_shape))
+ " topological dimensions called on"
+ " dataset with " +
str(num_topological_dimensions) + ".")
num_patches = X.shape[0]
max_strides = [X.shape[0] - 1]
for i in xrange(num_topological_dimensions):
patch_width = self.patch_shape[i]
data_width = X.shape[i + 1]
last_valid_coord = data_width - patch_width
if last_valid_coord < 0:
raise ValueError('On topological dimension ' + str(i) +
', the data has width ' + str(data_width) +
' but the requested patch width is ' +
str(patch_width))
stride = self.patch_stride[i]
if stride == 0:
max_stride_this_axis = 0
else:
max_stride_this_axis = last_valid_coord / stride
num_strides_this_axis = max_stride_this_axis + 1
max_strides.append(max_stride_this_axis)
num_patches *= num_strides_this_axis
# batch size
output_shape = [num_patches]
# topological dimensions
for dim in self.patch_shape:
output_shape.append(dim)
# number of channels
output_shape.append(X.shape[-1])
output = numpy.zeros(output_shape, dtype=X.dtype)
channel_slice = slice(0, X.shape[-1])
coords = [0] * (num_topological_dimensions + 1)
keep_going = True
i = 0
while keep_going:
args = [coords[0]]
for j in xrange(num_topological_dimensions):
coord = coords[j + 1] * self.patch_stride[j]
args.append(slice(coord, coord + self.patch_shape[j]))
args.append(channel_slice)
patch = X[args]
output[i, :] = patch
i += 1
# increment coordinates
j = 0
keep_going = False
while not keep_going:
if coords[-(j + 1)] < max_strides[-(j + 1)]:
coords[-(j + 1)] += 1
keep_going = True
else:
coords[-(j + 1)] = 0
if j == num_topological_dimensions:
break
j = j + 1
dataset.set_topological_view(output)
# fix labels
if dataset.y is not None:
dataset.y = numpy.repeat(dataset.y, num_patches / X.shape[0])
class ReassembleGridPatches(Preprocessor):
"""
Converts a dataset of patches into a dataset of full examples.
This is the inverse of ExtractGridPatches for patch_stride=patch_shape.
Parameters
----------
orig_shape : WRITEME
patch_shape : WRITEME
"""
def __init__(self, orig_shape, patch_shape):
self.patch_shape = patch_shape
self.orig_shape = orig_shape
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
patches = dataset.get_topological_view()
num_topological_dimensions = len(patches.shape) - 2
if num_topological_dimensions != len(self.patch_shape):
raise ValueError("ReassembleGridPatches with " +
str(len(self.patch_shape)) +
" topological dimensions called on dataset " +
" with " +
str(num_topological_dimensions) + ".")
num_patches = patches.shape[0]
num_examples = num_patches
for im_dim, patch_dim in zip(self.orig_shape, self.patch_shape):
if im_dim % patch_dim != 0:
raise Exception('Trying to assemble patches of shape ' +
str(self.patch_shape) + ' into images of ' +
'shape ' + str(self.orig_shape))
patches_this_dim = im_dim / patch_dim
if num_examples % patches_this_dim != 0:
raise Exception('Trying to re-assemble ' + str(num_patches) +
' patches of shape ' + str(self.patch_shape) +
' into images of shape ' + str(self.orig_shape)
)
num_examples /= patches_this_dim
# batch size
reassembled_shape = [num_examples]
# topological dimensions
for dim in self.orig_shape:
reassembled_shape.append(dim)
# number of channels
reassembled_shape.append(patches.shape[-1])
reassembled = numpy.zeros(reassembled_shape, dtype=patches.dtype)
channel_slice = slice(0, patches.shape[-1])
coords = [0] * (num_topological_dimensions + 1)
max_strides = [num_examples - 1]
for dim, pd in zip(self.orig_shape, self.patch_shape):
assert dim % pd == 0
max_strides.append(dim / pd - 1)
keep_going = True
i = 0
while keep_going:
args = [coords[0]]
for j in xrange(num_topological_dimensions):
coord = coords[j + 1]
args.append(slice(coord * self.patch_shape[j],
(coord + 1) * self.patch_shape[j]))
next_shape_coord = reassembled.shape[j + 1]
assert (coord + 1) * self.patch_shape[j] <= next_shape_coord
args.append(channel_slice)
try:
patch = patches[i, :]
except IndexError:
reraise_as(IndexError('Gave index of ' + str(i) +
', : into thing of shape ' +
str(patches.shape)))
reassembled[args] = patch
i += 1
j = 0
keep_going = False
while not keep_going:
if coords[-(j + 1)] < max_strides[-(j + 1)]:
coords[-(j + 1)] += 1
keep_going = True
else:
coords[-(j + 1)] = 0
if j == num_topological_dimensions:
break
j = j + 1
dataset.set_topological_view(reassembled)
# fix labels
if dataset.y is not None:
dataset.y = dataset.y[::patches.shape[0] / reassembled_shape[0]]
class ExtractPatches(Preprocessor):
"""
Converts an image dataset into a dataset of patches
extracted at random from the original dataset.
Parameters
----------
patch_shape : WRITEME
num_patches : WRITEME
rng : WRITEME
"""
def __init__(self, patch_shape, num_patches, rng=None):
self.patch_shape = patch_shape
self.num_patches = num_patches
self.start_rng = make_np_rng(copy.copy(rng),
[1, 2, 3],
which_method="randint")
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
rng = copy.copy(self.start_rng)
X = dataset.get_topological_view()
num_topological_dimensions = len(X.shape) - 2
if num_topological_dimensions != len(self.patch_shape):
raise ValueError("ExtractPatches with "
+ str(len(self.patch_shape))
+ " topological dimensions called on "
+ "dataset with "
+ str(num_topological_dimensions) + ".")
# batch size
output_shape = [self.num_patches]
# topological dimensions
for dim in self.patch_shape:
output_shape.append(dim)
# number of channels
output_shape.append(X.shape[-1])
output = numpy.zeros(output_shape, dtype=X.dtype)
channel_slice = slice(0, X.shape[-1])
for i in xrange(self.num_patches):
args = []
args.append(rng.randint(X.shape[0]))
for j in xrange(num_topological_dimensions):
max_coord = X.shape[j + 1] - self.patch_shape[j]
coord = rng.randint(max_coord + 1)
args.append(slice(coord, coord + self.patch_shape[j]))
args.append(channel_slice)
output[i, :] = X[args]
dataset.set_topological_view(output)
dataset.y = None
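# Usage sketch (illustrative only; train_set is a hypothetical image dataset
# whose topological view has shape (examples, rows, cols, channels)):
#
#   ExtractPatches(patch_shape=(8, 8), num_patches=100000).apply(train_set)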
class ExamplewiseUnitNormBlock(Block):
"""
A block that takes n-tensors, with training examples indexed along
the first axis, and normalizes each example to lie on the unit
sphere.
Parameters
----------
input_space : WRITEME
"""
def __init__(self, input_space=None):
super(ExamplewiseUnitNormBlock, self).__init__()
self.input_space = input_space
def __call__(self, batch):
"""
.. todo::
WRITEME
"""
if self.input_space:
self.input_space.validate(batch)
squared_batch = batch ** 2
squared_norm = squared_batch.sum(axis=1)
norm = tensor.sqrt(squared_norm)
return batch / norm
def set_input_space(self, space):
"""
.. todo::
WRITEME
"""
self.input_space = space
def get_input_space(self):
"""
.. todo::
WRITEME
"""
if self.input_space is not None:
return self.input_space
raise ValueError("No input space was specified for this Block (%s). "
"You can call set_input_space to correct that." %
str(self))
def get_output_space(self):
"""
.. todo::
WRITEME
"""
return self.get_input_space()
class MakeUnitNorm(ExamplewisePreprocessor):
"""
.. todo::
WRITEME
"""
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
X = dataset.get_design_matrix()
X_norm = numpy.sqrt(numpy.sum(X ** 2, axis=1))
X /= X_norm[:, None]
dataset.set_design_matrix(X)
def as_block(self):
"""
.. todo::
WRITEME
"""
return ExamplewiseUnitNormBlock()
class ExamplewiseAddScaleTransform(Block):
"""
A block that encodes an per-feature addition/scaling transform.
The addition/scaling can be done in either order.
Parameters
----------
add : array_like or scalar, optional
Array or array-like object or scalar, to be added to each
training example by this Block.
multiply : array_like, optional
Array or array-like object or scalar, to be element-wise
multiplied with each training example by this Block.
multiply_first : bool, optional
Whether to perform the multiplication before the addition.
(default is False).
input_space : Space, optional
The input space describing the data
"""
def __init__(self, add=None, multiply=None, multiply_first=False,
input_space=None):
self._add = numpy.asarray(add) if add is not None else None
self._multiply = numpy.asarray(multiply) if multiply is not None else None
# TODO: put the constant somewhere sensible.
if multiply is not None:
self._has_zeros = numpy.any(abs(multiply) < 1e-14)
else:
self._has_zeros = False
self._multiply_first = multiply_first
self.input_space = input_space
def _do_multiply(self, batch):
"""
.. todo::
WRITEME
"""
if self._multiply is not None:
batch *= self._multiply
return batch
def _do_add(self, batch):
"""
.. todo::
WRITEME
"""
if self._add is not None:
batch += self._add
return batch
def __call__(self, batch):
"""
.. todo::
WRITEME
"""
if self.input_space:
self.input_space.validate(batch)
if self._multiply_first:
batch = self._do_add(self._do_multiply(batch))
else:
batch = self._do_multiply(self._do_add(batch))
return batch
def inverse(self):
"""
.. todo::
WRITEME
"""
if self._multiply is not None and self._has_zeros:
raise ZeroDivisionError("%s transformation not invertible "
"due to (near-) zeros in multiplicand" %
self.__class__.__name__)
else:
mult_inverse = self._multiply ** -1.
return self.__class__(add=-self._add, multiply=mult_inverse,
multiply_first=not self._multiply_first)
def set_input_space(self, space):
"""
.. todo::
WRITEME
"""
self.input_space = space
def get_input_space(self):
"""
.. todo::
WRITEME
"""
if self.input_space is not None:
return self.input_space
raise ValueError("No input space was specified for this Block (%s). "
"You can call set_input_space to correct that." %
str(self))
def get_output_space(self):
"""
.. todo::
WRITEME
"""
return self.get_input_space()
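# Added note: with the default multiply_first=False this block computes
# (batch + add) * multiply, and with multiply_first=True it computes
# batch * multiply + add; inverse() negates/inverts the constants and flips
# the order, so applying the block followed by its inverse returns the input.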
class RemoveMean(ExamplewisePreprocessor):
"""
Subtracts the mean along a given axis, or from every element
if `axis=None`.
Parameters
----------
axis : int or None, optional
Axis over which to take the mean, with the exact same
semantics as the `axis` parameter of `numpy.mean`.
"""
def __init__(self, axis=0):
self._axis = axis
self._mean = None
def apply(self, dataset, can_fit=True):
"""
.. todo::
WRITEME
"""
X = dataset.get_design_matrix()
if can_fit:
self._mean = X.mean(axis=self._axis)
else:
if self._mean is None:
raise ValueError("can_fit is False, but RemoveMean object "
"has no stored mean or standard deviation")
X -= self._mean
dataset.set_design_matrix(X)
def as_block(self):
"""
.. todo::
WRITEME
"""
if self._mean is None:
raise ValueError("can't convert %s to block without fitting"
% self.__class__.__name__)
return ExamplewiseAddScaleTransform(add=-self._mean)
class Standardize(ExamplewisePreprocessor):
"""
Subtracts the mean and divides by the standard deviation.
Parameters
----------
global_mean : bool, optional
If `True`, subtract the (scalar) mean over every element
in the design matrix. If `False`, subtract the mean from
each column (feature) separately. Default is `False`.
global_std : bool, optional
If `True`, after centering, divide by the (scalar) standard
deviation of every element in the design matrix. If `False`,
divide by the column-wise (per-feature) standard deviation.
Default is `False`.
std_eps : float, optional
Stabilization factor added to the standard deviations before
dividing, to prevent standard deviations very close to zero
from causing the feature values to blow up too much.
Default is `1e-4`.
"""
def __init__(self, global_mean=False, global_std=False, std_eps=1e-4):
self._global_mean = global_mean
self._global_std = global_std
self._std_eps = std_eps
self._mean = None
self._std = None
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
X = dataset.get_design_matrix()
if can_fit:
self._mean = X.mean() if self._global_mean else X.mean(axis=0)
self._std = X.std() if self._global_std else X.std(axis=0)
else:
if self._mean is None or self._std is None:
raise ValueError("can_fit is False, but Standardize object "
"has no stored mean or standard deviation")
new = (X - self._mean) / (self._std_eps + self._std)
dataset.set_design_matrix(new)
def as_block(self):
"""
.. todo::
WRITEME
"""
if self._mean is None or self._std is None:
raise ValueError("can't convert %s to block without fitting"
% self.__class__.__name__)
return ExamplewiseAddScaleTransform(add=-self._mean,
multiply=self._std ** -1)
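# Usage sketch (illustrative only; the dataset objects are hypothetical):
#
#   standardize = Standardize(global_mean=False, global_std=False)
#   standardize.apply(train_set, can_fit=True)    # fit per-feature mean/std
#   standardize.apply(test_set, can_fit=False)    # reuse the training statistics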
class ColumnSubsetBlock(Block):
"""
.. todo::
WRITEME
"""
def __init__(self, columns, total):
self._columns = columns
self._total = total
def __call__(self, batch):
"""
.. todo::
WRITEME
"""
if batch.ndim != 2:
raise ValueError("Only two-dimensional tensors are supported")
return batch.dimshuffle(1, 0)[self._columns].dimshuffle(1, 0)
def inverse(self):
"""
.. todo::
WRITEME
"""
return ZeroColumnInsertBlock(self._columns, self._total)
def get_input_space(self):
"""
.. todo::
WRITEME
"""
return VectorSpace(dim=self._total)
def get_output_space(self):
"""
.. todo::
WRITEME
"""
return VectorSpace(dim=self._columns)
class ZeroColumnInsertBlock(Block):
def __init__(self, columns, total):
"""
.. todo::
WRITEME
"""
self._columns = columns
self._total = total
def __call__(self, batch):
"""
.. todo::
WRITEME
"""
if batch.ndim != 2:
raise ValueError("Only two-dimensional tensors are supported")
return insert_columns(batch, self._total, self._columns)
def inverse(self):
"""
.. todo::
WRITEME
"""
return ColumnSubsetBlock(self._columns, self._total)
def get_input_space(self):
"""
.. todo::
WRITEME
"""
return VectorSpace(dim=self._columns)
def get_output_space(self):
"""
.. todo::
WRITEME
"""
return VectorSpace(dim=self._total)
class RemoveZeroColumns(ExamplewisePreprocessor):
"""
.. todo::
WRITEME
"""
_eps = 1e-8
def __init__(self):
self._block = None
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
design_matrix = dataset.get_design_matrix()
mean = design_matrix.mean(axis=0)
var = design_matrix.var(axis=0)
columns, = numpy.where((var < self._eps) & (mean < self._eps))
self._block = ColumnSubsetBlock
def as_block(self):
"""
.. todo::
WRITEME
"""
if self._block is None:
raise ValueError("can't convert %s to block without fitting"
% self.__class__.__name__)
return self._block
class RemapInterval(ExamplewisePreprocessor):
"""
.. todo::
WRITEME
"""
# TODO: Implement as_block
def __init__(self, map_from, map_to):
assert map_from[0] < map_from[1] and len(map_from) == 2
assert map_to[0] < map_to[1] and len(map_to) == 2
self.map_from = [numpy.float(x) for x in map_from]
self.map_to = [numpy.float(x) for x in map_to]
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
X = dataset.get_design_matrix()
X = (X - self.map_from[0]) / numpy.diff(self.map_from)
X = X * numpy.diff(self.map_to) + self.map_to[0]
dataset.set_design_matrix(X)
class PCA_ViewConverter(object):
"""
.. todo::
WRITEME
Parameters
----------
to_pca : WRITEME
to_input : WRITEME
to_weights : WRITEME
orig_view_converter : WRITEME
"""
def __init__(self, to_pca, to_input, to_weights, orig_view_converter):
self.to_pca = to_pca
self.to_input = to_input
self.to_weights = to_weights
if orig_view_converter is None:
raise ValueError("It doesn't make any sense to make a PCA view "
"converter when there's no original view "
"converter to define a topology in the first "
"place.")
self.orig_view_converter = orig_view_converter
def view_shape(self):
"""
.. todo::
WRITEME
"""
return self.orig_view_converter.shape
def design_mat_to_topo_view(self, X):
"""
.. todo::
WRITEME
"""
to_input = self.to_input(X)
return self.orig_view_converter.design_mat_to_topo_view(to_input)
def design_mat_to_weights_view(self, X):
"""
.. todo::
WRITEME
"""
to_weights = self.to_weights(X)
return self.orig_view_converter.design_mat_to_weights_view(to_weights)
def topo_view_to_design_mat(self, V):
"""
.. todo::
WRITEME
"""
return self.to_pca(self.orig_view_converter.topo_view_to_design_mat(V))
def get_formatted_batch(self, batch, dspace):
"""
.. todo::
WRITEME
"""
if isinstance(dspace, VectorSpace):
# Return the batch in the original storage space
dspace.np_validate(batch)
return batch
else:
# Uncompress and go through the original view converter
to_input = self.to_input(batch)
return self.orig_view_converter.get_formatted_batch(to_input,
dspace)
class PCA(object):
"""
.. todo::
WRITEME
Parameters
----------
num_components : WRITEME
"""
def __init__(self, num_components):
self._num_components = num_components
self._pca = None
# TODO: Is storing these really necessary? This computation
# can't really be merged since we're basically creating the
# functions in apply(); I see no reason to keep these around.
self._input = tensor.matrix()
self._output = tensor.matrix()
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
if self._pca is None:
if not can_fit:
raise ValueError("can_fit is False, but PCA preprocessor "
"object has no fitted model stored")
from pylearn2 import pca
self._pca = pca.CovEigPCA(self._num_components)
self._pca.train(dataset.get_design_matrix())
self._transform_func = function([self._input],
self._pca(self._input))
self._invert_func = function([self._output],
self._pca.reconstruct(self._output))
self._convert_weights_func = function(
[self._output],
self._pca.reconstruct(self._output, add_mean=False)
)
orig_data = dataset.get_design_matrix()
dataset.set_design_matrix(
self._transform_func(dataset.get_design_matrix())
)
proc_data = dataset.get_design_matrix()
orig_var = orig_data.var(axis=0)
proc_var = proc_data.var(axis=0)
assert proc_var[0] > orig_var.max()
log.info('original variance: {0}'.format(orig_var.sum()))
log.info('processed variance: {0}'.format(proc_var.sum()))
if hasattr(dataset, 'view_converter'):
if dataset.view_converter is not None:
new_converter = PCA_ViewConverter(self._transform_func,
self._invert_func,
self._convert_weights_func,
dataset.view_converter)
dataset.view_converter = new_converter
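# Usage sketch (illustrative only; train_set is a hypothetical dataset):
#
#   pca = PCA(num_components=64)
#   pca.apply(train_set, can_fit=True)   # fits the PCA model and projects the data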
class Downsample(object):
"""
Downsamples the topological view
Parameters
----------
sampling_factor : list or array
One element for each topological
dimension of the data
"""
def __init__(self, sampling_factor):
self.sampling_factor = sampling_factor
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
X = dataset.get_topological_view()
d = len(X.shape) - 2
assert d in [2, 3]
assert X.dtype == 'float32' or X.dtype == 'float64'
if d == 2:
X = X.reshape([X.shape[0], X.shape[1], X.shape[2], 1, X.shape[3]])
kernel_size = 1
kernel_shape = [X.shape[-1]]
for factor in self.sampling_factor:
kernel_size *= factor
kernel_shape.append(factor)
if d == 2:
kernel_shape.append(1)
kernel_shape.append(X.shape[-1])
kernel_value = 1. / float(kernel_size)
kernel = numpy.zeros(kernel_shape, dtype=X.dtype)
for i in xrange(X.shape[-1]):
kernel[i, :, :, :, i] = kernel_value
from theano.tensor.nnet.Conv3D import conv3D
X_var = tensor.TensorType(broadcastable=[s == 1 for s in X.shape],
dtype=X.dtype)()
downsampled = conv3D(X_var, kernel, numpy.zeros(X.shape[-1], X.dtype),
kernel_shape[1:-1])
f = function([X_var], downsampled)
X = f(X)
if d == 2:
X = X.reshape([X.shape[0], X.shape[1], X.shape[2], X.shape[4]])
dataset.set_topological_view(X)
class GlobalContrastNormalization(Preprocessor):
"""
.. todo::
WRITEME properly
See the docstring for `global_contrast_normalize` in
`pylearn2.expr.preprocessing`.
Parameters
----------
batch_size : int or None, optional
If specified, read, apply and write the transformed data
in batches no larger than `batch_size`.
sqrt_bias : float, optional
Defaults to 0 if nothing is specified
use_std : bool, optional
Defaults to False if nothing is specified
"""
def __init__(self, subtract_mean=True,
scale=1., sqrt_bias=0., use_std=False, min_divisor=1e-8,
batch_size=None):
self._subtract_mean = subtract_mean
self._use_std = use_std
self._sqrt_bias = sqrt_bias
self._scale = scale
self._min_divisor = min_divisor
if batch_size is not None:
batch_size = int(batch_size)
assert batch_size > 0, "batch_size must be positive"
self._batch_size = batch_size
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
if self._batch_size is None:
X = global_contrast_normalize(dataset.get_design_matrix(),
scale=self._scale,
subtract_mean=self._subtract_mean,
use_std=self._use_std,
sqrt_bias=self._sqrt_bias,
min_divisor=self._min_divisor)
dataset.set_design_matrix(X)
else:
data = dataset.get_design_matrix()
data_size = data.shape[0]
last = (numpy.floor(data_size / float(self._batch_size)) *
self._batch_size)
for i in xrange(0, data_size, self._batch_size):
stop = i + self._batch_size
log.info("GCN processing data from %d to %d" % (i, stop))
X = data[i:stop]
X = global_contrast_normalize(
X,
scale=self._scale,
subtract_mean=self._subtract_mean,
use_std=self._use_std,
sqrt_bias=self._sqrt_bias,
min_divisor=self._min_divisor)
dataset.set_design_matrix(X, start=i)
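# Added note: global_contrast_normalize (pylearn2.expr.preprocessing) optionally
# subtracts each example's mean, then divides each example by roughly its norm
# or standard deviation (depending on use_std), with sqrt_bias added for
# numerical stability, divisors smaller than min_divisor left unscaled, and the
# result multiplied by scale; see that function for the exact formula.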
class ZCA(Preprocessor):
"""
Performs ZCA whitening.
.. TODO::
WRITEME properly
add reference
Parameters
----------
n_components : WRITEME
n_drop_components : WRITEME
filter_bias : float, optional
TODO: verify that default of 0.1 is what was used in the
Coates and Ng paper, add reference
store_inverse : bool, optional
When self.apply(dataset, can_fit=True) store not just the
preprocessing matrix, but its inverse. This is necessary when
using this preprocessor to instantiate a ZCA_Dataset.
"""
def __init__(self, n_components=None, n_drop_components=None,
filter_bias=0.1, store_inverse=True):
warnings.warn("This ZCA preprocessor class is known to yield very "
"different results on different platforms. If you plan "
"to conduct experiments with this preprocessing on "
"multiple machines, it is probably a good idea to do "
"the preprocessing on a single machine and copy the "
"preprocessed datasets to the others, rather than "
"preprocessing the data independently in each "
"location.")
# TODO: test to see if differences across platforms
# e.g., preprocessing STL-10 patches in LISA lab versus on
# Ian's Ubuntu 11.04 machine
# are due to the problem having a bad condition number or due to
# different version numbers of scipy or something
self.n_components = n_components
self.n_drop_components = n_drop_components
self.copy = True
self.filter_bias = numpy.cast[theano.config.floatX](filter_bias)
self.has_fit_ = False
self.store_inverse = store_inverse
self.P_ = None # set by fit()
self.inv_P_ = None # set by fit(), if self.store_inverse is True
# Analogous to DenseDesignMatrix.design_loc. If not None, the
# matrices P_ and inv_P_ will be saved together in <save_path>
# (or <save_path>.npz, if the suffix is omitted).
self.matrices_save_path = None
@staticmethod
def _gpu_matrix_dot(matrix_a, matrix_b, matrix_c=None):
"""
Performs matrix multiplication.
Attempts to use the GPU if it's available. If the matrix multiplication
is too big to fit on the GPU, this falls back to the CPU after issuing
a warning.
Parameters
----------
matrix_a : WRITEME
matrix_b : WRITEME
matrix_c : WRITEME
"""
if not hasattr(ZCA._gpu_matrix_dot, 'theano_func'):
ma, mb = theano.tensor.matrices('A', 'B')
mc = theano.tensor.dot(ma, mb)
ZCA._gpu_matrix_dot.theano_func = \
theano.function([ma, mb], mc, allow_input_downcast=True)
theano_func = ZCA._gpu_matrix_dot.theano_func
try:
if matrix_c is None:
return theano_func(matrix_a, matrix_b)
else:
matrix_c[...] = theano_func(matrix_a, matrix_b)
return matrix_c
except MemoryError:
warnings.warn('Matrix multiplication too big to fit on GPU. '
'Re-doing with CPU. Consider using '
'THEANO_FLAGS="device=cpu" for your next '
'preprocessor run')
return numpy.dot(matrix_a, matrix_b, matrix_c)
@staticmethod
def _gpu_mdmt(mat, diags):
"""
Performs the matrix multiplication M * D * M^T.
First tries to do this on the GPU. If this throws a MemoryError, it
falls back to the CPU, with a warning message.
Parameters
----------
mat : WRITEME
diags : WRITEME
"""
floatX = theano.config.floatX
# compile theano function
if not hasattr(ZCA._gpu_mdmt, 'theano_func'):
t_mat = theano.tensor.matrix('M')
t_diags = theano.tensor.vector('D')
result = theano.tensor.dot(t_mat * t_diags, t_mat.T)
ZCA._gpu_mdmt.theano_func = theano.function(
[t_mat, t_diags],
result,
allow_input_downcast=True)
try:
# function()-call above had to downcast the data. Emit warnings.
if str(mat.dtype) != floatX:
warnings.warn('Implicitly converting mat from dtype=%s to '
'%s for gpu' % (mat.dtype, floatX))
if str(diags.dtype) != floatX:
warnings.warn('Implicitly converting diag from dtype=%s to '
'%s for gpu' % (diags.dtype, floatX))
return ZCA._gpu_mdmt.theano_func(mat, diags)
except MemoryError:
# fall back to cpu
warnings.warn('M * D * M^T was too big to fit on GPU. '
'Re-doing with CPU. Consider using '
'THEANO_FLAGS="device=cpu" for your next '
'preprocessor run')
return numpy.dot(mat * diags, mat.T)
def set_matrices_save_path(self, matrices_save_path):
"""
Analogous to DenseDesignMatrix.use_design_loc().
If a matrices_save_path is set, when this ZCA is pickled, the internal
parameter matrices will be saved separately to `matrices_save_path`, as
a numpy .npz archive. This uses half the memory that a normal pickling
does.
Parameters
----------
matrices_save_path : WRITEME
"""
if matrices_save_path is not None:
assert isinstance(matrices_save_path, str)
matrices_save_path = os.path.abspath(matrices_save_path)
if os.path.isdir(matrices_save_path):
raise IOError('Matrix save path "%s" must not be an existing '
'directory.' % matrices_save_path)
assert matrices_save_path[-1] not in ('/', '\\')
if not os.path.isdir(os.path.split(matrices_save_path)[0]):
raise IOError('Couldn\'t find parent directory:\n'
'\t"%s"\n'
'\t of matrix path\n'
'\t"%s"' % (os.path.split(matrices_save_path)[0],
matrices_save_path))
self.matrices_save_path = matrices_save_path
def __getstate__(self):
"""
Used by pickle. Returns a dictionary to pickle in place of
self.__dict__.
If self.matrices_save_path is set, this saves the matrices P_ and
inv_P_ separately in matrices_save_path as a .npz archive, which uses
much less space & memory than letting pickle handle them.
"""
result = copy.copy(self.__dict__) # shallow copy
if self.matrices_save_path is not None:
matrices = {'P_': self.P_}
if self.inv_P_ is not None:
matrices['inv_P_'] = self.inv_P_
numpy.savez(self.matrices_save_path, **matrices)
# Removes the matrices from the dictionary to be pickled.
for key, matrix in matrices.items():
del result[key]
return result
def __setstate__(self, state):
"""
Used to unpickle.
Parameters
----------
state : dict
The dictionary created by __setstate__, presumably unpickled
from disk.
"""
# Patch old pickle files
if 'matrices_save_path' not in state:
state['matrices_save_path'] = None
if state['matrices_save_path'] is not None:
matrices = numpy.load(state['matrices_save_path'])
# puts matrices' items into state, overriding any colliding keys in
# state.
state = dict(state.items() + matrices.items())
del matrices
self.__dict__.update(state)
def fit(self, X):
"""
Fits this `ZCA` instance to a design matrix `X`.
Parameters
----------
X : ndarray
A matrix where each row is a datum.
Notes
-----
Implementation details:
Stores result as `self.P_`.
If self.store_inverse is true, this also computes `self.inv_P_`.
"""
assert X.dtype in ['float32', 'float64']
assert not contains_nan(X)
assert len(X.shape) == 2
n_samples = X.shape[0]
if self.copy:
X = X.copy()
# Center data
self.mean_ = numpy.mean(X, axis=0)
X -= self.mean_
log.info('computing zca of a {0} matrix'.format(X.shape))
t1 = time.time()
bias = self.filter_bias * scipy.sparse.identity(X.shape[1],
theano.config.floatX)
covariance = ZCA._gpu_matrix_dot(X.T, X) / X.shape[0] + bias
t2 = time.time()
log.info("cov estimate took {0} seconds".format(t2 - t1))
t1 = time.time()
eigs, eigv = linalg.eigh(covariance)
t2 = time.time()
log.info("eigh() took {0} seconds".format(t2 - t1))
assert not contains_nan(eigs)
assert not contains_nan(eigv)
assert eigs.min() > 0
if self.n_components:
eigs = eigs[:self.n_components]
eigv = eigv[:, :self.n_components]
if self.n_drop_components:
eigs = eigs[self.n_drop_components:]
eigv = eigv[:, self.n_drop_components:]
t1 = time.time()
sqrt_eigs = numpy.sqrt(eigs)
try:
self.P_ = ZCA._gpu_mdmt(eigv, 1.0 / sqrt_eigs)
except MemoryError:
warnings.warn('Whitening matrix computation too big to fit on GPU. '
'Re-doing with CPU. Consider using '
'THEANO_FLAGS="device=cpu" for your next '
'preprocessor run')
self.P_ = numpy.dot(eigv * (1.0 / sqrt_eigs), eigv.T)
t2 = time.time()
assert not contains_nan(self.P_)
self.has_fit_ = True
if self.store_inverse:
self.inv_P_ = ZCA._gpu_mdmt(eigv, sqrt_eigs)
else:
self.inv_P_ = None
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
# Compiles apply.x_minus_mean_times_p(), a numeric Theano function that
# evaluates dot(X - mean, P)
if not hasattr(ZCA, '_x_minus_mean_times_p'):
x_symbol = tensor.matrix('X')
mean_symbol = tensor.vector('mean')
p_symbol = tensor.matrix('P_')
new_x_symbol = tensor.dot(x_symbol - mean_symbol, p_symbol)
ZCA._x_minus_mean_times_p = theano.function([x_symbol,
mean_symbol,
p_symbol],
new_x_symbol)
X = dataset.get_design_matrix()
assert X.dtype in ['float32', 'float64']
if not self.has_fit_:
assert can_fit
self.fit(X)
new_X = ZCA._gpu_matrix_dot(X - self.mean_, self.P_)
dataset.set_design_matrix(new_X)
def inverse(self, X):
"""
.. todo::
WRITEME
"""
assert X.ndim == 2
return self._gpu_matrix_dot(X, self.inv_P_) + self.mean_
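# Added note: fit() eigendecomposes the bias-regularized covariance
# C = X^T X / n + filter_bias * I of the centered data and stores the
# whitening matrix P_ = V diag(1/sqrt(eigs)) V^T; when store_inverse is True,
# inv_P_ = V diag(sqrt(eigs)) V^T undoes the whitening up to the stored mean.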
class LeCunLCN(ExamplewisePreprocessor):
"""
Yann LeCun local contrast normalization
.. todo::
WRITEME properly
Parameters
----------
img_shape : WRITEME
kernel_size : int, optional
local contrast kernel size
batch_size: int, optional
If the dataset is based on PyTables, use a batch size smaller than
10000. Otherwise, any batch size different from the dataset size is not
supported yet.
threshold : float
Threshold for denominator
channels : list or None, optional
List of channels to normalize.
If none, will apply it on all channels.
"""
def __init__(self, img_shape, kernel_size=7, batch_size=5000,
threshold=1e-4, channels=None):
self._img_shape = img_shape
self._kernel_size = kernel_size
self._batch_size = batch_size
self._threshold = threshold
if channels is None:
self._channels = range(3)
else:
if isinstance(channels, list) or isinstance(channels, tuple):
self._channels = channels
elif isinstance(channels, int):
self._channels = [channels]
else:
raise ValueError("channesl should be either a list or int")
def transform(self, x):
"""
.. todo::
WRITEME properly
Parameters
----------
X : WRITEME
data with axis [b, 0, 1, c]
"""
for i in self._channels:
assert isinstance(i, int)
assert i >= 0 and i <= x.shape[3]
x[:, :, :, i] = lecun_lcn(x[:, :, :, i],
self._img_shape,
self._kernel_size,
self._threshold)
return x
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
axes = ['b', 0, 1, 'c']
data_size = dataset.X.shape[0]
if self._channels is None:
self._channels = range(3)
last = (numpy.floor(data_size / float(self._batch_size)) *
self._batch_size)
for i in xrange(0, data_size, self._batch_size):
stop = (i + numpy.mod(data_size, self._batch_size)
if i >= last else
i + self._batch_size)
log.info("LCN processing data from {0} to {1}".format(i, stop))
transformed = self.transform(convert_axes(
dataset.get_topological_view(dataset.X[i:stop, :]),
dataset.view_converter.axes, axes))
transformed = convert_axes(transformed,
axes,
dataset.view_converter.axes)
if self._batch_size != data_size:
if isinstance(dataset.X, numpy.ndarray):
# TODO have a separate class for non pytables datasets
transformed = convert_axes(transformed,
dataset.view_converter.axes,
['b', 0, 1, 'c'])
transformed = transformed.reshape(transformed.shape[0],
transformed.shape[1] *
transformed.shape[2] *
transformed.shape[3])
dataset.X[i:stop] = transformed
else:
dataset.set_topological_view(transformed,
dataset.view_converter.axes,
start=i)
if self._batch_size == data_size:
dataset.set_topological_view(transformed,
dataset.view_converter.axes)
class RGB_YUV(ExamplewisePreprocessor):
"""
Converts image color channels from rgb to yuv and vice versa
Parameters
----------
rgb_yuv : bool, optional
If true converts from rgb to yuv,
if false converts from yuv to rgb
batch_size : int, optional
Batch_size to make conversions in batches
"""
def __init__(self, rgb_yuv=True, batch_size=5000):
self._batch_size = batch_size
self._rgb_yuv = rgb_yuv
def yuv_rgb(self, x):
"""
.. todo::
WRITEME
"""
y = x[:, :, :, 0]
u = x[:, :, :, 1]
v = x[:, :, :, 2]
r = y + 1.13983 * v
g = y - 0.39465 * u - 0.58060 * v
b = y + 2.03211 * u
x[:, :, :, 0] = r
x[:, :, :, 1] = g
x[:, :, :, 2] = b
return x
def rgb_yuv(self, x):
"""
.. todo::
WRITEME
"""
r = x[:, :, :, 0]
g = x[:, :, :, 1]
b = x[:, :, :, 2]
y = 0.299 * r + 0.587 * g + 0.114 * b
u = -0.14713 * r - 0.28886 * g + 0.436 * b
v = 0.615 * r - 0.51499 * g - 0.10001 * b
x[:, :, :, 0] = y
x[:, :, :, 1] = u
x[:, :, :, 2] = v
return x
def transform(self, x, dataset_axes):
"""
.. todo::
WRITEME
"""
axes = ['b', 0, 1, 'c']
x = convert_axes(x, dataset_axes, axes)
if self._rgb_yuv:
x = self.rgb_yuv(x)
else:
x = self.yuv_rgb(x)
x = convert_axes(x, axes, dataset_axes)
return x
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
X = dataset.X
data_size = X.shape[0]
last = (numpy.floor(data_size / float(self._batch_size)) *
self._batch_size)
for i in xrange(0, data_size, self._batch_size):
stop = (i + numpy.mod(data_size, self._batch_size)
if i >= last else
i + self._batch_size)
log.info("RGB_YUV processing data from {0} to {1}".format(i, stop))
data = dataset.get_topological_view(X[i:stop])
transformed = self.transform(data, dataset.view_converter.axes)
# TODO have a separate class for non pytables datasets
# or add start option to dense_design_matrix
if isinstance(dataset.X, numpy.ndarray):
transformed = convert_axes(transformed,
dataset.view_converter.axes,
['b', 0, 1, 'c'])
transformed = transformed.reshape(transformed.shape[0],
transformed.shape[1] *
transformed.shape[2] *
transformed.shape[3])
dataset.X[i:stop] = transformed
else:
dataset.set_topological_view(transformed,
dataset.view_converter.axes,
start=i)
class CentralWindow(Preprocessor):
"""
Preprocesses an image dataset to contain only the central window.
Parameters
----------
window_shape : WRITEME
"""
def __init__(self, window_shape):
self.__dict__.update(locals())
del self.self
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
w_rows, w_cols = self.window_shape
arr = dataset.get_topological_view()
try:
axes = dataset.view_converter.axes
except AttributeError:
reraise_as(NotImplementedError("I don't know how to tell what the "
"axes of this kind of dataset "
"are."))
needs_transpose = not axes[1:3] == (0, 1)
if needs_transpose:
arr = numpy.transpose(arr,
(axes.index('c'),
axes.index(0),
axes.index(1),
axes.index('b')))
r_off = (arr.shape[1] - w_rows) // 2
c_off = (arr.shape[2] - w_cols) // 2
new_arr = arr[:, r_off:r_off + w_rows, c_off:c_off + w_cols, :]
if needs_transpose:
index_map = tuple(('c', 0, 1, 'b').index(axis) for axis in axes)
new_arr = numpy.transpose(new_arr, index_map)
dataset.set_topological_view(new_arr, axes=axes)
def lecun_lcn(input, img_shape, kernel_shape, threshold=1e-4):
"""
Yann LeCun's local contrast normalization
Original code in Theano by: Guillaume Desjardins
Parameters
----------
input : WRITEME
img_shape : WRITEME
kernel_shape : WRITEME
threshold : WRITEME
"""
input = input.reshape((input.shape[0], input.shape[1], input.shape[2], 1))
X = tensor.matrix(dtype=input.dtype)
X = X.reshape((len(input), img_shape[0], img_shape[1], 1))
filter_shape = (1, 1, kernel_shape, kernel_shape)
filters = sharedX(gaussian_filter(kernel_shape).reshape(filter_shape))
input_space = Conv2DSpace(shape=img_shape, num_channels=1)
transformer = Conv2D(filters=filters, batch_size=len(input),
input_space=input_space,
border_mode='full')
convout = transformer.lmul(X)
# For each pixel, remove mean of 9x9 neighborhood
mid = int(numpy.floor(kernel_shape / 2.))
centered_X = X - convout[:, mid:-mid, mid:-mid, :]
# Scale down norm of 9x9 patch if norm is bigger than 1
transformer = Conv2D(filters=filters,
batch_size=len(input),
input_space=input_space,
border_mode='full')
sum_sqr_XX = transformer.lmul(X ** 2)
denom = tensor.sqrt(sum_sqr_XX[:, mid:-mid, mid:-mid, :])
per_img_mean = denom.mean(axis=[1, 2])
divisor = tensor.largest(per_img_mean.dimshuffle(0, 'x', 'x', 1), denom)
divisor = tensor.maximum(divisor, threshold)
new_X = centered_X / divisor
new_X = tensor.flatten(new_X, outdim=3)
f = function([X], new_X)
return f(input)
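# Added note: lecun_lcn performs a subtractive-then-divisive normalization:
# each pixel has a Gaussian-weighted local mean subtracted, and the result is
# divided by the local norm of its neighborhood, floored by the per-image mean
# of that norm and by `threshold`.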
def gaussian_filter(kernel_shape):
"""
.. todo::
WRITEME
Parameters
----------
kernel_shape : WRITEME
"""
x = numpy.zeros((kernel_shape, kernel_shape),
dtype=theano.config.floatX)
def gauss(x, y, sigma=2.0):
Z = 2 * numpy.pi * sigma ** 2
return 1. / Z * numpy.exp(-(x ** 2 + y ** 2) / (2. * sigma ** 2))
mid = numpy.floor(kernel_shape / 2.)
for i in xrange(0, kernel_shape):
for j in xrange(0, kernel_shape):
x[i, j] = gauss(i - mid, j - mid)
return x / numpy.sum(x)
class ShuffleAndSplit(Preprocessor):
"""
.. todo::
WRITEME properly
Allocates a numpy rng with the specified seed.
Note: this must be a seed, not a RandomState. A new RandomState is
re-created with the same seed every time the preprocessor is called.
This way if you save the preprocessor and re-use it later it will give
the same dataset regardless of whether you save the preprocessor before
or after applying it.
Shuffles the data, then takes examples in range (start, stop)
Parameters
----------
seed : WRITEME
start : int
WRITEME
stop : int
WRITEME
"""
def __init__(self, seed, start, stop):
self.__dict__.update(locals())
del self.self
def apply(self, dataset, can_fit=False):
"""
.. todo::
WRITEME
"""
start = self.start
stop = self.stop
rng = make_np_rng(self.seed, which_method="randint")
X = dataset.X
y = dataset.y
if y is not None:
assert X.shape[0] == y.shape[0]
for i in xrange(X.shape[0]):
j = rng.randint(X.shape[0])
tmp = X[i, :].copy()
X[i, :] = X[j, :].copy()
X[j, :] = tmp
if y is not None:
tmp = y[i, :].copy()
y[i, :] = y[j, :].copy()
y[j, :] = tmp
assert start >= 0
assert stop > start
assert stop <= X.shape[0]
dataset.X = X[start:stop, :]
if y is not None:
dataset.y = y[start:stop, :]
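# Usage sketch (illustrative only; train_set is a hypothetical dataset of
# 50000 examples, shuffled deterministically by the seed and then truncated):
#
#   ShuffleAndSplit(seed=42, start=0, stop=40000).apply(train_set)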
| bsd-3-clause | -3,315,863,821,247,924,000 | 30.114894 | 79 | 0.520736 | false |
priya-pp/Tacker | tacker/db/migration/alembic_migrations/versions/22f5385a3d3f_add_status_to_vims.py | 3 | 1072 | # Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add status to vims
Revision ID: 22f5385a3d3f
Revises: 5246a6bd410f
Create Date: 2016-05-12 13:29:30.615609
"""
# revision identifiers, used by Alembic.
revision = '22f5385a3d3f'
down_revision = '5f88e86b35c7'
from alembic import op
import sqlalchemy as sa
def upgrade(active_plugins=None, options=None):
op.add_column('vims',
sa.Column('status', sa.String(255),
nullable=False, server_default=''))
| apache-2.0 | 6,872,407,668,500,319,000 | 29.628571 | 78 | 0.702425 | false |
OpenInternet/copilot | copilot/utils/file_sys.py | 2 | 2343 | import os
import subprocess
#stat logging
import logging
log = logging.getLogger(__name__)
#Adapted from https://stackoverflow.com/questions/6840711/writing-a-file-to-a-usb-stick-in-linux-with-python
# Feel like a https://www.eff.org/files/images_insert/defcon20-script-kitty-detail.jpg
def get_usb_dirs():
""" Get the directory for all mounted USB devices.
"""
devices = []
log.info("Getting all USB devices.")
with open("/proc/partitions") as partitionsFile:
lines = partitionsFile.readlines()[2:] #Skips the header lines
for line in lines:
words = [x.strip() for x in line.split()]
minorNumber = int(words[1])
deviceName = words[3]
if minorNumber % 16 == 0:
path = "/sys/class/block/" + deviceName
if os.path.islink(path):
if os.path.realpath(path).find("/usb") > 0:
devices.append('/dev/'+deviceName)
log.debug("USB devices found: {0}".format(devices))
log.debug("Getting mount points.")
mounts = []
for line in subprocess.check_output(['mount', '-l']).split('\n'):
parts = line.split(' ')
if len(parts) > 2:
if parts[0][:-1] in devices:
mounts.append(parts[2])
log.debug("USB mount points found: {0}".format(mounts))
return mounts
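# Added note: in /proc/partitions a minor number that is a multiple of 16
# denotes a whole disk rather than a partition, and the /sys/class/block
# symlink is inspected to tell USB-attached disks apart from internal ones;
# the output of `mount -l` then maps those device nodes to mount points.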
def get_likely_usb():
""" Get the last mounted USB device.
Because a machine can have many USBs attached to it,
this function tries to pick the most likely desired USB.
To do this, it picks the last mounted USB. We hope that
a person's tendency to unplug and replug a USB when
retrying will ensure that this correctly identifies the
desired USB.
"""
log.debug("Getting USB device.")
all_usb = get_usb_dirs()
if len(all_usb) > 0:
likely_usb = all_usb[-1]
log.debug("Likely USB device {0} found.".format(likely_usb))
return likely_usb
else:
log.debug("No USB devices found.")
def is_usb():
""" Check if there are mounted USB's."""
log.debug("Checking if there are mounted USB's")
all_usb = get_usb_dirs()
if len(all_usb) > 0:
log.debug("Mounted USB's found")
return True
else:
log.debug("No mounted USB's found.")
return False
| lgpl-3.0 | -962,323,521,009,656,300 | 33.970149 | 108 | 0.608195 | false |
scotwk/cloud-custodian | c7n/resources/rdsparamgroup.py | 3 | 11149 | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from botocore.exceptions import ClientError
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import FilterRegistry
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.utils import (type_schema, local_session, chunks)
log = logging.getLogger('custodian.rds-param-group')
pg_filters = FilterRegistry('rds-param-group.filters')
pg_actions = ActionRegistry('rds-param-group.actions')
@resources.register('rds-param-group')
class RDSParamGroup(QueryResourceManager):
"""Resource manager for RDS parameter groups.
"""
class resource_type(object):
service = 'rds'
type = 'pg'
enum_spec = ('describe_db_parameter_groups', 'DBParameterGroups', None)
name = id = 'DBParameterGroupName'
filter_name = None
filter_type = None
dimension = 'DBParameterGroupName'
date = None
filter_registry = pg_filters
action_registry = pg_actions
pg_cluster_filters = FilterRegistry('rds-cluster-param-group.filters')
pg_cluster_actions = ActionRegistry('rds-cluster-param-group.actions')
@resources.register('rds-cluster-param-group')
class RDSClusterParamGroup(QueryResourceManager):
""" Resource manager for RDS cluster parameter groups.
"""
class resource_type(object):
service = 'rds'
type = 'cluster-pg'
enum_spec = ('describe_db_cluster_parameter_groups', 'DBClusterParameterGroups', None)
name = id = 'DBClusterParameterGroupName'
filter_name = None
filter_type = None
dimension = 'DBClusterParameterGroupName'
date = None
filter_registry = pg_cluster_filters
action_registry = pg_cluster_actions
class PGMixin(object):
def get_pg_name(self, pg):
return pg['DBParameterGroupName']
class PGClusterMixin(object):
def get_pg_name(self, pg):
return pg['DBClusterParameterGroupName']
class Copy(BaseAction):
schema = type_schema(
'copy',
**{
'required': ['name'],
'name': {'type': 'string'},
'description': {'type': 'string'},
}
)
def process(self, param_groups):
client = local_session(self.manager.session_factory).client('rds')
for param_group in param_groups:
name = self.get_pg_name(param_group)
copy_name = self.data.get('name')
copy_desc = self.data.get('description', 'Copy of {}'.format(name))
self.do_copy(client, name, copy_name, copy_desc)
self.log.info('Copied RDS parameter group %s to %s', name, copy_name)
@pg_actions.register('copy')
class PGCopy(PGMixin, Copy):
""" Action to copy an RDS parameter group.
:example:
.. code-block:: yaml
policies:
- name: rds-param-group-copy
resource: rds-param-group
filters:
- DBParameterGroupName: original_pg_name
actions:
- type: copy
name: copy_name
"""
permissions = ('rds:CopyDBParameterGroup',)
def do_copy(self, client, name, copy_name, desc):
client.copy_db_parameter_group(
SourceDBParameterGroupIdentifier=name,
TargetDBParameterGroupIdentifier=copy_name,
TargetDBParameterGroupDescription=desc
)
@pg_cluster_actions.register('copy')
class PGClusterCopy(PGClusterMixin, Copy):
""" Action to copy an RDS cluster parameter group.
:example:
.. code-block:: yaml
policies:
- name: rds-cluster-param-group-copy
resource: rds-cluster-param-group
filters:
- DBClusterParameterGroupName: original_cluster_pg_name
actions:
- type: copy
name: copy_name
"""
permissions = ('rds:CopyDBClusterParameterGroup',)
def do_copy(self, client, name, copy_name, desc):
client.copy_db_cluster_parameter_group(
SourceDBClusterParameterGroupIdentifier=name,
TargetDBClusterParameterGroupIdentifier=copy_name,
TargetDBClusterParameterGroupDescription=desc
)
class Delete(BaseAction):
schema = type_schema('delete')
def process(self, param_groups):
client = local_session(self.manager.session_factory).client('rds')
for param_group in param_groups:
name = self.get_pg_name(param_group)
try:
self.do_delete(client, name)
except ClientError as e:
if e.response['Error']['Code'] == 'DBParameterGroupNotFoundFault':
self.log.warning('RDS parameter group %s already deleted', name)
continue
raise
self.log.info('Deleted RDS parameter group: %s', name)
@pg_actions.register('delete')
class PGDelete(PGMixin, Delete):
"""Action to delete an RDS parameter group
:example:
.. code-block:: yaml
policies:
- name: rds-param-group-delete
resource: rds-param-group
filters:
- DBParameterGroupName: pg_name
actions:
- type: delete
"""
permissions = ('rds:DeleteDBParameterGroup',)
def do_delete(self, client, name):
client.delete_db_parameter_group(DBParameterGroupName=name)
@pg_cluster_actions.register('delete')
class PGClusterDelete(PGClusterMixin, Delete):
"""Action to delete an RDS cluster parameter group
:example:
.. code-block:: yaml
policies:
- name: rds-cluster-param-group-delete
resource: rds-cluster-param-group
filters:
- DBClusterParameterGroupName: cluster_pg_name
actions:
- type: delete
"""
permissions = ('rds:DeleteDBClusterParameterGroup',)
def do_delete(self, client, name):
client.delete_db_cluster_parameter_group(DBClusterParameterGroupName=name)
class Modify(BaseAction):
schema = type_schema(
'modify',
**{
'required': ['params'],
'params': {
'type': 'array',
'items': {
'type': 'object',
'required': ['name', 'value'],
'name': {'type': 'string'},
'value': {'type': 'string'},
'apply-method': {'type': 'string', 'enum': ['immediate', 'pending-reboot']}
},
},
}
)
def process(self, param_groups):
client = local_session(self.manager.session_factory).client('rds')
params = []
for param in self.data.get('params', []):
params.append({
'ParameterName': param['name'],
'ParameterValue': param['value'],
'ApplyMethod': param.get('apply-method', 'immediate'),
})
for param_group in param_groups:
name = self.get_pg_name(param_group)
# Fetch the existing parameters for this DB, so we only try to change the ones that are
# different.
cur_params = self.get_current_params(client, name)
changed_params = []
for param in params:
param_name = param['ParameterName']
if (param_name not in cur_params or
cur_params[param_name]['ParameterValue'] != param['ParameterValue']):
changed_params.append(param)
# The API only accepts up to 20 parameters per call, so break the changes
# into smaller batches (chunks of 5 here): https://goo.gl/Z6oGNv
for param_set in chunks(changed_params, 5):
self.do_modify(client, name, param_set)
self.log.info('Modified RDS parameter group %s (%i parameters changed, %i unchanged)',
name, len(changed_params), len(params) - len(changed_params))
@pg_actions.register('modify')
class PGModify(PGMixin, Modify):
"""Action to modify an RDS parameter group
:example:
.. code-block:: yaml
policies:
- name: rds-param-group-modify
resource: rds-param-group
filters:
- DBParameterGroupName: pg_name
actions:
- type: modify
params:
- name: autocommit
value: "1"
- name: max_connections
value: "100"
"""
permissions = ('rds:DescribeDBParameters', 'rds:ModifyDBParameterGroup')
def get_current_params(self, client, name):
params = client.describe_db_parameters(DBParameterGroupName=name)
return {x['ParameterName']: {
'ParameterValue': x.get('ParameterValue'),
'ApplyMethod': x['ApplyMethod']}
for x in params.get('Parameters', [])}
def do_modify(self, client, name, params):
client.modify_db_parameter_group(DBParameterGroupName=name, Parameters=params)
@pg_cluster_actions.register('modify')
class PGClusterModify(PGClusterMixin, Modify):
"""Action to modify an RDS cluster parameter group
:example:
.. code-block:: yaml
policies:
- name: rds-cluster-param-group-modify
resource: rds-cluster-param-group
filters:
- DBClusterParameterGroupName: cluster_pg_name
actions:
- type: modify
params:
- name: lower_case_table_names
value: "1"
- name: master_verify_checksum
value: "1"
"""
permissions = ('rds:DescribeDBClusterParameters', 'rds:ModifyDBClusterParameterGroup')
def get_current_params(self, client, name):
params = client.describe_db_cluster_parameters(DBClusterParameterGroupName=name)
return {x['ParameterName']: {
'ParameterValue': x.get('ParameterValue'),
'ApplyMethod': x['ApplyMethod']}
for x in params.get('Parameters', [])}
def do_modify(self, client, name, params):
client.modify_db_cluster_parameter_group(
DBClusterParameterGroupName=name,
Parameters=params
)
| apache-2.0 | 7,214,040,491,534,772,000 | 30.583569 | 99 | 0.58938 | false |
heytcass/homeassistant-config | deps/sqlalchemy/dialects/mssql/mxodbc.py | 36 | 3856 | # mssql/mxodbc.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+mxodbc
:name: mxODBC
:dbapi: mxodbc
:connectstring: mssql+mxodbc://<username>:<password>@<dsnname>
:url: http://www.egenix.com/
Execution Modes
---------------
mxODBC features two styles of statement execution, using the
``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being
an extension to the DBAPI specification). The former makes use of a particular
API call specific to the SQL Server Native Client ODBC driver known as
SQLDescribeParam, while the latter does not.
mxODBC apparently only makes repeated use of a single prepared statement
when SQLDescribeParam is used. The advantage to prepared statement reuse is
one of performance. The disadvantage is that SQLDescribeParam has a limited
set of scenarios in which bind parameters are understood, including that they
cannot be placed within the argument lists of function calls, anywhere outside
the FROM, or even within subqueries within the FROM clause - making the usage
of bind parameters within SELECT statements impossible for all but the most
simplistic statements.
For this reason, the mxODBC dialect uses the "native" mode by default only for
INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for
all other statements.
This behavior can be controlled via
:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the
``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a
value of ``True`` will unconditionally use native bind parameters and a value
of ``False`` will unconditionally use string-escaped parameters.
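For example, the following (an illustrative sketch only; ``my_table`` and
``conn`` stand in for an existing Table and Connection in your application)
forces string-escaped parameters for a single statement::
    stmt = my_table.insert().execution_options(native_odbc_execute=False)
    conn.execute(stmt, {"id": 1, "data": "some value"})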
"""
from ... import types as sqltypes
from ...connectors.mxodbc import MxODBCConnector
from .pyodbc import MSExecutionContext_pyodbc, _MSNumeric_pyodbc
from .base import (MSDialect,
MSSQLStrictCompiler,
_MSDateTime, _MSDate, _MSTime)
class _MSNumeric_mxodbc(_MSNumeric_pyodbc):
"""Include pyodbc's numeric processor.
"""
class _MSDate_mxodbc(_MSDate):
def bind_processor(self, dialect):
def process(value):
if value is not None:
return "%s-%s-%s" % (value.year, value.month, value.day)
else:
return None
return process
class _MSTime_mxodbc(_MSTime):
def bind_processor(self, dialect):
def process(value):
if value is not None:
return "%s:%s:%s" % (value.hour, value.minute, value.second)
else:
return None
return process
class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
"""
The pyodbc execution context is useful for enabling
SELECT SCOPE_IDENTITY in cases where OUTPUT clause
does not work (tables with insert triggers).
"""
# todo - investigate whether the pyodbc execution context
# is really only being used in cases where OUTPUT
# won't work.
class MSDialect_mxodbc(MxODBCConnector, MSDialect):
# this is only needed if "native ODBC" mode is used,
# which is now disabled by default.
# statement_compiler = MSSQLStrictCompiler
execution_ctx_cls = MSExecutionContext_mxodbc
# flag used by _MSNumeric_mxodbc
_need_decimal_fix = True
colspecs = {
sqltypes.Numeric: _MSNumeric_mxodbc,
sqltypes.DateTime: _MSDateTime,
sqltypes.Date: _MSDate_mxodbc,
sqltypes.Time: _MSTime_mxodbc,
}
def __init__(self, description_encoding=None, **params):
super(MSDialect_mxodbc, self).__init__(**params)
self.description_encoding = description_encoding
dialect = MSDialect_mxodbc
| mit | 3,314,913,829,221,668,000 | 33.428571 | 78 | 0.703838 | false |
JieweiWei/blog_crawler | project/blog/rotate_useragent.py | 1 | 3014 | # -*-coding:utf-8-*-
from scrapy import log
import random
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware
class RotateUserAgentMiddleware(UserAgentMiddleware):
def __init__(self, user_agent=''):
self.user_agent = user_agent
def process_request(self, request, spider):
ua = random.choice(self.user_agent_list)
if ua:
            # Log which user agent was chosen for this request
log.msg('Current UserAgent: '+ua, level='INFO')
request.headers.setdefault('User-Agent', ua)
    # The default user_agent_list is composed of Chrome, IE, Firefox, Mozilla, Opera and Netscape strings.
    # More user agent strings can be found at http://www.useragentstring.com/pages/useragentstring.php
user_agent_list = [\
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
"(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
"(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
"(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
"(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
"(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
"(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
"(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
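# Illustrative sketch of enabling this middleware in a Scrapy project's settings.py.
# The module path 'blog.rotate_useragent' is an assumption based on this project's
# layout; adjust it to wherever this class actually lives.
#
# DOWNLOADER_MIDDLEWARES = {
#     'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
#     'blog.rotate_useragent.RotateUserAgentMiddleware': 400,
# }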
| mit | -5,324,676,225,037,087,000 | 51.807018 | 108 | 0.62691 | false |
OakNinja/svtplay-dl | lib/svtplay_dl/service/lemonwhale.py | 1 | 1998 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import copy
import xml.etree.ElementTree as ET
from svtplay_dl.utils.urllib import unquote_plus
from svtplay_dl.service import Service
from svtplay_dl.utils import get_http_data
from svtplay_dl.log import log
from svtplay_dl.fetcher.http import HTTP
class Lemonwhale(Service):
supported_domains = ['svd.se']
def get(self, options):
vid = None
error, data = self.get_urldata()
if error:
log.error("Can't get data from that page")
return
if self.exclude(options):
return
match = re.search(r'video url-([^"]+)', data)
if not match:
match = re.search(r'embed.jsp\?([^"]+)"', self.get_urldata()[1])
if not match:
log.error("Can't find video id")
return
vid = match.group(1)
if not vid:
path = unquote_plus(match.group(1))
error, data = get_http_data("http://www.svd.se%s" % path)
match = re.search(r'embed.jsp\?([^"]+)', data)
if not match:
log.error("Can't find video id")
return
vid = match.group(1)
url = "http://amz.lwcdn.com/api/cache/VideoCache.jsp?%s" % vid
error, data = get_http_data(url)
if error:
log.error("Cant download video info")
return
xml = ET.XML(data)
videofile = xml.find("{http://www.lemonwhale.com/xml11}VideoFile")
mediafiles = videofile.find("{http://www.lemonwhale.com/xml11}MediaFiles")
high = mediafiles.find("{http://www.lemonwhale.com/xml11}VideoURLHigh")
if high.text:
yield HTTP(copy.copy(options), high.text, 720)
videourl = mediafiles.find(
"{http://www.lemonwhale.com/xml11}VideoURL").text
yield HTTP(copy.copy(options), videourl, 480)
| mit | -3,046,201,685,899,543,000 | 34.678571 | 82 | 0.578579 | false |
hstau/covar-cryo | covariance/distribute3Sphere.py | 1 | 1630 | """ function [results, iter] = distribute3Sphere(numPts)
% [results, iter] = distribute3Sphere(numPts)
% distributes numPts points roughly uniformly on a unit 3-sphere and
% returns the coordinates in results. Number of iterations required is
% returned in iter.
%
% Algorithm adapted from L. Lovisolo and E.A.B. da Silva, Uniform
% distribution of points on a hyper-sphere with applications to vector
% bit-plane encoding, IEE Proc.-Vis. Image Signal Process., Vol. 148, No.
% 3, June 2001
%
% Programmed February 2009
% Copyright (c) Russell Fung 2009
%
% Copyright (c) Columbia University Hstau Liao 2018 (python version)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
import numpy as np
import math
def op(numPts):
maxIter = 100
K = numPts
A3 = 4*np.pi # surface area of a unit 3-sphere
delta = math.exp(math.log(A3/K)/2.)
    results = np.zeros((2*K, 3))  # the algorithm sometimes returns more or fewer points than requested
iter = 0
id = 0
while id!=K and iter<maxIter:
iter = iter+1
id = 0
dw1 = delta
for w1 in np.arange(0.5*dw1,np.pi,dw1):
cw1 = math.cos(w1)
sw1 = math.sin(w1)
x1 = cw1
dw2 = dw1/sw1
for w2 in np.arange(0.5*dw2,2*np.pi,dw2):
cw2 = math.cos(w2)
sw2 = math.sin(w2)
x2 = sw1*cw2
x3 = sw1*sw2
results[id,:] = np.hstack((x1, x2, x3))
id = id + 1
delta = delta*math.exp(math.log(float(id)/K)/2.)
results = results[0:K,:]
return (results,iter)
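# Illustrative usage (names below are examples, not part of the module):
#   coords, n_iter = op(100)
#   coords.shape   # (100, 3); each row is a point on the unit sphere in R^3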
| gpl-2.0 | 421,824,531,617,861,100 | 31.6 | 80 | 0.560736 | false |
rjw57/cubbie | cubbie/fixture.py | 1 | 2649 | """
Generate fake data for the database.
These functions all require that app.debug==True.
"""
from functools import wraps
from datetime import datetime, timedelta
from faker import Faker
from flask import current_app
from mixer.backend.flask import mixer
from cubbie.model import User, Production, Performance, SalesDatum, Capability
class FixtureError(RuntimeError):
pass
# A little decorator which ensures that app.debug is True
def debug_only(f):
@wraps(f)
def wrapper(*args, **kwargs):
if not current_app.debug:
raise FixtureError("app.debug must be True")
return f(*args, **kwargs)
return wrapper
# Global Faker instance used to create fixtures.
fake = Faker()
@debug_only
def create_user_fixtures(n=5):
"""Create test fixtures for User. Requires app.debug==True."""
mixer.cycle(n).blend(User, displayname=fake.name, is_active=mixer.RANDOM)
@debug_only
def create_production_fixtures(n=5):
"""Create test fixtures for Production. Requires app.debug==True."""
mixer.cycle(n).blend(Production, name=fake.sentence, slug=fake.slug)
@debug_only
def create_performance_fixtures(n=20, production=mixer.SELECT):
"""Create test fixtures for Performance. Requires app.debug==True and >0
Productions in database.
"""
def sa(c):
return datetime.utcnow() + timedelta(minutes=10+5*c)
def ea(c):
return datetime.utcnow() + timedelta(minutes=20+15*c)
mixer.cycle(n).blend(Performance,
starts_at=mixer.sequence(sa),
ends_at=mixer.sequence(ea),
production=production,
is_cancelled=mixer.RANDOM,
is_deleted=mixer.RANDOM,
)
@debug_only
def create_sales_fixtures(n=20, performance=mixer.SELECT):
"""Create test fixtures for SalesDatum. Requires app.debug==True and >0
Performances in database.
"""
from random import seed, randint
def ma(c):
return datetime.utcnow() + timedelta(days=randint(1,100))
def sold(c):
seed(c)
return randint(0, 65)
def avail(c):
seed(c)
s = randint(0, 65)
return s + randint(0, 30)
mixer.cycle(n).blend(SalesDatum,
measured_at=mixer.sequence(ma),
performance=performance,
is_valid=mixer.RANDOM,
sold=mixer.sequence(sold),
available=mixer.sequence(avail),
)
@debug_only
def create_capability_fixtures(n=20, user=mixer.SELECT, production=mixer.SELECT):
"""Create test fixtures for Capability. Requires app.debug==True and >0
Users and >0 Productions in database.
"""
mixer.cycle(n).blend(Capability, user=user, production=production)
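# Illustrative population order inside a Flask app context with app.debug set
# (counts are arbitrary; later fixtures select from earlier ones, so Users and
# Productions must exist first):
#   create_user_fixtures(5)
#   create_production_fixtures(3)
#   create_performance_fixtures(30)
#   create_sales_fixtures(50)
#   create_capability_fixtures(10)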
| mit | -15,517,915,106,571,602 | 27.180851 | 81 | 0.679124 | false |
stankovski/AutoRest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/ParameterFlattening/setup.py | 2 | 1109 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestparameterflattening"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.1.0"]
setup(
name=NAME,
version=VERSION,
description="AutoRestParameterFlattening",
author_email="",
url="",
keywords=["Swagger", "AutoRestParameterFlattening"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Resource Flattening for AutoRest
"""
)
| mit | -6,639,381,496,791,292,000 | 26.725 | 76 | 0.620379 | false |
jonfoster/pyxb2 | pyxb/utils/templates.py | 5 | 4046 | # -*- coding: utf-8 -*-
# Copyright 2009-2013, Peter A. Bigot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functions that aid with generating text from templates and maps."""
import re
# POSIX shell variable syntax:
# Expansions with unset var
# ${var}=
# ${var+WORD}=
# ${var:+WORD}=
# ${var-WORD}=WORD
# ${var:-WORD}=WORD
# Expansions with empty var
# ${var}=
# ${var+WORD}=WORD
# ${var:+WORD}=
# ${var-WORD}=
# ${var:-WORD}=WORD
# Expansions with var=SET
# ${var}=SET
# ${var+WORD}=WORD
# ${var:+WORD}=WORD
# ${var-WORD}=SET
# ${var:-WORD}=SET
# This expression replaces markers in template text with the value
# obtained by looking up the marker in a dictionary.
# %{id} = value
_substIdPattern = re.compile("%{(?P<id>\w+)}")
# This expression performs conditional substitution: if the expression
# provided evaluates to true in a given context, then one value is
# substituted, otherwise the alternative value is substituted.
# %{?<cond>??<true>?:<false>?}
# %{?1 == 2??true?:false?}
_substConditionalPattern = re.compile("%{\?(?P<expr>.+?)\?\?(?P<true>.*?)(\?:(?P<false>.*?))?\?}", re.MULTILINE + re.DOTALL)
# This expression tests whether an identifier is defined to a non-None
# value in the context; if so, it replaces the marker with template
# text. In that replacement text, the value ?@ is replaced by the
# test expression. Contrast POSIX shell ${ID+subst}${ID-subst}
# Note: NOT by the value of the test expression. If no replacement
# text is given, the replacement '%{?@}' is used, which replaces it
# with the value of the test expression.
# %{?<id>?+<yessubst>?-<nosubst>?}
# %{?maybe_text?+?@ is defined to be %{?@}?}
_substIfDefinedPattern = re.compile("%{\?(?P<id>\w+)(\?\+(?P<repl>.*?))?(\?\-(?P<ndrepl>.*?))?\?}", re.MULTILINE + re.DOTALL)
# The pattern which, if present in the body of a IfDefined block, is
# replaced by the test expression.
_substDefinedBodyPattern = re.compile("\?@")
def _bodyIfDefinedPattern (match_object, dictionary):
global _substDefinedBodyPattern
id = match_object.group('id')
repl = match_object.group('repl')
ndrepl = match_object.group('ndrepl')
value = dictionary.get(id)
if value is not None:
if repl:
return _substDefinedBodyPattern.sub(id, repl)
if ndrepl:
return ''
return _substDefinedBodyPattern.sub(id, '%{?@}')
else:
if ndrepl:
return _substDefinedBodyPattern.sub(id, ndrepl)
return ''
def _bodyConditionalPattern (match_object, dictionary):
global _substDefinedBodyPattern
expr = match_object.group('expr')
true = match_object.group('true')
false = match_object.group('false')
value = None
try:
value = eval(expr, dictionary)
except Exception as e:
return '%%{EXCEPTION: %s}' % (e,)
if value:
return _substDefinedBodyPattern.sub(expr, true)
if false is not None:
return _substDefinedBodyPattern.sub(expr, false)
return ''
def replaceInText (text, **dictionary):
global _substIfDefinedPattern
global _substConditionalPattern
global _substIdPattern
global _substDefinedBodyPattern
rv = text
rv = _substIfDefinedPattern.sub(lambda _x: _bodyIfDefinedPattern(_x, dictionary), rv)
rv = _substConditionalPattern.sub(lambda _x: _bodyConditionalPattern(_x, dictionary), rv)
rv = _substIdPattern.sub(
lambda _x,_map=dictionary:
_map.get(_x.group('id'), '%%{MISSING:%s}' % (_x.group('id'),))
, rv)
return rv
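# Illustrative uses of the substitution forms handled above (values should be
# strings, since they end up as re.sub replacements):
#   replaceInText("Hello %{name}", name="World")        -> 'Hello World'
#   replaceInText("%{?x?+x is %{?@}?-no x?}", x="3")     -> 'x is 3'
#   replaceInText("%{?1 == 2??yes?:no?}")                -> 'no'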
| apache-2.0 | 2,848,421,639,949,725,700 | 35.125 | 125 | 0.661641 | false |
paweljasinski/ironpython3 | Tests/modules/type_related/_struct_test.py | 3 | 10431 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
import _struct
def pack(f, *v):
return _struct.Struct(f).pack(*v)
def unpack(f, *v):
return _struct.Struct(f).unpack(*v)
def calcsize(f):
return _struct.Struct(f).size
def test_sanity():
mapping = {
'c': 'a',
'b': ord('b'),
'B': ord('c'),
'h': -123,
'H': 123,
'i': -12345,
'l': -123456789,
'I': 12345,
'L': 123456789,
'q': -1000000000,
'Q': 1000000000,
'f': 3.14,
'd': -0.3439,
'6s': 'string',
'15p': 'another string'
}
for (k, v) in mapping.iteritems():
s = pack(k, v)
v2 = unpack(k, s)
if isinstance(v, float):
AlmostEqual(v, v2[0])
else:
AreEqual(v, v2[0])
AreEqual(pack(' c\t', 'a'), 'a')
def test_padding_len():
AreEqual(unpack('4xi','\x00\x01\x02\x03\x01\x00\x00\x00'), (1,))
def test_cp3092():
for format in [ "i", "I", "l", "L"]:
mem = "\x01\x00\x00\x00" * 8
AreEqual(len(mem), 32)
fmt = "<8" + format
AreEqual(calcsize(fmt), 32)
AreEqual(unpack(fmt, mem), (1,)*8)
fmt = "<2" + format + "4x5" + format
AreEqual(calcsize(fmt), 32)
AreEqual(unpack(fmt, mem), (1,)*7)
fmt = "<" + format + format + "4x5" + format
AreEqual(calcsize(fmt), 32)
AreEqual(unpack(fmt, mem), (1,)*7)
fmt = "<32x"
AreEqual(calcsize(fmt), 32)
AreEqual(unpack(fmt, mem), ())
fmt = "<28x" + format
AreEqual(calcsize(fmt), 32)
AreEqual(unpack(fmt, mem), (1,))
def test_cp9347():
temp_list = [("2xB", '\x00\x00\xff', 255),
("4s4x", 'AAAA\x00\x00\x00\x00', "AAAA"),
("x", '\x00'),
("ix", '\x01\x00\x00\x00\x00', 1),
("ix", '\x01\x00\x00\x80\x00', -(2**(32-1)-1)),
("xI", '\x00\x00\x00\x00\xff\xff\xff\xff', 2**32-1),
("xlx", '\x00\x00\x00\x00x\xec\xff\xff\x00', -5000),
("LxL", '~\x00\x00\x00\x00\x00\x00\x00~\x00\x00\x00', 126, 126),
("LxxL", '~\x00\x00\x00\x00\x00\x00\x00~\x00\x00\x00', 126, 126),
("32xLL", '\x00' *32 + '~\x00\x00\x00~\x00\x00\x00', 126, 126),
("LxL8xLL", '~\x00\x00\x00\x00\x00\x00\x00~\x00\x00\x00' + '\x00'*8 + '~\x00\x00\x00'*2, 126, 126, 126, 126),
]
for stuff in temp_list:
format = stuff[0]
expected_val = stuff[1]
params = stuff[2:]
actual = pack(format, *params)
AreEqual(expected_val, actual)
AreEqual(unpack(format, actual),
params)
def test_negative():
AssertError(_struct.error, pack, 'x', 1)
AssertError(_struct.error, unpack, 'hh', pack('h', 1))
AssertError(_struct.error, pack, 'a', 1)
# BUG: 1033
# such chars should be in the leading position only
for x in '=@<>!':
AssertError(_struct.error, pack, 'h'+x+'h', 1, 2)
AssertError(_struct.error, pack, 'c', 300)
def test_calcsize_alignment():
'''
TODO: Side by side test?
'''
struct_format = "xcbBhHiIlLqQfdspP"
expected = {'lB': 5, 'PQ': 16, 'BB': 2, 'BL': 8, 'lL': 8, 'BH': 4, 'ci': 8,
'lH': 6, 'lI': 8, 'ch': 4, 'BP': 8, 'BQ': 16, 'lP': 8,
'lQ': 16, 'PH': 6, 'Bd': 16, 'Bf': 8, 'lb': 5, 'lc': 5,
'Bb': 2, 'Bc': 2, 'Bl': 8, 'sd': 16, 'll': 8, 'Bh': 4, 'Bi': 8,
'lh': 6, 'li': 8, 'fb': 5, 'cc': 2, 'Bp': 2, 'Bq': 16, 'lp': 5,
'cb': 2, 'sI': 8, 'Bx': 2, 'lx': 5, 'qQ': 16, 'qP': 12,
'dl': 12, 'dh': 10, 'di': 12, 'df': 12, 'dd': 16, 'db': 9,
'dc': 9, 'BI': 8, 'sB': 2, 'qB': 9, 'dx': 9, 'qI': 12, 'qH':10,
'qL': 12, 'dp': 9, 'dq': 16, 'qq': 16, 'qp': 9, 'qs': 9,
'dH': 10, 'dI': 12, 'Bs': 2, 'dB': 9, 'qc': 9, 'qb': 9, 'qd': 16,
'qx': 9, 'qi': 12, 'qh': 10, 'ph': 4, 'ql': 12, 'dP': 12, 'dQ': 16,
'fp': 5, 'Pp': 5, 'Pq': 16, 'fq': 16, 'sH': 4, 'HP': 8, 'HQ': 16,
'Pb': 5, 'Pc': 5, 'HH': 4, 'HI': 8, 'Pf': 8, 'HL': 8, 'HB': 3,
'pi': 8, 'Ph': 6, 'Pi': 8, 'cq': 16, 'Pl': 8, 'Hx': 3, 'cp': 2,
'fH': 6, 'Hs': 3, 'Hp': 3, 'Hq': 16, 'PB': 5, 'fx': 5, 'Hh': 4,
'Hi': 8, 'Hl': 8, 'Qx': 9, 'Hb': 3, 'Hc': 3, 'pH': 4, 'PI': 8,
'Hf': 8, 'Hd': 16, 'bd': 16, 'lf': 8, 'bf': 8, 'fI': 8, 'pQ': 16,
'bb': 2, 'bc': 2, 'bl': 8, 'qf': 12, 'bh': 4, 'bi': 8, 'cH': 4,
'bp': 2, 'bq': 16, 'ld': 16, 'bs': 2, 'pI': 8, 'pP': 8, 'bx': 2,
'Ps': 5, 'bB': 2, 'bL': 8, 'cI': 8, 'bH': 4, 'bI': 8, 'sx': 2,
'ds': 9, 'fc': 5, 'bP': 8, 'bQ': 16, 'px': 2, 'Pd': 16, 'Qd': 16,
'xh': 4, 'xi': 8, 'xl': 8, 'cl': 8, 'xb': 2, 'xc': 2, 'sL': 8,
'xf': 8, 'cf': 8, 'xd': 16, 'cd': 16, 'pB': 2, 'fh': 6,
'xx': 2, 'cx': 2, 'pp': 2, 'Px': 5, 'fi': 8, 'cs': 2, 'xs': 2,
'xp': 2, 'xq': 16, 'pL': 8, 'ps': 2, 'xH': 4, 'xI': 8,
'lq': 16, 'xL': 8, 'cL': 8, 'xB': 2, 'cB': 2, 'sf': 8, 'PL': 8,
'pb': 2, 'pc': 2, 'pf': 8, 'pd': 16, 'xP': 8, 'xQ': 16,
'Ll': 8, 'pl': 8, 'ls': 5, 'fP': 8, 'hx': 3, 'QP': 12, 'hs': 3,
'hp': 3, 'hq': 16, 'hh': 4, 'hi': 8, 'hl': 8, 'hb': 3, 'hc': 3,
'hf': 8, 'cQ': 16, 'hd': 16, 'cP': 8, 'sc': 2, 'hP': 8,
'hQ': 16, 'fQ': 16, 'ss': 2, 'hH': 4, 'hI': 8, 'hL': 8, 'hB': 3,
'sq': 16, 'Ls': 5, 'Lf': 8, 'ix': 5, 'Ld': 16, 'sb': 2, 'Lb': 5,
'Lc': 5, 'iq': 16, 'ip': 5, 'is': 5, 'Lh': 6, 'Li': 8, 'ii': 8,
'ih': 6, 'il': 8, 'Lp': 5, 'Lq': 16, 'ic': 5, 'ib': 5,
'id': 16, 'Lx': 5, 'if': 8, 'LB': 5, 'iQ': 16, 'iP': 8,
'LL': 8, 'pq': 16, 'si': 8, 'LH': 6, 'LI': 8, 'iI': 8,
'iH': 6, 'sh': 4, 'iL': 8, 'LP': 8, 'LQ': 16, 'iB': 5,
'Qq': 16, 'Qp': 9, 'Qs': 9, 'fs': 5, 'IQ': 16, 'IP': 8,
'sQ': 16, 'sP': 8, 'PP': 8, 'II': 8, 'IH': 6, 'Qc': 9,
'Qb': 9, 'fd': 16, 'IL': 8, 'ff': 8, 'Qf': 12, 'Qi': 12,
'Qh': 10, 'IB': 5, 'fl': 8, 'Ql': 12, 'QQ': 16, 'Ix': 5,
'dL': 12, 'Iq': 16, 'Ip': 5, 'Is': 5, 'sp': 2, 'QL': 12,
'Ii': 8, 'Ih': 6, 'fB': 5, 'QB': 9, 'Il': 8, 'sl': 8,
'QI': 12, 'QH': 10, 'Ic': 5,'Ib': 5, 'fL': 8, 'Id': 16, 'If': 8}
for x in struct_format:
for y in struct_format:
temp_str = str(x) + str(y)
if is_64 and "P" in temp_str:
continue #CodePlex 17683 - we need to test against 64-bit CPython
Assert(expected[temp_str] == calcsize(temp_str),
"_struct.Struct(" + temp_str + ").size is broken")
def test_new_init():
"""tests for calling __new__/__init__ directly on the Struct object"""
for x in (_struct.Struct.__new__(_struct.Struct), _struct.Struct.__new__(_struct.Struct, a = 2)):
# state of uninitialized object...
AreEqual(x.size, -1)
AreEqual(x.format, None)
AssertErrorWithMessage(_struct.error, "pack requires exactly -1 arguments", x.pack)
AssertErrorWithMessage(_struct.error, "unpack requires a string argument of length -1", x.unpack, '')
# invalid format passed to __init__ - format string is updated but old format info is stored...
a = _struct.Struct('c')
try:
a.__init__('bad')
AssertUnreachable()
except _struct.error, e:
pass
AreEqual(a.format, 'bad')
AreEqual(a.pack('1'), '1')
AreEqual(a.unpack('1'), ('1', ))
# and then back to a valid format
a.__init__('i')
AreEqual(a.format, 'i')
AreEqual(a.pack(0), '\x00\x00\x00\x00')
AreEqual(a.unpack('\x00\x00\x00\x00'), (0, ))
@skip("silverlight") # no weak refs on Silverlight
def test_weakref():
"""weakrefs to struct objects are supported"""
x = _struct.Struct('i')
import _weakref
AreEqual(_weakref.proxy(x).size, x.size)
def test_cp16476():
for expected, encoded_val in [(156909, '\xedd\x02\x00'),
(sys.maxint, '\xff\xff\xff\x7f'),
(sys.maxint-1, '\xfe\xff\xff\x7f'),
(sys.maxint-2, '\xfd\xff\xff\x7f'),
(sys.maxint+1, '\x00\x00\x00\x80'),
(sys.maxint+2, '\x01\x00\x00\x80'),
(sys.maxint+3, '\x02\x00\x00\x80'),
(2**16, '\x00\x00\x01\x00'),
(2**16+1, '\x01\x00\x01\x00'),
(2**16-1, '\xff\xff\x00\x00'),
(0, '\x00\x00\x00\x00'),
(1, '\x01\x00\x00\x00'),
]:
actual_val = unpack('I', encoded_val)
AreEqual((expected,), actual_val)
AreEqual(type(expected), type(actual_val[0]))
def test_unpack_from():
'''
TODO: just a sanity test for now. Needs far more testing.
'''
import array
_struct.unpack_from("", array.array("c"))
AreEqual(_struct.unpack_from("", array.array("c")),
())
#--MAIN------------------------------------------------------------------------
run_test(__name__)
| apache-2.0 | -3,611,325,586,160,018,400 | 41.060484 | 126 | 0.417218 | false |
joansmith/openmicroscopy | components/tools/OmeroPy/test/unit/clitest/test_admin.py | 2 | 10383 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test of the omero admin control.
Copyright 2008 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import pytest
from path import path
from glob import glob
import omero
import omero.clients
from omero.cli import CLI, NonZeroReturnCode
from omero.plugins.admin import AdminControl
from omero.plugins.prefs import PrefsControl
from mocks import MockCLI
omeroDir = path(os.getcwd()) / "build"
GRID_FILES = ["templates.xml", "default.xml", "windefault.xml"]
ETC_FILES = ["ice.config", "master.cfg", "internal.cfg"]
@pytest.fixture(autouse=True)
def tmpadmindir(tmpdir):
etc_dir = tmpdir.mkdir('etc')
etc_dir.mkdir('grid')
tmpdir.mkdir('var')
templates_dir = etc_dir.mkdir('templates')
templates_dir.mkdir('grid')
old_templates_dir = path() / ".." / ".." / ".." / "etc" / "templates"
for f in glob(old_templates_dir / "*.cfg"):
path(f).copy(path(templates_dir))
for f in glob(old_templates_dir / "grid" / "*.xml"):
path(f).copy(path(templates_dir / "grid"))
path(old_templates_dir / "ice.config").copy(path(templates_dir))
return path(tmpdir)
class TestAdmin(object):
@pytest.fixture(autouse=True)
def setup_method(self, tmpadmindir):
# Other setup
self.cli = MockCLI()
self.cli.dir = tmpadmindir
self.cli.register("admin", AdminControl, "TEST")
self.cli.register("config", PrefsControl, "TEST")
def teardown_method(self, method):
self.cli.teardown_method(method)
def invoke(self, string, fails=False):
try:
self.cli.invoke(string, strict=True)
if fails:
assert False, "Failed to fail"
except:
if not fails:
raise
def testMain(self):
try:
self.invoke("")
except NonZeroReturnCode:
# Command-loop not implemented
pass
#
# Async first because simpler
#
def XtestStartAsync(self):
# DISABLED: https://trac.openmicroscopy.org.uk/ome/ticket/10584
self.cli.addCall(0)
self.cli.checksIceVersion()
self.cli.checksStatus(1) # I.e. not running
self.invoke("admin startasync")
self.cli.assertCalled()
self.cli.assertStderr(
['No descriptor given. Using etc/grid/default.xml'])
def testStopAsyncRunning(self):
self.cli.checksStatus(0) # I.e. running
self.cli.addCall(0)
self.invoke("admin stopasync")
self.cli.assertStderr([])
self.cli.assertStdout([])
def testStopAsyncNotRunning(self):
self.cli.checksStatus(1) # I.e. not running
self.invoke("admin stopasync", fails=True)
self.cli.assertStderr(["Server not running"])
self.cli.assertStdout([])
def testStop(self):
self.cli.checksStatus(0) # I.e. running
self.cli.addCall(0)
self.cli.checksStatus(1) # I.e. not running
self.invoke("admin stop")
self.cli.assertStderr([])
self.cli.assertStdout(['Waiting on shutdown. Use CTRL-C to exit'])
#
# STATUS
#
def testStatusNodeFails(self):
# Setup the call to bin/omero admin ice node
popen = self.cli.createPopen()
popen.wait().AndReturn(1)
self.cli.mox.ReplayAll()
pytest.raises(NonZeroReturnCode, self.invoke, "admin status")
def testStatusSMFails(self):
# Setup the call to bin/omero admin ice node
popen = self.cli.createPopen()
popen.wait().AndReturn(0)
# Setup the call to session manager
control = self.cli.controls["admin"]
control._intcfg = lambda: ""
def sm(*args):
raise Exception("unknown")
control.session_manager = sm
self.cli.mox.ReplayAll()
pytest.raises(NonZeroReturnCode, self.invoke, "admin status")
def testStatusPasses(self, tmpdir, monkeypatch):
ice_config = tmpdir / 'ice.config'
ice_config.write('omero.host=localhost\nomero.port=4064')
monkeypatch.setenv("ICE_CONFIG", ice_config)
# Setup the call to bin/omero admin ice node
popen = self.cli.createPopen()
popen.wait().AndReturn(0)
# Setup the call to session manager
control = self.cli.controls["admin"]
control._intcfg = lambda: ""
def sm(*args):
class A(object):
def create(self, *args):
raise omero.WrappedCreateSessionException()
return A()
control.session_manager = sm
self.cli.mox.ReplayAll()
self.invoke("admin status")
assert 0 == self.cli.rv
def check_registry(topdir, prefix='', registry=4061, **kwargs):
for key in ['master.cfg', 'internal.cfg']:
s = path(topdir / "etc" / key).text()
assert 'tcp -h 127.0.0.1 -p %s%s' % (prefix, registry) in s
def check_ice_config(topdir, prefix='', ssl=4064, **kwargs):
config_text = path(topdir / "etc" / "ice.config").text()
pattern = re.compile('^omero.port=\d+$', re.MULTILINE)
matches = pattern.findall(config_text)
assert matches == ["omero.port=%s%s" % (prefix, ssl)]
def check_default_xml(topdir, prefix='', tcp=4063, ssl=4064, **kwargs):
routerport = (
'<variable name="ROUTERPORT" value="%s%s"/>' % (prefix, ssl))
insecure_routerport = (
'<variable name="INSECUREROUTER" value="OMERO.Glacier2'
'/router:tcp -p %s%s -h @omero.host@"/>' % (prefix, tcp))
client_endpoints = (
'client-endpoints="ssl -p ${ROUTERPORT}:tcp -p %s%s"'
% (prefix, tcp))
for key in ['default.xml', 'windefault.xml']:
s = path(topdir / "etc" / "grid" / key).text()
assert routerport in s
assert insecure_routerport in s
assert client_endpoints in s
class TestJvmCfg(object):
"""Test template files regeneration"""
@pytest.fixture(autouse=True)
def setup_method(self, tmpadmindir):
self.cli = CLI()
self.cli.register("admin", AdminControl, "TEST")
self.cli.register("config", PrefsControl, "TEST")
self.args = ["admin", "jvmcfg"]
self.cli.dir = path(tmpadmindir)
def testNoTemplatesGeneration(self):
"""Test no template files are generated by the jvmcfg subcommand"""
# Test non-existence of configuration files
for f in GRID_FILES:
assert not os.path.exists(path(self.cli.dir) / "etc" / "grid" / f)
for f in ETC_FILES:
assert not os.path.exists(path(self.cli.dir) / "etc" / f)
        # Call the jvmcfg command and check that no configuration files are generated
self.cli.invoke(self.args, strict=True)
for f in GRID_FILES:
assert not os.path.exists(path(self.cli.dir) / "etc" / "grid" / f)
for f in ETC_FILES:
assert not os.path.exists(path(self.cli.dir) / "etc" / f)
@pytest.mark.parametrize(
'suffix', ['', '.blitz', '.indexer', '.pixeldata', '.repository'])
def testInvalidJvmCfgStrategy(self, suffix, tmpdir):
"""Test invalid JVM strategy configuration leads to CLI error"""
key = "omero.jvmcfg.strategy%s" % suffix
self.cli.invoke(["config", "set", key, "bad"], strict=True)
with pytest.raises(NonZeroReturnCode):
self.cli.invoke(self.args, strict=True)
class TestRewrite(object):
"""Test template files regeneration"""
@pytest.fixture(autouse=True)
def setup_method(self, tmpadmindir):
self.cli = CLI()
self.cli.register("admin", AdminControl, "TEST")
self.cli.register("config", PrefsControl, "TEST")
self.args = ["admin", "rewrite"]
self.cli.dir = path(tmpadmindir)
def testTemplatesGeneration(self):
"""Test template files are generated by the rewrite subcommand"""
# Test non-existence of configuration files
for f in GRID_FILES:
assert not os.path.exists(path(self.cli.dir) / "etc" / "grid" / f)
for f in ETC_FILES:
assert not os.path.exists(path(self.cli.dir) / "etc" / f)
        # Call the rewrite command and check that the configuration files are generated
self.cli.invoke(self.args, strict=True)
for f in GRID_FILES:
assert os.path.exists(path(self.cli.dir) / "etc" / "grid" / f)
for f in ETC_FILES:
assert os.path.exists(path(self.cli.dir) / "etc" / f)
def testForceRewrite(self, monkeypatch):
"""Test template regeneration while the server is running"""
        # Call the rewrite command while the server appears to be running and expect it to fail
monkeypatch.setattr(AdminControl, "status", lambda *args, **kwargs: 0)
with pytest.raises(NonZeroReturnCode):
self.cli.invoke(self.args, strict=True)
def testOldTemplates(self):
old_templates = path(__file__).dirname() / ".." / "old_templates.xml"
old_templates.copy(
path(self.cli.dir) / "etc" / "templates" / "grid" /
"templates.xml")
with pytest.raises(NonZeroReturnCode):
self.cli.invoke(self.args, strict=True)
@pytest.mark.parametrize('prefix', [None, 1])
@pytest.mark.parametrize('registry', [None, 111])
@pytest.mark.parametrize('tcp', [None, 222])
@pytest.mark.parametrize('ssl', [None, 333])
def testExplicitPorts(self, registry, ssl, tcp, prefix, monkeypatch):
"""
Test the omero.ports.xxx configuration properties during the generation
of the configuration files
"""
# Skip the JVM settings calculation for this test
monkeypatch.setattr(omero.install.jvmcfg, "adjust_settings",
lambda x, y: {})
kwargs = {}
if prefix:
kwargs["prefix"] = prefix
if registry:
kwargs["registry"] = registry
if tcp:
kwargs["tcp"] = tcp
if ssl:
kwargs["ssl"] = ssl
for (k, v) in kwargs.iteritems():
self.cli.invoke(
["config", "set", "omero.ports.%s" % k, "%s" % v],
strict=True)
self.cli.invoke(self.args, strict=True)
check_ice_config(self.cli.dir, **kwargs)
check_registry(self.cli.dir, **kwargs)
check_default_xml(self.cli.dir, **kwargs)
| gpl-2.0 | 6,743,931,801,898,122,000 | 32.066879 | 79 | 0.603486 | false |
outshines/opendcp | octans/octans/config.py | 5 | 1519 | #!/usr/bin/env python
#
# Copyright (C) 2016 Weibo Inc.
#
# This file is part of Opendcp.
#
# Opendcp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# Opendcp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Opendcp. if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
# Author: WhiteBlue
# Time : 2016/07/20
import yaml
'''
Load config items in config.yml
'''
class Config:
def __init__(self):
self._params = {
"mysql": "",
"get_key_url": "",
"pool_size": "",
"pool_recycle": ""
}
# read config
def get(self, key):
return self._params[key]
# Load config file
# param config_file: config file with path
def load(self, config_file):
with open(config_file) as f:
params = yaml.safe_load(f)
for key in self._params.keys():
if key not in params:
raise Exception("param '{}' not found in config file")
self._params[key] = params[key]
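# Illustrative usage (assumes a config.yml providing the keys listed above):
#   config = Config()
#   config.load("config.yml")
#   db_uri = config.get("mysql")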
| gpl-2.0 | -768,751,680,258,237,400 | 27.660377 | 78 | 0.61817 | false |
rosmo/ansible | lib/ansible/modules/network/meraki/meraki_mr_l3_firewall.py | 5 | 9579 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Breit (@kbreit) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_mr_l3_firewall
short_description: Manage MR access point layer 3 firewalls in the Meraki cloud
version_added: "2.7"
description:
- Allows for creation, management, and visibility into layer 3 firewalls implemented on Meraki MR access points.
- Module is not idempotent as of current release.
options:
state:
description:
- Create or modify an organization.
type: str
choices: [ present, query ]
default: present
org_name:
description:
- Name of organization.
type: str
org_id:
description:
- ID of organization.
type: int
net_name:
description:
- Name of network containing access points.
type: str
net_id:
description:
- ID of network containing access points.
type: str
number:
description:
- Number of SSID to apply firewall rule to.
type: int
aliases: [ ssid_number ]
ssid_name:
description:
- Name of SSID to apply firewall rule to.
type: str
aliases: [ ssid ]
allow_lan_access:
description:
- Sets whether devices can talk to other devices on the same LAN.
type: bool
default: yes
rules:
description:
- List of firewall rules.
type: list
suboptions:
policy:
description:
- Specifies the action that should be taken when rule is hit.
type: str
choices: [ allow, deny ]
protocol:
description:
- Specifies protocol to match against.
type: str
choices: [ any, icmp, tcp, udp ]
dest_port:
description:
- Comma-seperated list of destination ports to match.
type: str
dest_cidr:
description:
- Comma-separated list of CIDR notation networks to match.
type: str
comment:
description:
- Optional comment describing the firewall rule.
type: str
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Create single firewall rule
meraki_mr_l3_firewall:
auth_key: abc123
state: present
org_name: YourOrg
net_id: 12345
number: 1
rules:
- comment: Integration test rule
policy: allow
protocol: tcp
dest_port: 80
dest_cidr: 192.0.2.0/24
allow_lan_access: no
delegate_to: localhost
- name: Enable local LAN access
meraki_mr_l3_firewall:
auth_key: abc123
state: present
org_name: YourOrg
net_id: 123
number: 1
rules:
allow_lan_access: yes
delegate_to: localhost
- name: Query firewall rules
meraki_mr_l3_firewall:
auth_key: abc123
state: query
org_name: YourOrg
net_name: YourNet
number: 1
delegate_to: localhost
'''
RETURN = r'''
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
def assemble_payload(meraki):
params_map = {'policy': 'policy',
'protocol': 'protocol',
'dest_port': 'destPort',
'dest_cidr': 'destCidr',
'comment': 'comment',
}
rules = []
for rule in meraki.params['rules']:
proposed_rule = dict()
for k, v in rule.items():
proposed_rule[params_map[k]] = v
rules.append(proposed_rule)
payload = {'rules': rules}
return payload
def get_rules(meraki, net_id, number):
path = meraki.construct_path('get_all', net_id=net_id, custom={'number': number})
response = meraki.request(path, method='GET')
if meraki.status == 200:
return response
def get_ssid_number(name, data):
for ssid in data:
if name == ssid['name']:
return ssid['number']
return False
def get_ssids(meraki, net_id):
path = meraki.construct_path('get_all', net_id=net_id)
return meraki.request(path, method='GET')
def main():
# define the available arguments/parameters that a user can pass to
# the module
fw_rules = dict(policy=dict(type='str', choices=['allow', 'deny']),
protocol=dict(type='str', choices=['tcp', 'udp', 'icmp', 'any']),
dest_port=dict(type='str'),
dest_cidr=dict(type='str'),
comment=dict(type='str'),
)
argument_spec = meraki_argument_spec()
argument_spec.update(state=dict(type='str', choices=['present', 'query'], default='present'),
net_name=dict(type='str'),
net_id=dict(type='str'),
number=dict(type='str', aliases=['ssid_number']),
ssid_name=dict(type='str', aliases=['ssid']),
rules=dict(type='list', default=None, elements='dict', options=fw_rules),
allow_lan_access=dict(type='bool', default=True),
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
meraki = MerakiModule(module, function='mr_l3_firewall')
meraki.params['follow_redirects'] = 'all'
query_urls = {'mr_l3_firewall': '/networks/{net_id}/ssids/{number}/l3FirewallRules'}
update_urls = {'mr_l3_firewall': '/networks/{net_id}/ssids/{number}/l3FirewallRules'}
meraki.url_catalog['get_all'].update(query_urls)
meraki.url_catalog['update'] = update_urls
payload = None
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
# FIXME: Work with Meraki so they can implement a check mode
if module.check_mode:
meraki.exit_json(**meraki.result)
# execute checks for argument completeness
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
orgs = None
if org_id is None:
orgs = meraki.get_orgs()
for org in orgs:
if org['name'] == meraki.params['org_name']:
org_id = org['id']
net_id = meraki.params['net_id']
if net_id is None:
if orgs is None:
orgs = meraki.get_orgs()
net_id = meraki.get_net_id(net_name=meraki.params['net_name'],
data=meraki.get_nets(org_id=org_id))
number = meraki.params['number']
if meraki.params['ssid_name']:
number = get_ssid_number(meraki.params['ssid_name'], get_ssids(meraki, net_id))
if meraki.params['state'] == 'query':
meraki.result['data'] = get_rules(meraki, net_id, number)
elif meraki.params['state'] == 'present':
rules = get_rules(meraki, net_id, number)
path = meraki.construct_path('get_all', net_id=net_id, custom={'number': number})
if meraki.params['rules']:
payload = assemble_payload(meraki)
else:
payload = dict()
update = False
try:
if len(rules) != len(payload['rules']): # Quick and simple check to avoid more processing
update = True
if update is False:
for r in range(len(rules) - 2):
                    if meraki.is_update_required(rules[r], payload['rules'][r]) is True:
update = True
except KeyError:
pass
if rules[len(rules) - 2] != meraki.params['allow_lan_access']:
update = True
if update is True:
payload['allowLanAccess'] = meraki.params['allow_lan_access']
response = meraki.request(path, method='PUT', payload=json.dumps(payload))
if meraki.status == 200:
meraki.result['data'] = response
meraki.result['changed'] = True
else:
meraki.result['data'] = rules
# in the event of a successful module execution, you will want to
# simple AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,731,177,552,439,459,000 | 31.917526 | 112 | 0.584821 | false |