repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
stringlengths 5–92 | stringlengths 4–232 | stringclasses 19 values | stringlengths 4–7 | stringlengths 721–1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51–99.9 | int64 15–997 | float64 0.25–0.97 | bool 1 class
---|---|---|---|---|---|---|---|---|---|---
mgh14/470ai | bzagents/Agent.py | 1 | 7556 |
import telnetlib
import sys
import time
import random
import math
import GnuplotUtil
from tankUtil import *
class Agent(object):
# constants
SERVER_DELIMITER = "\n"
LIST_START = "start" + SERVER_DELIMITER
LIST_END = "end" + SERVER_DELIMITER
SERVER_CONNECT_ACKNOWLEDGED = "bzrobots 1" + SERVER_DELIMITER
NOT_SET = "not_set"
# member variables
ipAddr = NOT_SET
port = NOT_SET
socket = NOT_SET
constants = dict()
iHaveEnemyFlag = False
worldHalfSize = NOT_SET
myBaseCoords = NOT_SET
myFlagStand = NOT_SET
def __init__(self, ip, port):
self.ipAddr = ip
self.port = port
# connect to telnet bzrflag server
self.socket = telnetlib.Telnet(ip, port)
response = self.socket.read_until(self.SERVER_DELIMITER)
if (response == self.SERVER_CONNECT_ACKNOWLEDGED):
print "connect to server: successful"
else:
print "failed connection!"
sys.exit(-1)
# register and prepare agent
self.registerAgent()
self.loadConstants()
self.setMyBase()
self.setMyFlagStand()
def registerAgent(self):
self.socket.write("agent 1" + self.SERVER_DELIMITER)
print "Registration Successful on port " + str(self.port)
def loadConstants(self):
constList = self._query("constants")
for item in constList:
self.constants[item[0]] = item[1]
self.worldHalfSize = int(self.constants["worldsize"]) / 2
print self.constants
def setMyBase(self):
bases = self._query("bases")
for base in bases:
if(base[0] == self.constants["team"]):
point1 = self.getAdjustedPoint((int(float(base[1])),int(float(base[2]))))
point2 = self.getAdjustedPoint((int(float(base[3])),int(float(base[4]))))
point3 = self.getAdjustedPoint((int(float(base[5])),int(float(base[6]))))
point4 = self.getAdjustedPoint((int(float(base[7])),int(float(base[8]))))
self.myBaseCoords = [point1,point2,point3,point4]
return
print "Error: no base assigned!"
def setMyFlagStand(self):
flags = self._query("flags")
for flag in flags:
if(flag[0] == self.constants["team"]):
flagPoint = self.getAdjustedPoint((int(float(flag[2])),int(float(flag[3]))))
self.myFlagStand = [flagPoint[0],flagPoint[1]]
def commandAgent(self, command):
#print "Cmd: " + command
self.socket.write(command + self.SERVER_DELIMITER)
responseLine1 = self.socket.read_until(self.SERVER_DELIMITER).rstrip()
responseLine2 = self.socket.read_until(self.SERVER_DELIMITER)
#print "ResponseL1: " + responseLine1
#print "ResponseL2: " + responseLine2
def stop(self, tankNum):
self.commandAgent("angvel " + str(tankNum) + " 0")
self.commandAgent("speed " + str(tankNum) + " 0")
def closeSocket(self):
self.socket.close()
# for game queries
def _query(self, queryCommand):
self.socket.write(queryCommand + self.SERVER_DELIMITER)
response = self.socket.read_until(self.SERVER_DELIMITER).rstrip();
stringList = self.socket.read_until(self.LIST_END)
        stringList = stringList[len(self.LIST_START):-1*(len(self.LIST_END) + 1)] # strip off 'start\n' and 'end\n'
listOfLines = stringList.split(self.SERVER_DELIMITER) # split strings by newline
# split each line by whitespace
lineArrays = []
for line in listOfLines:
array = line.split()
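            # the first token on each line is the echoed keyword (e.g. "base",
            # "flag", "mytank"); drop it so each row starts with its data fields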
array.pop(0)
lineArrays.append(array)
return lineArrays
def _getRawResponse(self, queryCommand):
#print "query: " + query
self.socket.write(queryCommand + self.SERVER_DELIMITER)
response = self.socket.read_until(self.SERVER_DELIMITER).rstrip();
#print "ResponseL1: " + response
stringList = self.socket.read_until(self.LIST_END)
return stringList
def printList(self,listToPrint):
print "List:"
for current in listToPrint:
print str(current)
print "(end list)"
def _isCoordinateInBase(self, coords):
# top-right corner check
trCorner = (coords[0] < self.myBaseCoords[0][0] and coords[1] < self.myBaseCoords[0][1])
# bottom-right corner check
brCorner = (coords[0] < self.myBaseCoords[1][0] and coords[1] > self.myBaseCoords[1][1])
# bottom-left corner check
blCorner = (coords[0] > self.myBaseCoords[2][0] and coords[1] > self.myBaseCoords[2][1])
# top-left corner check
tlCorner = (coords[0] > self.myBaseCoords[3][0] and coords[1] < self.myBaseCoords[3][1])
return (trCorner and brCorner and blCorner and tlCorner)
def _isMyFlagInMyBase(self):
flags = self._query("flags")
for flag in flags:
if(flag[0] == self.constants["team"]):
return self._isCoordinateInBase(self._getMyFlagPosition())
return -1
def _isMyFlagCaptured(self):
flags = self._query("flags")
for flag in flags:
if(flag[0] == self.constants["team"]):
return (not (flag[1] == self.constants["team"]))
return -1
def _getMyFlagPosition(self):
flags = self._query("flags")
for flag in flags:
if(flag[0] == self.constants["team"]):
flagPoint = self.getAdjustedPoint((int(float(flag[2])),int(float(flag[3]))))
return [flagPoint[0],flagPoint[1]]
return [-10000,-10000] # represents an error (should be found above)
def _getEnemyFlagPositions(self):
flags = self._query("flags")
positions = []
for flag in flags:
if(flag[0] == self.constants["team"]):
continue
flagPos = self.getAdjustedPoint((int(float(flag[2])),int(float(flag[3]))))
positions.append(flagPos)
return positions
def _iHaveEnemyFlag(self):
flags = self._query("flags")
for flag in flags:
if(flag[0] == self.constants["team"]): # don't count my own flag
continue
if(flag[1] == self.constants["team"]):
return True
return False
def _getCurrentPositionOfTank(self,tankNum):
tankInfo = self._query("mytanks")[tankNum]
return self.getAdjustedPoint([float(tankInfo[6]),float(tankInfo[7])])
def distance(self, a , b):
return math.sqrt((b[1]-a[1])**2+(b[0]-a[0])**2)
def getDesiredAngle(self, tankNum, pointToVisit):
currentPosition = self._getCurrentPositionOfTank(tankNum)
return self.getAdjustedAngle(math.atan2(pointToVisit[1]-currentPosition[1],
pointToVisit[0]-currentPosition[0]))
def setAngularVelocity(self, tankNum, angVel, desiredAngle):
tankInfo = self._query("mytanks")[tankNum]
currAngle = self.getAdjustedAngle(tankInfo[8])
absAngVel = abs(angVel)
# figure out which way the tank should turn
if(desiredAngle - currAngle > 0):
if(desiredAngle - math.pi > currAngle):
angVel = -1 * absAngVel
else:
angVel = absAngVel
else:
if(desiredAngle + math.pi > currAngle):
angVel = -1 * absAngVel
else:
angVel = absAngVel
self.commandAgent("angvel " + str(tankNum) + " " + str(angVel))
def setAngularVelocityByPoint(self, tankNum, angVel, pointToVisit):
self.setAngularVelocity(tankNum, angVel, self.getDesiredAngle(tankNum,pointToVisit))
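    # The two helpers below normalize server values: getAdjustedAngle maps an angle
    # in radians into the 0..2*pi range (the server appears to report headings in
    # (-pi, pi]), and getAdjustedPoint shifts a point from the server's center-origin
    # frame to a corner-origin frame by adding worldHalfSize to each coordinate.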
def getAdjustedAngle(self,rawAngle):
rawAngle = float(rawAngle)
twoPi = 2*math.pi
if(rawAngle > twoPi):
return math.fmod(rawAngle,twoPi)
if(rawAngle >= 0) and (rawAngle < math.pi):
return rawAngle
if(rawAngle < 0):
return twoPi + rawAngle
return rawAngle
def getAdjustedPoint(self,point):
return [self.worldHalfSize + point[0],self.worldHalfSize + point[1]]
def getMyPosition(self, tankNum):
mytanks = self._query("mytanks")
tankInfo = mytanks[tankNum]
return self.getAdjustedPoint([float(tankInfo[6]),float(tankInfo[7])])
def getMyAngle(self, tankNum):
mytanks = self._query("mytanks")
tankInfo = mytanks[tankNum]
return self.getAdjustedAngle(float(tankInfo[8]))
def play(self): # driver function for beginning AI simulation
print "no implemented play method: tanks will just sit."
| gpl-3.0 | -7,796,457,630,180,964,000 | 27.730038 | 110 | 0.69468 | false |
ssindow/ZMeter | MeterGUI.py | 1 | 29977 |
# -*- coding: utf-8 -*-
"""
GUI Code
"""
import LinkGPIB
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from pyqtgraph.widgets.RemoteGraphicsView import RemoteGraphicsView
import numpy as np
from time import sleep, strftime
## Constants
versionTxt = 'CellShot V2.6.3'
observerModel = 2602 #2461
sourceModel = 2401
timerPeriod = .4
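# NB: QtCore.QTimer.start() interprets its argument as milliseconds, so 0.4 asks for
# a sub-millisecond interval (depending on the Qt binding this is rounded down to
# 0 ms, i.e. the timer fires about as fast as the event loop allows).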
bufferDepth = 1
plotWindow = 2048*4
plotSubsampling = 22
remotePlot = False
setAntialias = False
class MeterGUI(QtGui.QMainWindow):
# User-Defined constants
triggered = False
defaultTopThreshold = '3.6E+6'
defaultBotThreshold = '3.4E+6'
defaultPercentage = '8'
obSettingUpdated = False # True
ijSettingUpdated = False # True
revertPolarityClicked = False
defaultYMax = '2.5E+6'
defaultYMin = '0.5E+0'
def __init__(self, readport='COM8', baudrate=230400, bufferSize=bufferDepth,
winSize=(800,400)): ## COM port and baudrate are for serial comm.
super(MeterGUI, self).__init__()
self.lastUpdate = 0.0
self.startTime = 0.1
self.data = np.empty((16 * plotWindow, 2))
self.ptr = 0
self.avgFPS = 0.0
# self.kb = KeyBoard()
self.baseLineValue = float(self.defaultBotThreshold)
self.topLineValue = float(self.defaultTopThreshold)
self.botLineValue = float(self.defaultBotThreshold)
self.percentThreshold = float(self.defaultPercentage)
self.placeholder = 'Messages\nto be shown\nhere.'
self.injectionCount = 0
# QTimer
self.timer = QtCore.QTimer()
# GUI Layout
pg.setConfigOptions(antialias=setAntialias)
self.layout = pg.LayoutWidget()
# self.kb = QtGui.QGraphicsView(self)
self.layout.installEventFilter(self)
# self.setCentralWidget(self.layout)
self.createLayout(winSize)
def initLink(self, externalInjector = True):
# Initialize Link
if self.dcheck.isChecked():
self.lastUpdate = pg.ptime.time()
self.startTime = pg.ptime.time()
else:
try:
handler.connGPIB(bufferDepth)
handler.initMeter()
sleep(3.5)
# handler.setMeterBuffer(bufferDepth)
if observerModel == 2401:
handler.resetTimer()
self.startTime = float(handler.dev[0].query('SYST:TIME?'))
except:
print('Failed.')
# Fill in GUI Boxes
self.vsenseBox.setText(str(handler.sourceVolt))
self.ilimitBox.setText(str(handler.sourceILim))
self.ishotBox.setText(str(handler.injectCurr))
self.vclampBox.setText(str(handler.injectVLim))
self.tPulseBox.setText(str(handler.injectWidth))
def runTimer(self):
self.timer.timeout.connect(self.update)
self.timer.start(timerPeriod)
## All GPIB assumed.
def createLayout(self, winSize):
# GUI Items Declaration
self.revertPolarityBtn = QtGui.QPushButton('Revert\nPolarity')
self.revertPolarityBtn.clicked.connect(self.revertPolarity)
self.expbtn = QtGui.QPushButton('Export')
self.expbtn.clicked.connect(self.csvExport)
self.rstbtn = QtGui.QPushButton('Reset')
self.rstbtn.clicked.connect(self.resetData)
self.filterEnChk = QtGui.QCheckBox('Analog Filter')
self.filterEnChk.clicked.connect(self.filterSetting)
self.filterEnChk.setChecked(False)
self.ymaxBox = QtGui.QLineEdit(self.defaultYMax)
self.ymaxBox.editingFinished.connect(self.yrangeSetting)
self.ymaxBox.setFixedWidth(60)
self.ymaxBox.setAlignment(QtCore.Qt.AlignRight)
self.yminBox = QtGui.QLineEdit(self.defaultYMin)
self.yminBox.editingFinished.connect(self.yrangeSetting)
self.yminBox.setFixedWidth(60)
self.yminBox.setAlignment(QtCore.Qt.AlignRight)
self.autoYChk = QtGui.QCheckBox('Auto Y-Range')
self.autoYChk.clicked.connect(self.yrangeSetting)
self.autoYChk.setChecked(True)
self.dcheck = QtGui.QCheckBox('Debug')
self.dcheck.setChecked(False)
self.measureEnChk = QtGui.QCheckBox('Measure Enable')
self.measureEnChk.setChecked(False)
self.startBtn = QtGui.QPushButton('Start')
self.startBtn.clicked.connect(self.startObserve)
self.plotEnChk = QtGui.QCheckBox('Plot Enable')
self.plotEnChk.setChecked(True)
self.msgLab = QtGui.QLabel(self.placeholder)
self.autoInjectChk = QtGui.QCheckBox('Auto Injection')
self.autoInjectChk.setChecked(True)
self.autoInjectChk.clicked.connect(self.aijUpdate)
self.topThresholdBox = QtGui.QLineEdit(self.defaultTopThreshold)
self.topThresholdBox.editingFinished.connect(self.thresholdSetting)
self.topThresholdBox.setFixedWidth(60)
self.topThresholdBox.setAlignment(QtCore.Qt.AlignRight)
self.botThresholdBox = QtGui.QLineEdit(self.defaultBotThreshold)
self.botThresholdBox.editingFinished.connect(self.thresholdSetting)
self.botThresholdBox.setFixedWidth(60)
self.botThresholdBox.setAlignment(QtCore.Qt.AlignRight)
self.adaptThresholdChk = QtGui.QCheckBox('Adaptive Th')
self.adaptThresholdChk.setChecked(True)
self.adaptThresholdChk.clicked.connect(self.adaptUpdate)
self.percentThresholdBox = QtGui.QLineEdit(self.defaultPercentage)
self.percentThresholdBox.editingFinished.connect(self.tpSetting)
self.percentThresholdBox.setFixedWidth(60)
self.percentThresholdBox.setAlignment(QtCore.Qt.AlignRight)
self.baseLineLab = QtGui.QLabel()
self.bsline = pg.InfiniteLine(self.baseLineValue, 0)
self.ttline = pg.InfiniteLine(self.topLineValue, 0, pen='c')
self.tbline = pg.InfiniteLine(self.botLineValue, 0, pen='m')
self.manualTrigBtn = QtGui.QPushButton('Manual\nTrigger') # trigger
self.manualTrigBtn.clicked.connect(self.setManualTrigger) #handler.dev[1].write(':INIT'))#
self.injectionCountTitleLab = QtGui.QLabel('Injection#:')
self.injectionCountLab = QtGui.QLabel(str(self.injectionCount))
self.rebaseBtn = QtGui.QPushButton('RB')
self.rebaseBtn.clicked.connect(self.resetBaseLine)
self.rezeroCountBtn = QtGui.QPushButton('RZ')
self.rezeroCountBtn.clicked.connect(self.resetInjectionCount)
self.vsenseBox = QtGui.QLineEdit() # vsense
self.ilimitBox = QtGui.QLineEdit() # ilimit
self.ishotBox = QtGui.QLineEdit() # ishot
self.vclampBox = QtGui.QLineEdit() # vclamp
self.tPulseBox = QtGui.QLineEdit() # tpulse
self.filterCntBox = QtGui.QLineEdit('22') # filterCnt
self.revertThresholdBox = QtGui.QLineEdit('2E+6') # Revert at this value
self.autoRevertChk = QtGui.QCheckBox('Auto Revert')
self.autoRevertChk.setChecked(True)
self.autoRevertChk.clicked.connect(self.autoRevert)
self.manualRevertChk = QtGui.QCheckBox('Manual Revert')
self.manualRevertChk.setChecked(True)
self.manualRevertChk.clicked.connect(self.manualRevert)
self.vsenseBox.textEdited.connect(self.setObSettingsUpdated)
self.ilimitBox.textEdited.connect(self.setObSettingsUpdated)
self.ishotBox.textEdited.connect(self.setIjSettingsUpdated)
self.vclampBox.textEdited.connect(self.setIjSettingsUpdated)
self.tPulseBox.textEdited.connect(self.setIjSettingsUpdated)
self.updateSettingsBtn = QtGui.QPushButton('Update\nSettings') # updateSettings
self.updateSettingsBtn.clicked.connect(self.updateInstSettings)
self.logBox = QtGui.QTextEdit()
# Plot Area
if remotePlot:
self.rview = RemoteGraphicsView()
self.rview.pg.setConfigOptions(antialias=setAntialias)
self.plt = self.rview.pg.PlotItem(title='Real-time Impedance Plot')
self.plt._setProxyOptions(deferGetattr=True)
self.rview.setCentralItem(self.plt)
self.tline = self.plt.plot()
else:
self.plt = pg.PlotWidget(title='Real-time Impedance Plot')
self.plt.addItem(self.bsline)
self.plt.addItem(self.ttline)
self.plt.addItem(self.tbline)
self.plt.showGrid(x=True, y=True)
self.plt.setClipToView(True)
self.plt.setLabel('bottom', 'Time', 's')
self.plt.setLabel('left', 'Impedance', 'Ω')
####################
## Set GUI Layout ##
#self.layout.addWidget(self.dcheck, row=0, col=0)
self.layout.addWidget(self.expbtn, row=0, col=0)
self.layout.addWidget(self.startBtn, row=1, col=0)
self.layout.addWidget(self.rstbtn, row=2, col=0)
self.logo = QtGui.QLabel()
self.logo.setPixmap(QtGui.QPixmap('resources\\img\\FBM_logo02.png'))
#self.logo.setFixedHeight(40)
#self.logo.setFixedWidth(300)
self.layout.addWidget(self.logo, row=0, col=1, colspan=3)
self.layout.addWidget(self.measureEnChk, row=1, col=1)
self.filterCntLab = QtGui.QLabel('Moving Avg.(2~100):')
self.layout.addWidget(self.filterCntLab, row=2, col=1)
self.filterCntLab.setFixedWidth(160)
self.filterCntLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.layout.addWidget(self.filterEnChk, row=1, col=2)
self.filterCntBox.setFixedWidth(40)
self.layout.addWidget(self.filterCntBox, row=2, col=2)
self.vsenseLab = QtGui.QLabel('V_sense:')
self.vsenseLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.layout.addWidget(self.vsenseLab, row=1, col=3)
self.ilimitLab = QtGui.QLabel('I_limit:')
self.ilimitLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.layout.addWidget(self.ilimitLab, row=2, col=3)
# Polarity Reversion
self.revertPolarityBtn.setFixedWidth(60)
self.layout.addWidget(self.revertPolarityBtn, row=0, col=4)
# V-sense
self.vsenseBox.setFixedWidth(60)
self.layout.addWidget(self.vsenseBox, row=1, col=4)
# I-limit
self.ilimitBox.setFixedWidth(60)
self.layout.addWidget(self.ilimitBox, row=2, col=4)
# Update Settings
self.updateSettingsBtn.setFixedWidth(60)
self.layout.addWidget(self.updateSettingsBtn, row=0, col=5)
# Ishot Box
self.ishotLab = QtGui.QLabel('I_shot:')
self.ishotLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.layout.addWidget(self.ishotLab, row=1, col=5)
self.ishotBox.setFixedWidth(60)
self.layout.addWidget(self.ishotBox, row=1, col=6)
# Vclamp box
self.vclampLab = QtGui.QLabel('V_clamp:')
self.vclampLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.layout.addWidget(self.vclampLab, row=2, col=5)
self.vclampBox.setFixedWidth(60)
self.layout.addWidget(self.vclampBox, row=2, col=6)
# tPulse Box
self.tPulseLab = QtGui.QLabel('t_Pulse:')
self.tPulseLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.layout.addWidget(self.tPulseLab, row=3, col=5)
self.tPulseBox.setFixedWidth(60)
self.layout.addWidget(self.tPulseBox, row=3, col=6)
# Manual Trig
self.manualTrigBtn.setFixedWidth(60)
self.layout.addWidget(self.manualTrigBtn, row=0, col=6)
self.layout.addWidget(self.autoRevertChk, row=4, col=5, colspan=2)
self.revertThresholdLab = QtGui.QLabel('Flip @')
self.revertThresholdLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.layout.addWidget(self.revertThresholdLab, row=5, col=5)
self.revertThresholdBox.setFixedWidth(60)
self.layout.addWidget(self.revertThresholdBox, row=5, col=6)
self.layout.addWidget(self.autoInjectChk, row=6, col=5, colspan=2)
self.layout.addWidget(self.botThresholdBox, row=7, col=5)
self.layout.addWidget(self.topThresholdBox, row=7, col=6)
self.layout.addWidget(self.baseLineLab, row=8, col=5)
self.rebaseBtn.setFixedWidth(30)
self.layout.addWidget(self.rebaseBtn, row=8, col=6)
# Peak counter
self.injectionCountTitleLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignBottom)
self.layout.addWidget(self.injectionCountTitleLab, row=9, col=5, colspan=1)
self.injectionCountLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.layout.addWidget(self.injectionCountLab, row=10, col=5)
self.rezeroCountBtn.setFixedWidth(30)
self.layout.addWidget(self.rezeroCountBtn, row=10, col=6)
# Version
self.versionLab = QtGui.QLabel(versionTxt)
self.versionLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.layout.addWidget(self.versionLab, row=0, col=7, colspan=2)
# Plot Speed
self.layout.addWidget(self.msgLab, row=1, col=7, rowspan=2, colspan=2)
self.msgLab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.layout.addWidget(self.manualRevertChk, row=4, col=7, colspan=2)
self.layout.addWidget(self.plotEnChk, row=5, col=7, colspan=2)
self.layout.addWidget(self.autoYChk, row=6, col=7, colspan=2)
self.layout.addWidget(self.yminBox, row=7, col=7)
self.layout.addWidget(self.ymaxBox, row=7, col=8)
self.layout.addWidget(self.adaptThresholdChk, row=9, col=7, colspan=2)
self.layout.addWidget(self.percentThresholdBox, row=10, col=7)
self.layout.addWidget(QtGui.QLabel('%'), row=10, col=8)
self.layout.addWidget(self.logBox, row=11, col=5, colspan=4)
if remotePlot:
self.layout.addWidget(self.rview, row=3, col=0, rowspan=10, colspan=5)
else:
self.layout.addWidget(self.plt, row=3, col=0, rowspan=10, colspan=5)
self.layout.resize(*winSize)
self.layout.show()
self.curve = self.plt.plot()
# if remotePlot:
def startObserve(self):
if (observerModel == 2602) :
handler.dev[0].write('startObserve()')
print('Starting...')
self.measureEnChk.setChecked(True)
def aijUpdate(self):
if observerModel == 2602:
if self.autoInjectChk.isChecked():
handler.dev[0].write('Aij(true)')
else:
handler.dev[0].write('Aij(false)')
def resetBaseLine(self):
self.baseLineValue = float(self.defaultBotThreshold)
self.topLineValue = float(self.defaultTopThreshold)
self.botLineValue = float(self.defaultBotThreshold)
handler.dev[0].write('reb()')
def resetInjectionCount(self):
self.injectionCount = 0
handler.dev[0].write('rez()')
def setManualTrigger(self):
if observerModel == 2602:
handler.dev[0].write('genPulse(ijWidth)')
else:
handler.dev[1].write(':INIT')
def setObSettingsUpdated(self):
self.obSettingUpdated = True
def setIjSettingsUpdated(self):
self.ijSettingUpdated = True
print('Injector setting has been changed.')
#print(self.ijSettingUpdated)
#print((self.ijSettingUpdated == True) )
#print((self.obSettingUpdated != True) & (self.ijSettingUpdated != True))
def updateInstSettings(self):
if ((self.obSettingUpdated != True) & (self.ijSettingUpdated != True)):
print('No setting has been changed.')
else:
if self.obSettingUpdated:
self.obSettingUpdated = False
self.ijSettingUpdated = False
handler.sourceVolt = float(self.vsenseBox.text())
handler.sourceILim = float(self.ilimitBox.text())
if observerModel == 2602:
handler.dev[0].write('updateSMUA('+ str(handler.sourceVolt)
+ ',' + str(handler.sourceILim) + ')')
print('updateSMUA('+ str(handler.sourceVolt)
+ ',' + str(handler.sourceILim) + ')')
# updating function
if self.ijSettingUpdated:
self.ijSettingUpdated = False
handler.injectCurr = float(self.ishotBox.text())
handler.injectVLim = float(self.vclampBox.text())
handler.injectWidth = float(self.tPulseBox.text())
if observerModel == 2602:
handler.dev[0].write('updateSMUB(' + str(handler.injectCurr)
+ ',' + str(handler.injectVLim) + ')')
handler.dev[0].write('ijWidth = '+str(handler.injectWidth))
else:
handler.dev[1].write(':SOUR:CURR:LEV ' + str(handler.injectCurr))
handler.dev[1].write(':SENS:VOLT:PROT ' + str(handler.injectVLim))
def adaptUpdate(self):
if observerModel == 2602:
if self.adaptThresholdChk.isChecked():
percent = self.percentThresholdBox.text()
else:
percent = '0'
print('adaptiveT(' + percent + ')')
handler.dev[0].write('adaptiveT(' + percent + ')')
def revertPolarity(self):
if observerModel == 2602:
#handler.dev[0].write('rvt()')
print('Obsolete function since V2.5.6')
else:
self.obSettingUpdated = ~self.obSettingUpdated # Why?
self.revertPolarityClicked = True
handler.sourceVolt = -handler.sourceVolt
self.vsenseBox.setText(str(handler.sourceVolt))
print('Source is changed to %02.1f' % handler.sourceVolt)
def csvExport(self):
filename = strftime('%Y%m%d_%H%M')+'.csv'
np.savetxt(filename, self.data, fmt='%.06f',delimiter=',',header='|Z|(ohms),time(s)')
choice = QtGui.QMessageBox.information(self, 'Message',
"Impedance data is saved in "+filename+'!',
QtGui.QMessageBox.Ok)
if choice == QtGui.QMessageBox.Ok:
print("Exporting confirmed.")
def resetData(self):
self.ptr = 0
self.data = np.empty((16 * plotWindow, 2))
self.avgFPS = 0.0
self.baseLineValue = float(self.defaultBotThreshold)
self.topLineValue = float(self.defaultTopThreshold)
self.botLineValue = float(self.defaultBotThreshold)
if self.dcheck.isChecked():
self.startTime = pg.ptime.time()
elif observerModel == 2401:
self.startTime = 0
handler.dev[0].write(':SYST:TIME:RES')
return
def autoRevert(self):
global handler
if self.autoRevertChk.isChecked():
if observerModel == 2602:
handler.dev[0].write('revertThreshold = ' + self.revertThresholdBox.text())
handler.dev[0].write('autoRevert = true')
print('Auto polarity reversion is applied.')
else:
if observerModel == 2602:
handler.dev[0].write('autoRevert = false')
print('Auto polarity reversion is cancelled.')
return
def manualRevert(self):
global handler
if self.manualRevertChk.isChecked():
if observerModel == 2602:
handler.dev[0].write('manualRevert = true')
print('Manual polarity reversion is enabled.')
else:
if observerModel == 2602:
handler.dev[0].write('manualRevert = false')
print('Manual polarity reversion is disabled.')
return
def filterSetting(self):
global handler
if self.filterEnChk.isChecked():
#self.ser.write(b'RES:FILT ON\n')
if observerModel == 2602:
handler.dev[0].write('smua.measure.filter.count = '+ self.filterCntBox.text())
handler.dev[0].write('smua.measure.filter.enable = smua.FILTER_ON')
elif observerModel == 2401:
handler.dev[0].write(':SENS:AVER ON')
elif observerModel == 2461:
#if observerModel == 2461:
handler.dev[0].write(':SENS:CURR:AVER ON')
print('Analog LPF is applied.')
else:
#self.ser.write(b'RES:FILT OFF\n')
if observerModel == 2602:
handler.dev[0].write('smua.measure.filter.enable = smua.FILTER_OFF')
elif observerModel == 2401:
handler.dev[0].write(':SENS:AVER OFF')
elif observerModel == 2461:
handler.dev[0].write(':SENS:CURR:AVER OFF')
print('Analog LPF is disabled.')
return
def yrangeUpdate(self):
self.defaultYMax = self.ymaxBox.text()
self.defaultYMin = self.yminBox.text()
print(self.defaultYMax, self.defaultYMin)
return
def yrangeSetting(self):
self.yrangeUpdate()
if self.autoYChk.isChecked():
self.plt.enableAutoRange(axis='y')
print('Auto Y-range is set.')
else:
# self.plt.setRange(yRange=[float(self.ymin),float(self.ymax)],update=True,disableAutoRange=True)
self.plt.disableAutoRange(axis='y')
self.plt.setYRange(float(self.defaultYMin), float(self.defaultYMax))
print('Manual Y-range is set.')
return
def thresholdSetting(self):
self.defaultTopThreshold = self.topThresholdBox.text()
self.defaultBotThreshold = self.botThresholdBox.text()
self.topLineValue = float(self.defaultTopThreshold)
self.botLineValue = float(self.defaultBotThreshold)
self.ttline.setValue(self.topLineValue)
self.tbline.setValue(self.botLineValue)
def tpSetting(self):
self.percentThreshold = float(self.percentThresholdBox.text())
def update(self):
if self.measureEnChk.isChecked():
## Expanding data buffer
self.ptr += bufferDepth
if self.ptr >= self.data.shape[0]:
tmp = self.data
self.data = np.empty((self.data.shape[0]*2,2))
self.data[:tmp.shape[0],:] = tmp
print('Expanding data buffer...')
# Updating Z-data
if self.dcheck.isChecked():
now = pg.ptime.time()
self.data[self.ptr - bufferDepth:self.ptr, 0] = np.random.normal(size=bufferDepth)
self.data[self.ptr, 1] = now - self.startTime
for i in range(1, bufferDepth):
self.data[self.ptr - i, 1] = self.data[self.ptr, 1] - (now - self.lastUpdate) * i / float(bufferDepth)
else:
# Pre-processing may be necessary
self.data[self.ptr-bufferDepth : self.ptr] = self.getData()
if self.revertPolarityClicked:
self.revertPolarityClicked = False
if observerModel == 2461:
handler.dev[0].write(':SOUR:VOLT '+str(handler.sourceVolt)+'\n')
if self.plotEnChk.isChecked() & ((self.ptr / bufferDepth) % plotSubsampling == 0):
now = self.data[self.ptr-1, 1]
try:
fps = 1 / (now - self.lastUpdate)
except:
fps = 1
if self.ptr < plotWindow: ## Plot is not moving at this point
self.curve.setData(x=self.data[:self.ptr, 1], y=self.data[:self.ptr, 0], _callSync='off')
self.sigma = np.std(self.data[:self.ptr, 0])
else: # Moving plot
self.curve.setData(x=self.data[self.ptr - plotWindow:self.ptr, 1],
y=self.data[self.ptr - plotWindow:self.ptr, 0], _callSync='off')
self.sigma = np.std(self.data[self.ptr - plotWindow:self.ptr, 0])
self.bsline.setValue(self.baseLineValue)
self.ttline.setValue(self.topLineValue)
self.tbline.setValue(self.botLineValue)
self.avgFPS = self.avgFPS * 0.95 + fps * 0.05
self.msgLab.setText('Plotting\n@%02.2ffps\n(%0.1fpoints/s)\nσ=%.2e' % \
(self.avgFPS, self.avgFPS * bufferDepth * plotSubsampling, self.sigma))
self.baseLineLab.setText('%02.2e' % self.baseLineValue)
self.injectionCountLab.setText(str(self.injectionCount))
self.lastUpdate = now
return
def getData(self):
global handler
## GPIB
try:
#received = handler.dev[0].read().split(',')
if observerModel == 2602:
try:
received = handler.dev[0].read().rstrip('\n').split(',')
except:
print(received)
#handler.dev[0].read()
#if (handler.dev[0].read()!="TSP>\n"): print('Non-prompt output detected.')
elif observerModel == 2461:
handler.dev[0].write(':READ:DIG? "defbuffer1",READ,REL')
#handler.dev[0].write(':READ? "defbuffer1",READ,REL')
received = LinkGPIB.formatSRE(handler.dev[0].read_raw())
elif observerModel == 2401:
received = LinkGPIB.formatSRE(handler.dev[0].read_raw())
#received = handler.dev[0].query(':READ?').split(',')
#received = handler.readMeterBuffer().split(',')
#print(received)
readValues = np.empty((bufferDepth,2)) # Z, t
if observerModel == 2602: # re-coded for V2.5
# Cell detection: Source Voltage and Read Current
readValues[:,0] = received[0::6] # Z, t, base, bot, top, ijCnt
readValues[:,1] = received[1::6]
self.baseLineValue = float(received[2])
self.botLineValue = received[3]
self.topLineValue = received[4]
self.injectionCount = int(received[5])
#
# if self.autoInjectChk.isChecked():
# if ~self.triggered & (np.max(readValues[:, 0]) > self.topLineValue):
# handler.dev[1].write(':INIT')
# self.triggered = True
# self.injectionCount += 1 # Counts number of injections
# elif (np.min(readValues[:,0]) < self.botLineValue):
# if (np.min(readValues[:,0]) < self.baseLineValue):
# self.baseLineValue = self.baseLineValue * 0.996 + np.average(readValues[:, 0]) * 0.004
# else:
# self.baseLineValue = self.baseLineValue * 0.98 + np.average(readValues[:, 0]) * 0.02
# if self.adaptThresholdChk.isChecked():
# self.botLineValue = self.baseLineValue * (1 + 0.006 * self.percentThreshold)
# self.topLineValue = self.baseLineValue * (1 + 0.01 * self.percentThreshold)
# #print(self.baseLineValue)
# if self.triggered:
# self.triggered = False
elif observerModel == 2461:
# Cell detection: Source Voltage and Read Current
readValues[:,0] = [ handler.sourceVolt / x for x in received[0::2]]
## debugging - source current and read voltage
#readValues[:,0] = [x / handler.sourceCurr for x in received[0::2]]
#print(readValues[:,0])
if self.autoInjectChk.isChecked():
if ~self.triggered & (np.max(readValues[:, 0]) > self.topLineValue):
handler.dev[1].write(':INIT')
self.triggered = True
elif (np.min(readValues[:,0]) < self.botLineValue):
self.baseLineValue = self.baseLineValue * 0.95 + np.average(readValues[:, 0]) * 0.05
if self.adaptThresholdChk.isChecked():
self.botLineValue = self.baseLineValue * (1 + 0.006 * self.percentThreshold)
self.topLineValue = self.baseLineValue * (1 + 0.01 * self.percentThreshold)
#print(self.baseLineValue)
if self.triggered:
self.triggered = False
readValues[:,1] = received[1::2]
else:
readValues[:,0] = received[0::2]
readValues[:,1] = received[1::2]
#print(readValues[:,1])
#print(readValues)
except:
readValues = np.random.normal(size=(bufferDepth,2))
return readValues
def eventFilter(self, source, event):
if (source is self.layout and
event.type() == QtCore.QEvent.KeyPress):
key = event.key()
if key == QtCore.Qt.Key_Escape:
print('Esc button is pressed.')
# sys.exit(1)
elif key == QtCore.Qt.Key_Space:
self.setManualTrigger()
return QtGui.QMainWindow.eventFilter(self, source, event)
# def closeEvent(self, event):
# print('Calling')
# print('event: {0}'.format(event))
# event.accept()
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
app = QtGui.QApplication(sys.argv)
handler = LinkGPIB.LinkHandler()
m = MeterGUI()
m.initLink()
#handler.dev[0].write('startObserve()')
m.runTimer()
sys.exit(app.exec_())
| mit | -8,308,357,177,591,892,000 | 42.695335 | 122 | 0.598766 | false |
cropleyb/pentai | pentai/ai/t_alpha_beta.py | 1 | 4043 |
#!/usr/bin/env python
import unittest
from pentai.ai.alpha_beta import *
class MockState(object):
def __init__(self, name, utility, successors):
self.name = name
self.utility = utility
self.successors = successors
class MockGame:
def __init__(self, states, max_depth=4):
self.states = dict([(s.name, s) for s in states])
self.max_depth = max_depth
def successors(self, state_name, depth):
state = self.states[state_name]
for child_state in state.successors:
yield child_state
def utility(self, state_name, depth):
return self.states[state_name].utility
def terminal_test(self, state_name, depth):
if depth >= self.max_depth:
return True
return len(self.states[state_name].successors) == 0
def to_move(self, state_name):
return True # TODO?
def save_utility(self, state, depth, utility_value):
pass
def report_short_circuit(self, *args):
pass
def report_vals(self, *args):
pass
def use_bl_cutoff(self):
return False
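# Each test below wires a small game tree out of MockState/MockGame objects and
# checks that alphabeta_search returns the move and value plain minimax would.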
class AlphaBetaTest(unittest.TestCase):
'''
# TODO: Resurrect
def test_finished_game(self):
game = mock.Mock(
utility=0,
terminal_test=True,
to_move=True,
successors=[("move","child_state")])
action, value = alphabeta_search(state="start_state", game=game)
self.assertEquals(action, ("move", "child_state"))
self.assertEquals(value, 0)
'''
def test_top_level_options(self):
game = MockGame([
MockState("S0", 1, [(0,"S1"),(1,"S2"),(2,"S3")]),
MockState("S1", 1, []),
MockState("S2", 2, []),
MockState("S3", 1, [])])
action, value = alphabeta_search(state="S0", game=game)
self.assertEquals(action, (1, "S2"))
self.assertEquals(value, 2)
def test_top_level_with_one_move_having_a_single_descendent(self):
game = MockGame([
MockState("S0", 1, [(0,"S1"),(1,"S2"),(2,"S3")]),
MockState("S1", 1, []),
MockState("S2", 2, []),
MockState("S3", 1, [(0,"S4")]),
MockState("S4", 4, [])])
action, value = alphabeta_search(state="S0", game=game)
self.assertEquals(action, (2, "S3"))
self.assertEquals(value, 4)
def test_opponent_chooses_bad_move_for_us(self):
game = MockGame([
MockState("S0", 1, [(0,"S1"),(1,"S2")]),
MockState("S1", 1, [(0,"S3"),(1,"S4")]),
MockState("S2", 2, []),
MockState("S3", 3, []),
MockState("S4", 4, [])])
action, value = alphabeta_search(state="S0", game=game)
self.assertEquals(action, (0, "S1"))
self.assertEquals(value, 3)
def test_only_search_one_depth_level(self):
game = MockGame([
MockState("S0", 0, [(0,"S1"),(0,"S1")]),
MockState("S1", 1, [(0,"S2")]),
MockState("S2", 2, [(0,"S3")]),
MockState("S3", 3, [])], max_depth=1)
action, value = alphabeta_search(state="S0", game=game)
self.assertEquals(value, 1)
def test_only_search_two_depth_levels(self):
game = MockGame([
MockState("S0", 0, [(0,"S1"),(0,"S1")]),
MockState("S1", 1, [(0,"S2")]),
MockState("S2", 2, [(0,"S3")]),
MockState("S3", 3, [])], max_depth=2)
action, value = alphabeta_search(state="S0", game=game)
self.assertEquals(value, 2)
# !python pentai/ai/t_alpha_beta.py AlphaBetaTest.test_terminal_state
def test_terminal_state(self):
game = MockGame([
MockState("S0", 0, [(0,"S1"),(0,"S1")]),
MockState("S1", 1, [(0,"S2")]),
MockState("S2", 2, [(0,"S3")]),
MockState("S3", 3, [])], max_depth=4)
action, value = alphabeta_search(state="S0", game=game)
self.assertEquals(value, 3.0)
if __name__ == "__main__":
unittest.main()
| mit | 712,375,230,146,879,200 | 32.139344 | 73 | 0.531783 | false |
openstack/zaqar | zaqar/tests/unit/hacking/test_hacking.py | 1 | 1160 |
# Copyright (c) 2017 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zaqar.hacking import checks
from zaqar.tests import base
class HackingTestCase(base.TestBase):
def test_no_log_translations(self):
for log in checks._all_log_levels:
for hint in checks._all_hints:
bad = 'LOG.%s(%s("Bad"))' % (log, hint)
self.assertEqual(1, len(list(checks.no_translate_logs(bad))))
# Catch abuses when used with a variable and not a literal
bad = 'LOG.%s(%s(msg))' % (log, hint)
self.assertEqual(1, len(list(checks.no_translate_logs(bad))))
| apache-2.0 | 7,031,336,255,733,594,000 | 41.962963 | 77 | 0.677586 | false |
edx-solutions/api-integration | edx_solutions_api_integration/management/commands/convert_ooyala_to_bcove.py | 1 | 3014 |
import datetime
import logging
from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from edx_solutions_api_integration.tasks import convert_ooyala_to_bcove
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from pytz import UTC
logger = logging.getLogger(__name__) # pylint: disable=locally-disabled, invalid-name
class Command(BaseCommand):
"""
Command to update Ooyala Xblock Content IDs to corresponding Brightcove IDs
"""
help = 'Convert Ooyala IDs to corresponding Brightcove IDs in Xblock and embeds'
batch_size = 100
def add_arguments(self, parser):
parser.add_argument(
"--user-id",
dest="user_id",
help="Staff User ID",
),
parser.add_argument(
"--course-ids",
dest="course_ids",
help="Course IDs to process Ooyala instances in",
),
parser.add_argument(
"--revert",
dest="revert",
action="store_true",
default=False,
help="Revert all the converted Ids back to previous state"
),
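    # Example invocation (values are placeholders):
    #   python manage.py convert_ooyala_to_bcove --user-id=42 \
    #       --course-ids=course-v1:OrgX+Course1+2020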
def handle(self, *args, **options):
course_ids = options.get('course_ids')
user_id = options.get('user_id')
revert = options.get('revert')
if not user_id:
raise CommandError("--user-id parameter is missing. Please provide a staff user id")
else:
try:
User.objects.get(id=user_id)
except User.DoesNotExist:
raise CommandError("Invalid user id: {}. Please provide a valid staff user id".format(user_id))
if course_ids:
course_ids = course_ids.split(',')
logger.info('Ooyala IDs update task queued for Courses: {}'.format(course_ids))
convert_ooyala_to_bcove.delay(
staff_user_id=user_id,
course_ids=course_ids,
revert=revert,
callback="conversion_script_success_callback",
)
else:
# run on all open courses
open_courses = CourseOverview.objects.filter(
Q(end__gte=datetime.datetime.today().replace(tzinfo=UTC)) |
Q(end__isnull=True)
).values_list('id', flat=True)
logger.info('Ooyala IDs update command: queuing task for {} Open Courses'.format(len(open_courses)))
for course_ids in self.chunks(open_courses, self.batch_size):
convert_ooyala_to_bcove.delay(
staff_user_id=user_id,
course_ids=course_ids,
revert=revert,
callback="conversion_script_success_callback",
)
def chunks(self, l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
| agpl-3.0 | -6,302,106,137,230,728,000 | 35.756098 | 112 | 0.584605 | false |
kamyu104/GoogleCodeJam-2014 | World Finals/aram.py | 1 | 1450 |
# Copyright (c) 2015 kamyu. All rights reserved.
#
# Google Code Jam 2014 World Finals - Problem F. ARAM
# https://code.google.com/codejam/contest/7214486/dashboard#s=p5
#
# Time: O(60 * N * R * G)
# Space: O(1)
#
# Can you win at least X fraction of the time?
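# (Editorial note, summarising the recurrence below rather than the original
# write-up:) for C < G coins you must keep whatever champion you roll, so A[C] is
# the overall average win probability minus X; for C >= G, A[C] maximises over
# keeping only a top-K champion, folding the sum of the previous G entries of A
# back in with weight p/(1-p), where p = (N-K)/N is the reroll probability. X is
# deemed achievable once some A[C] >= 0.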
def CanWin(X):
A = []
last_G_values = 0
# C < G, not enough coins for a reroll.
for C in xrange(0, G):
A.append(avg_win_prob_top[N] - X)
last_G_values += A[C]
# C >= G, enough coins for a reroll.
for C in xrange(G, R * G + 1):
A.append(-1e100)
for K in xrange(1, N + 1):
p = 1.0 * (N - K) / N # Probability of rerolling.
p_reroll = p / (1 - p) * last_G_values
p_not_reroll = avg_win_prob_top[K] - X
A[C] = max(A[C], p_reroll + p_not_reroll)
if A[C] >= 0:
return True
last_G_values += A[C] - A[C - G]
return False
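# For each case: sort champions by descending win probability, precompute the top-K
# averages, then binary-search (60 halvings of [0, 1]) for the largest X with CanWin(X).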
for case in xrange(input()):
N, R, G = map(int, raw_input().strip().split())
win_prob = map(float, raw_input().strip().split())
win_prob = sorted(win_prob, reverse=True)
avg_win_prob_top = [0]
for topK in xrange(1, N + 1):
avg_win_prob_top.append(sum(win_prob[0:topK]) / topK)
left = 0.0
right = 1.0
for i in xrange(60):
mid = (left + right) / 2
if not CanWin(mid):
right = mid
else:
left = mid
print "Case #%d: %.15f" % (case+1, left) | mit | 4,796,693,524,528,156,000 | 26.377358 | 64 | 0.522759 | false |
Intel-Corporation/tensorflow | tensorflow/python/framework/func_graph.py | 1 | 42379 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import itertools
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
# This is to avoid a circular dependency:
# function -> func_graph
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
def_function = LazyLoader(
"def_function", globals(),
"tensorflow.python.eager.def_function")
WHITELIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
variable_scope._VARSTORE_KEY, # pylint: disable=protected-access
variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access
]
class UnknownArgument(object):
"""Signifies an argument which is not currently handled."""
pass
def convert_structure_to_signature(structure, arg_names=None):
"""Convert a potentially nested structure to a signature.
Args:
structure: Structure to convert, where top level collection is a list or a
tuple.
    arg_names: Optional list of argument names with the same number of elements
      as `structure`, used for naming the corresponding TensorSpecs.
Returns:
Identical structure that has TensorSpec objects instead of Tensors and
    UnknownArgument instead of any unsupported types.
"""
structure = composite_tensor.replace_composites_with_components(structure)
def encode_arg(arg, path):
"""A representation for this argument, for converting into signatures."""
if isinstance(arg, ops.Tensor):
user_specified_name = None
try:
user_specified_name = compat.as_str(
arg.op.get_attr("_user_specified_name"))
except ValueError:
pass
if path and user_specified_name and user_specified_name != path[0]:
# The user has explicitly named the argument differently than the name
# of the function argument.
name = user_specified_name
else:
name = "/".join([str(p) for p in path])
return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)
if isinstance(arg, (
int,
float,
bool,
type(None),
dtypes.DType,
tensor_spec.TensorSpec,
)):
return arg
return UnknownArgument()
# We are using the flattened paths to name the TensorSpecs. We need an
# explicit name for them downstream.
flattened = nest.flatten_with_tuple_paths(structure, expand_composites=True)
if arg_names:
if len(arg_names) != len(structure):
raise ValueError(
"Passed in arg_names don't match actual signature (%s)." % arg_names)
# Replace all top-level names with their actual arg_names. If a path before
# was "(2,'a',1)", it will become "(arg_names[2],'a',1)".
flattened = [
((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened
]
mapped = [encode_arg(arg, path) for path, arg in flattened]
return nest.pack_sequence_as(structure, mapped, expand_composites=True)
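# Illustrative example (editorial, not from the original source): for a structure
# like ([tf.constant([1., 2.])], {}) with no arg_names, the Tensor at path (0, 0)
# becomes tensor_spec.TensorSpec(shape=(2,), dtype=float32, name='0/0'); plain
# Python values (ints, floats, bools, None, DTypes, TensorSpecs) pass through
# unchanged, and anything else is replaced by UnknownArgument().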
class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
control_outputs: Operations that must be executed before the function
represented by this graph can be said to have been executed.
structured_input_signature: A tuple of (args, kwargs), which are both
possibly-nested python objects that were received by this function. Note
that these structures might contain Python `None`s.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
control_captures: Set of external ops on which this graph has a control
dependency.
seed: The graph-level random seed.
capture_by_value: If True, the func graph will capture Variables by value
instead of reference.
"""
def __init__(self, name, collections=None, capture_by_value=None):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, and distribution
strategy stack from the current context or graph.
Args:
name: the name of the function.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write
to) the outer graph's collections that are not whitelisted, and both
read and write to the outer graph's collections that are whitelisted.
The current whitelisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will
capture Variables by value instead of reference. By default inherit
from outer graphs, and failing that will default to False.
"""
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.control_outputs = []
self.control_captures = set()
self.structured_input_signature = None
self.structured_outputs = None
self._weak_variables = []
self._watched_variables = weakref.WeakSet()
self.outer_graph = ops.get_default_graph()
self.captures = py_collections.OrderedDict()
# Inherit capture-by-value from outer graph.
if capture_by_value is not None:
self.capture_by_value = capture_by_value
elif self.outer_graph is not None and isinstance(
self.outer_graph, FuncGraph):
self.capture_by_value = self.outer_graph.capture_by_value
else:
self.capture_by_value = False
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
      # [for tf-data user migration from TF1.0 to 2.0] seed_used keeps track of
# any None op_seed for random_op in the function, in which case we end up
# using function seed, which could be unintended behavior for the op.
self._seed_used = False
else:
self.seed = graph.seed
self._seed_used = False
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access
if collections is None:
for collection_name in graph.get_all_collection_keys():
if collection_name not in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
else:
self._collections = collections
def __str__(self):
return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))
def watch_variable(self, v):
"""Marks the variable v as accessed while building this graph."""
while self is not None and isinstance(self, FuncGraph):
self._watched_variables.add(v)
self = self.outer_graph
def control_dependencies(self, control_inputs):
"""Handles control dependencies.
FuncGraph wraps Graph's control_dependencies logic by first filtering out
any external tensors / operations and storing them in the graph's
control_captures member. Any consumers of this function graph must then
decide how to handle the control captures.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return super(FuncGraph, self).control_dependencies(control_inputs)
filtered_control_inputs = []
for c in control_inputs:
# Check for _UnreadVariable
if (isinstance(c, ops.IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
graph_element = ops._as_graph_element(c) # pylint: disable=protected-access
if graph_element is None:
graph_element = c
if graph_element is not None and getattr(
graph_element, "graph", None) is not self:
self.control_captures.add(graph_element)
else:
filtered_control_inputs.append(graph_element)
return super(FuncGraph, self).control_dependencies(filtered_control_inputs)
def as_default(self):
outer_cm = super(FuncGraph, self).as_default()
@tf_contextlib.contextmanager
def inner_cm():
"""Context manager for copying distribute.Strategy scope information."""
graph = ops.get_default_graph()
# pylint: disable=protected-access
# TODO(b/112906995, nareshmodi): distribution strategy depends on
# inheriting this stack from the default graph even in eager mode. Maybe
# it should be part of the eager context? This would also allow us to
# remove a get_default_graph() call from the function cache lookup.
old_strategy_stack = self._distribution_strategy_stack
self._distribution_strategy_stack = list(
graph._distribution_strategy_stack)
# We ignore device placements from any outer scopes while tracing the
# function when possible, to avoid hard-coding them in the function
# graph. "Default" placements come from the PartitionedCallOp's placement,
# so that the same trace of the Python function may be placed on several
# different devices and saved functions may be placed on new devices when
# restored.
old_device_stack = self._device_function_stack
if context.executing_eagerly():
if self._distribution_strategy_stack:
self._add_device_to_stack(context.context().device_name)
else:
if (self._distribution_strategy_stack
or device_stack_has_callable(graph._device_function_stack)):
# Hard-code devices from device functions in the function body
self._device_function_stack = graph._device_function_stack.copy()
old_creator_stack = self._variable_creator_stack
self._variable_creator_stack = graph._variable_creator_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
old_graph_key = self._graph_key
self._graph_key = graph._graph_key
# pylint: enable=protected-access
with outer_cm as g:
try:
yield g
finally:
self._distribution_strategy_stack = old_strategy_stack
self._device_function_stack = old_device_stack
self._variable_creator_stack = old_creator_stack
self._graph_key = old_graph_key
return inner_cm()
@property
def output_types(self):
return [t.dtype for t in self.outputs]
@property
def output_shapes(self):
return [t.shape for t in self.outputs]
@property
def variables(self):
"""A list of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Yields:
Strong references to variables accessed by this FuncGraph.
"""
for weak_v in self._weak_variables:
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
yield v
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def _capture_by_value(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
# When capturing by value, do the read outside
reverse_captures = dict((v, k) for k, v in self.captures.items())
uncaptured_inputs = [reverse_captures.get(t, t) for t in inputs]
with ops.init_scope():
if context.executing_eagerly():
attr_list = ("dtype", int(attrs["dtype"].type))
value, = execute.execute(
compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
context.context())
else:
op = ops.get_default_graph().create_op(
op_type, uncaptured_inputs, dtypes, input_types, name, attrs,
op_def, compute_shapes, compute_device)
value = op.outputs[0]
captured_value = self.capture(value)
return captured_value.op
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Like Graph.create_op, except handles external input tensors.
This overload adds functionality to create_op to "capture" any external
input tensors, i.e. tensors from the eager context or outer function graphs
if this is a nested function. See `capture` for more information.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
if self.capture_by_value and op_type in ["ReadVariableOp",
"ResourceGather"]:
return self._capture_by_value(
op_type, inputs, dtypes, input_types, name, attrs, op_def,
compute_shapes, compute_device)
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
for i, inp in enumerate(inputs):
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
inputs[i] = inp
return super(FuncGraph, self).create_op(
op_type, inputs, dtypes, input_types, name, attrs, op_def,
compute_device=compute_device)
def capture(self, tensor, name=None):
"""Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
Returns:
Tensor from this FuncGraph.
"""
# Note: _forward_func_graph is currently only set when building the gradient
    # graph of a defun call. If the backwards graph tries to capture
    # tensors, those will be captured first in the forward graph. This
# makes sure that any tensor needed by a custom_gradient is correctly
# captured.
if (getattr(tensor, "graph", None) is not self and
hasattr(self, "_forward_func_graph") and
isinstance(self._forward_func_graph, FuncGraph)):
tensor = self._forward_func_graph.capture(tensor)
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
return self._capture_helper(tensor, name)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
inner_graph = tensor.graph
while inner_graph is not None and isinstance(inner_graph, FuncGraph):
if inner_graph is self:
raise ValueError(
"Trying to capture a tensor from an inner function. This can be "
"caused by accessing a tensor defined inside a loop or "
"conditional body, or a subfunction, from a calling function, "
"without going through the proper return value mechanism. "
"Consider using TensorFlow mechanisms such as TensorArrays "
"to return tensors from inner functions or loop / conditional "
"bodies. Tensor: %s; tensor graph: %s; this graph: %s"
% (tensor, tensor.graph, self))
inner_graph = inner_graph.outer_graph
return self._capture_helper(tensor, name)
return tensor
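  # A minimal usage sketch for `capture` (hypothetical names; assumes `t` is a
  # tensor from an outer graph or the eager context):
  #
  #   fg = FuncGraph("f")
  #   with fg.as_default():
  #     placeholder = fg.capture(t)   # placeholder recorded in fg.captures
  #     same = fg.capture(t)          # repeated captures are deduplicated
  #   assert placeholder is same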
def _capture_helper(self, tensor, name):
captured_tensor = self.captures.get(tensor, None)
if captured_tensor is None:
captured_tensor = _create_substitute_placeholder(tensor, name=name,
dtype=tensor.dtype)
self.captures[tensor] = captured_tensor
self.inputs.append(captured_tensor)
tape.record_operation("captured_value", [captured_tensor], [tensor],
lambda x: [x])
return captured_tensor
@property
def external_captures(self):
"""External tensors captured by this function."""
return list(self.captures.keys())
@property
def internal_captures(self):
"""Placeholders in this function corresponding captured tensors."""
return list(self.captures.values())
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
autograph_options=None,
add_control_dependencies=True,
arg_names=None,
op_return_value=None,
collections=None,
capture_by_value=None,
override_flat_arg_shapes=None):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
func_graph: Optional. An instance of FuncGraph. If provided, we will use
this graph else a new one is built and returned.
autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
autograph_options: additional knobs to control when `autograph=True`.
See https://www.tensorflow.org/guide/autograph for more information.
add_control_dependencies: If True, automatically adds control dependencies
to ensure program order matches execution order and stateful ops always
execute.
arg_names: Optional list of argument names, used to give input placeholders
recognizable names.
op_return_value: Optional. A Tensor. If set and `python_func` returns
Operations, those return values will be replaced with this value. If not
set, returning an Operation triggers an error.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write to)
the outer graph's collections that are not whitelisted, and both
read and write to the outer graph's collections that are whitelisted.
The current whitelisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will capture
Variables by value instead of reference. By default inherit from outer
graphs, and failing that will default to False.
override_flat_arg_shapes: An optional list of instances that are either
`None` or `TensorShape`. The length must match that of
`nest.flatten((args, kwargs), expand_composites=True)`. The entries
containing value `None` must match entries in flattened arguments
containing non-tensors, while entries containing a `TensorShape` must
match entries in the flattened arguments containing tensors.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None` nor a
`Tensor`.
ValueError: If both `signature` and `override_flat_arg_shapes` are
passed in.
"""
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name, collections=collections,
capture_by_value=capture_by_value)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
control_manager = AutomaticControlDependencies()
else:
control_manager = ops.NullContextmanager()
with func_graph.as_default(), control_manager as a:
current_scope = variable_scope.get_variable_scope()
    default_use_resource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None and override_flat_arg_shapes is not None:
raise ValueError(
"Passed both signature and override_flat_arg_shapes: %s and %s."
% (signature, override_flat_arg_shapes))
if signature is not None:
args = signature
kwargs = {}
# Creates and names placeholders for all arguments.
if override_flat_arg_shapes is not None:
flat_args = nest.flatten(args, expand_composites=True)
arg_shapes = override_flat_arg_shapes[:len(flat_args)]
kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
else:
arg_shapes = None
kwarg_shapes = None
func_args = _get_defun_inputs_from_args(
args, arg_names, flat_shapes=arg_shapes)
func_kwargs = _get_defun_inputs_from_kwargs(
kwargs, flat_shapes=kwarg_shapes)
# Convert all Tensors into TensorSpecs before saving the structured inputs.
# If storing pure concrete functions that are not called through polymorphic
# functions, we don't have access to FunctionSpec, so we need to call the
# TensorSpecs by their `arg_names` for later binding.
func_graph.structured_input_signature = (
convert_structure_to_signature(func_args, arg_names),
convert_structure_to_signature(func_kwargs))
flat_func_args = nest.flatten(func_args, expand_composites=True)
flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
# Temporarily set inputs to allow graph building code to inspect
# them. Reassigned below.
func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
if isinstance(arg, ops.Tensor)]
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
expand_composites=True)
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, flat_func_kwargs, expand_composites=True)
def convert(x):
"""Converts a function output to a Tensor."""
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# this won't work if x needs to be captured (i.e. if python_func returns
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_composite(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.contrib.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = a.mark_as_return(x)
return x
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def wrapper(*args, **kwargs):
# Note: functions annotated with @tf.function should always be
# converted even though they would meet autograph's whitelisting
# criteria.
# If this assumption is ever broken, converted_call will need to
# handle the possibility of original_func still being a shim, e.g.
# bound to WeakrefSelf.
return autograph.converted_call(
original_func, None,
autograph.ConversionOptions(
recursive=True,
optional_features=autograph_options,
force_conversion=True,
), args, kwargs)
# Wrapping around a decorator allows checks like tf_inspect.getargspec
# to be accurate.
converted_func = tf_decorator.make_decorator(original_func, wrapper)
python_func = tf_decorator.rewrap(python_func, original_func,
converted_func)
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors, CompositeTensors,
# TensorArrays and `None`s.
func_outputs = nest.map_structure(convert, func_outputs,
expand_composites=True)
check_mutation(func_args_before, func_args)
check_mutation(func_kwargs_before, func_kwargs)
finally:
      current_scope.set_use_resource(default_use_resource)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
graph_variables = list(func_graph._watched_variables) # pylint: disable=protected-access
arg_variables = set()
inputs = []
for arg in (nest.flatten(func_args, expand_composites=True) +
nest.flatten(func_kwargs, expand_composites=True)):
if isinstance(arg, resource_variable_ops.ResourceVariable):
# Even if an argument variable was not used in the function, we've
# already manually captured the resource Tensor when creating argument
# placeholders.
resource_placeholder = func_graph.captures.pop(arg.handle, None)
if resource_placeholder is None:
continue
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in graph_variables if v not in arg_variables]
func_graph.inputs = inputs + list(func_graph.captures.values())
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
if add_control_dependencies:
func_graph.control_outputs.extend(control_manager.ops_which_must_run)
# Register any other functions defined in the graph.
with ops.init_scope():
if context.executing_eagerly():
for f in func_graph._functions.values(): # pylint: disable=protected-access
# TODO(ashankar): What about the gradient registry?
context.add_function(f._c_func.func) # pylint: disable=protected-access
return func_graph
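# The helper below is a hypothetical, minimal usage sketch of
# `func_graph_from_py_func`; it is illustrative only and is not referenced
# anywhere else in this module.
def _example_trace_square():
  """Traces a trivial Python function into a FuncGraph (illustrative only)."""
  def square(x):
    return x * x
  return func_graph_from_py_func(
      "square", square, args=(ops.convert_to_tensor(2.0),), kwargs={})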
def maybe_captured(tensor):
"""If t is a captured value placeholder, returns the original captured value.
Args:
tensor: Tensor.
Returns:
A tensor, potentially from a different Graph/FuncGraph.
"""
if (not isinstance(tensor, ops.EagerTensor) and
tensor.op.graph.building_function and tensor.op.type == "Placeholder"):
for input_t, placeholder_t in tensor.op.graph.captures.items():
if tensor == placeholder_t:
return maybe_captured(input_t)
# pylint: enable=protected-access
return tensor
def device_stack_has_callable(device_stack):
"""Checks whether a device stack contains a callable."""
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2):
"""Check if two list of arguments are exactly the same."""
errmsg = ("Function to be traced should not modify structure of input "
"arguments. Check if your function has list and dictionary "
"operations that alter input arguments, "
"such as `list.pop`, `list.append`")
try:
nest.assert_same_structure(n1, n2, expand_composites=True)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),
nest.flatten(n2, expand_composites=True)):
if arg1 is not arg2:
raise ValueError(errmsg)
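# Example of the failure mode `check_mutation` guards against (hypothetical
# values): appending to a list argument during tracing changes its structure.
#
#   before = ([1, 2],)
#   after = ([1, 2, 3],)
#   check_mutation(before, after)  # raises ValueError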
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
"""Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.
Args:
sequence: A nested structure of Tensors, CompositeTensors, and
TensorArrays.
Returns:
A list of tensors.
"""
flat_sequence = nest.flatten(sequence, expand_composites=True)
return [
item.flow if isinstance(item, tensor_array_ops.TensorArray) else item
for item in flat_sequence]
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
"""Like `nest.pack_sequence_as` but also builds TensorArrays from flows.
Args:
structure: The structure to pack into. May contain Tensors,
CompositeTensors, or TensorArrays.
flat_sequence: An iterable containing tensors.
Returns:
A nested structure.
Raises:
AssertionError if `structure` and `flat_sequence` are not compatible.
"""
flat_sequence = list(flat_sequence)
flattened_structure = nest.flatten(structure, expand_composites=True)
if len(flattened_structure) != len(flat_sequence):
raise ValueError("Mismatch in element count")
for i in range(len(flat_sequence)):
if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):
flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(
old_ta=flattened_structure[i], flow=flat_sequence[i])
return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)
def _create_substitute_placeholder(value, name=None, dtype=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=value.shape, name=name)
custom_gradient.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
"""Maps Python function positional args to graph-construction inputs."""
return _get_defun_inputs(
args, names, structure=args, flat_shapes=flat_shapes)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
"""Maps python function args to graph-construction inputs.
Args:
args: A flat list of user-specified arguments.
names: A list of strings with user-specified argument names, same length as
`args`. May be `None`, in which case a generic name is used.
structure: The original argument list or dictionary.
flat_shapes: A flat list of values that are either `None` or
instances of `TensorShape`. If provided, then length must match
that of `nest.flatten(args, expand_composites=True)`; and locations where
`args` are instances of `Tensor` must have a corresponding `TensorShape`
in `flat_shapes`. May be `None`, in which case exact shapes are read
directly from the args.
Returns:
Placeholders with the same structure as `structure`.
Raises:
RuntimeError: if `flat_shapes` is provided, but
`len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.
RuntimeError: if a shape from `flat_shapes` is not None
for an argument that is not a `Tensor`, `TensorSpec`,
or `ResourceVariable`.
"""
func_graph = ops.get_default_graph()
function_inputs = []
if names is None:
names = [None] * len(args)
if flat_shapes is None:
shapes_iter = itertools.repeat(None)
else:
len_flat_args = len(nest.flatten(args, expand_composites=True))
if len_flat_args != len(flat_shapes):
raise RuntimeError(
"Length of fully flat shapes (%d) must match that of "
"flatten(args) (%d). args: %s, flat_shapes: %s"
% (len(flat_shapes),
len_flat_args,
args,
flat_shapes))
shapes_iter = iter(flat_shapes)
for arg_value, name in zip(args, names):
flattened = nest.flatten(arg_value, expand_composites=True)
tensor_specs = [
arg for arg in flattened if isinstance(arg, tensor_spec.TensorSpec)
]
specified_names = [arg.name for arg in tensor_specs if arg.name]
if specified_names and len(specified_names) < len(tensor_specs):
raise ValueError("If specifying TensorSpec names for nested structures, "
"either zero or all names have to be specified.")
for arg in flattened:
      # We have a shape entry for each arg, regardless of whether it's a real
# Tensor or not. For non-tensor entries it should be None.
shape = next(shapes_iter)
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
if isinstance(arg, tensor_spec.TensorSpec) and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder_shape = shape if shape is not None else arg.shape
try:
placeholder = graph_placeholder(
arg.dtype, placeholder_shape,
name=requested_name)
except ValueError:
# Sometimes parameter names are not valid op names, so fall back to
# unnamed placeholders.
placeholder = graph_placeholder(arg.dtype, placeholder_shape)
if name is not None:
# Record the requested/user-specified name in case it's different than
# the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
elif isinstance(arg, resource_variable_ops.ResourceVariable):
# Capture arg variables to create placeholders for them. These will be
# removed as captures after the function is traced (since otherwise we'd
# just add it back with a new placeholder when the variable was
# referenced).
placeholder = func_graph.capture(arg.handle, name=name)
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
function_inputs.append(arg)
else:
if shape is not None:
raise RuntimeError(
"Expected provided shape override to be None for arg that isn't "
"a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
% (arg, shape, args))
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs,
expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
"""Maps Python function keyword args to graph-construction inputs."""
if kwargs:
names, args = zip(*sorted(kwargs.items()))
else:
names = []
args = []
return _get_defun_inputs(
args, names, structure=kwargs, flat_shapes=flat_shapes)
def dismantle_func_graph(func_graph):
"""Removes reference cycles in `func_graph` FuncGraph.
Helpful for making sure the garbage collector doesn't need to run when
the FuncGraph goes out of scope, e.g. in tests using defun with
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).
Args:
func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
after this function.
"""
# TODO(b/115366440): Delete this method when a custom OrderedDict is added.
# Clearing captures using clear() leaves some cycles around.
while func_graph.captures:
func_graph.captures.popitem()
memory.dismantle_ordered_dict(func_graph.captures)
ops.dismantle_graph(func_graph)
| apache-2.0 | -8,036,142,726,043,407,000 | 41.763875 | 97 | 0.676845 | false |
mrcrgl/django_distributed_task | distributed_task/broker/backends/db.py | 1 | 1101 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ..interface import BrokerInterface
from distributed_task import settings
from distributed_task.core.serializer import serialize, deserialize
from distributed_task.models import Message
import time
class DatabaseMessageBroker(BrokerInterface):
queue = 'distributed_task_queue'
def prepare(self):
self.load_config()
def load_config(self):
OPTIONS = getattr(settings, 'BROKER_OPTIONS')
self.queue = OPTIONS.get('QUEUE', 'distributed_task_queue')
def produce_message(self, data):
m = Message(message=serialize(data), queue=self.queue)
m.save()
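    # Usage sketch (hypothetical payload; assumes Django is configured so the
    # Message model can be saved):
    #
    #   broker = DatabaseMessageBroker()
    #   broker.prepare()
    #   broker.produce_message({'task': 'demo.add', 'args': [1, 2]})
    #   broker.consume_message(lambda payload: print(payload))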
def consume_message(self, handler):
while True:
next = Message.objects.filter(queue=self.queue).order_by('created').first()
if not next:
return True
body = next.message
next.delete()
handler(deserialize(body))
def keep_consuming(self, handler):
while True:
self.consume_message(handler)
time.sleep(10) | mit | 4,212,467,058,290,029,600 | 26.55 | 87 | 0.64396 | false |
gazpachoking/deluge-old | deluge/ui/gtkui/createtorrentdialog.py | 1 | 18151 | #
# createtorrentdialog.py
#
# Copyright (C) 2008 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import gtk
import sys
import os.path
import gobject
import base64
import logging
from twisted.internet.threads import deferToThread
from deluge.ui.client import client
import listview
import deluge.component as component
import deluge.common
from deluge.configmanager import ConfigManager
log = logging.getLogger(__name__)
class CreateTorrentDialog:
def show(self):
self.builder = gtk.Builder()
# The main dialog
self.builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "create_torrent_dialog.ui")
))
# The remote path dialog
self.builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "create_torrent_dialog.remote_path.ui")
))
# The remote save dialog
self.builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "create_torrent_dialog.remote_save.ui")
))
# The progress dialog
self.builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "create_torrent_dialog.progress.ui")
))
self.config = ConfigManager("gtkui.conf")
self.dialog = self.builder.get_object("create_torrent_dialog")
self.dialog.set_transient_for(component.get("MainWindow").window)
self.builder.connect_signals({
"on_button_file_clicked": self._on_button_file_clicked,
"on_button_folder_clicked": self._on_button_folder_clicked,
"on_button_remote_path_clicked": self._on_button_remote_path_clicked,
"on_button_cancel_clicked": self._on_button_cancel_clicked,
"on_button_save_clicked": self._on_button_save_clicked,
"on_button_up_clicked": self._on_button_up_clicked,
"on_button_add_clicked": self._on_button_add_clicked,
"on_button_remove_clicked": self._on_button_remove_clicked,
"on_button_down_clicked": self._on_button_down_clicked
})
# path, icon, size
self.files_treestore = gtk.TreeStore(str, str, gobject.TYPE_UINT64)
column = gtk.TreeViewColumn(_("Filename"))
render = gtk.CellRendererPixbuf()
column.pack_start(render, False)
column.add_attribute(render, "stock-id", 1)
render = gtk.CellRendererText()
column.pack_start(render, True)
column.add_attribute(render, "text", 0)
column.set_expand(True)
self.builder.get_object("treeview_files").append_column(column)
column = gtk.TreeViewColumn(_("Size"))
render = gtk.CellRendererText()
column.pack_start(render)
column.set_cell_data_func(render, listview.cell_data_size, 2)
self.builder.get_object("treeview_files").append_column(column)
self.builder.get_object("treeview_files").set_model(self.files_treestore)
self.builder.get_object("treeview_files").set_show_expanders(False)
# tier, url
self.trackers_liststore = gtk.ListStore(int, str)
self.builder.get_object("tracker_treeview").append_column(
gtk.TreeViewColumn(_("Tier"), gtk.CellRendererText(), text=0))
self.builder.get_object("tracker_treeview").append_column(
gtk.TreeViewColumn(_("Tracker"), gtk.CellRendererText(), text=1))
self.builder.get_object("tracker_treeview").set_model(self.trackers_liststore)
self.trackers_liststore.set_sort_column_id(0, gtk.SORT_ASCENDING)
if not client.is_localhost() and client.connected():
self.builder.get_object("button_remote_path").show()
else:
self.builder.get_object("button_remote_path").hide()
self.dialog.show()
def parse_piece_size_text(self, value):
psize, metric = value.split()
psize = int(psize)
if psize < 32:
# This is a MiB value
psize = psize * 1024 * 1024
else:
# This is a KiB value
psize = psize * 1024
return psize
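    # For example (hypothetical combo-box strings): values below 32 are treated
    # as MiB, anything else as KiB, so:
    #
    #   self.parse_piece_size_text("512 KiB")  # -> 524288 bytes
    #   self.parse_piece_size_text("2 MiB")    # -> 2097152 bytes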
def adjust_piece_size(self):
"""Adjusts the recommended piece based on the file/folder/path selected."""
size = self.files_treestore[0][2]
model = self.builder.get_object("combo_piece_size").get_model()
for index,value in enumerate(model):
psize = self.parse_piece_size_text(value[0])
pieces = size / psize
if pieces < 2048 or (index + 1) == len(model):
self.builder.get_object("combo_piece_size").set_active(index)
break
def _on_button_file_clicked(self, widget):
log.debug("_on_button_file_clicked")
# Setup the filechooserdialog
chooser = gtk.FileChooserDialog(_("Choose a file"),
self.dialog,
gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN,
gtk.RESPONSE_OK))
chooser.set_transient_for(self.dialog)
chooser.set_select_multiple(False)
chooser.set_property("skip-taskbar-hint", True)
# Run the dialog
response = chooser.run()
if response == gtk.RESPONSE_OK:
result = chooser.get_filename()
else:
chooser.destroy()
return
path = result.decode('utf-8')
self.files_treestore.clear()
self.files_treestore.append(None, [result, gtk.STOCK_FILE, deluge.common.get_path_size(path)])
self.adjust_piece_size()
chooser.destroy()
def _on_button_folder_clicked(self, widget):
log.debug("_on_button_folder_clicked")
# Setup the filechooserdialog
chooser = gtk.FileChooserDialog(_("Choose a folder"),
self.dialog,
gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN,
gtk.RESPONSE_OK))
chooser.set_transient_for(self.dialog)
chooser.set_select_multiple(False)
chooser.set_property("skip-taskbar-hint", True)
# Run the dialog
response = chooser.run()
if response == gtk.RESPONSE_OK:
result = chooser.get_filename()
else:
chooser.destroy()
return
path = result.decode('utf-8')
self.files_treestore.clear()
self.files_treestore.append(None, [result, gtk.STOCK_OPEN, deluge.common.get_path_size(path)])
self.adjust_piece_size()
chooser.destroy()
def _on_button_remote_path_clicked(self, widget):
log.debug("_on_button_remote_path_clicked")
dialog = self.builder.get_object("remote_path_dialog")
entry = self.builder.get_object("entry_path")
dialog.set_transient_for(self.dialog)
entry.set_text("/")
entry.grab_focus()
response = dialog.run()
if response == gtk.RESPONSE_OK:
result = entry.get_text()
def _on_get_path_size(size):
log.debug("size: %s", size)
if size > 0:
self.files_treestore.clear()
self.files_treestore.append(None, [result, gtk.STOCK_NETWORK, size])
self.adjust_piece_size()
client.core.get_path_size(result).addCallback(_on_get_path_size)
client.force_call(True)
dialog.hide()
def _on_button_cancel_clicked(self, widget):
log.debug("_on_button_cancel_clicked")
self.dialog.destroy()
def _on_button_save_clicked(self, widget):
log.debug("_on_button_save_clicked")
if len(self.files_treestore) == 0:
return
is_remote = self.files_treestore[0][1] == gtk.STOCK_NETWORK
torrent_filename = "%s.torrent" % os.path.split(self.files_treestore[0][0].rstrip('/'))[-1]
if is_remote:
# This is a remote path
dialog = self.builder.get_object("remote_save_dialog")
dialog.set_transient_for(self.dialog)
self.builder.get_object("entry_save_path").set_text(torrent_filename)
response = dialog.run()
if response == gtk.RESPONSE_OK:
result = self.builder.get_object("entry_save_path").get_text()
else:
dialog.hide()
return
dialog.hide()
else:
# Setup the filechooserdialog
chooser = gtk.FileChooserDialog(_("Save .torrent file"),
self.dialog,
gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
chooser.set_transient_for(self.dialog)
chooser.set_select_multiple(False)
chooser.set_property("skip-taskbar-hint", True)
# Add .torrent and * file filters
file_filter = gtk.FileFilter()
file_filter.set_name(_("Torrent files"))
file_filter.add_pattern("*." + "torrent")
chooser.add_filter(file_filter)
file_filter = gtk.FileFilter()
file_filter.set_name(_("All files"))
file_filter.add_pattern("*")
chooser.add_filter(file_filter)
chooser.set_current_name(torrent_filename)
# Run the dialog
response = chooser.run()
if response == gtk.RESPONSE_OK:
result = chooser.get_filename()
else:
chooser.destroy()
return
chooser.destroy()
# Fix up torrent filename
if len(result) < 9:
result += ".torrent"
elif result[-8:] != ".torrent":
result += ".torrent"
# Get the path
path = self.files_treestore[0][0]
# Get a list of trackers
trackers = []
if not len(self.trackers_liststore):
tracker = None
else:
# Create a list of lists [[tier0, ...], [tier1, ...], ...]
tier_dict = {}
for tier, tracker in self.trackers_liststore:
tier_dict.setdefault(tier, []).append(tracker)
trackers = [tier_dict[tier] for tier in sorted(tier_dict)]
# Get the first tracker in the first tier
tracker = trackers[0][0]
# Get a list of webseeds
webseeds = []
b = self.builder.get_object("textview_webseeds").get_buffer()
lines = b.get_text(b.get_start_iter(), b.get_end_iter()).strip().split("\n")
import deluge.common
for l in lines:
if deluge.common.is_url(l):
webseeds.append(l)
# Get the piece length in bytes
combo = self.builder.get_object("combo_piece_size")
piece_length = self.parse_piece_size_text(combo.get_model()[combo.get_active()][0])
author = self.builder.get_object("entry_author").get_text()
comment = self.builder.get_object("entry_comments").get_text()
private = self.builder.get_object("chk_private_flag").get_active()
add_to_session = self.builder.get_object("chk_add_to_session").get_active()
if is_remote:
def torrent_created():
self.builder.get_object("progress_dialog").hide_all()
client.deregister_event_handler("CreateTorrentProgressEvent", on_create_torrent_progress_event)
def on_create_torrent_progress_event(piece_count, num_pieces):
self._on_create_torrent_progress(piece_count, num_pieces)
if piece_count == num_pieces:
from twisted.internet import reactor
reactor.callLater(0.5, torrent_created) # pylint: disable-msg=E1101
client.register_event_handler("CreateTorrentProgressEvent", on_create_torrent_progress_event)
client.core.create_torrent(
path,
tracker,
piece_length,
comment,
result,
webseeds,
private,
author,
trackers,
add_to_session)
else:
def hide_progress(result):
self.builder.get_object("progress_dialog").hide_all()
deferToThread(self.create_torrent,
path.decode('utf-8'),
tracker,
piece_length,
self._on_create_torrent_progress,
comment,
result.decode('utf-8'),
webseeds,
private,
author,
trackers,
add_to_session).addCallback(hide_progress)
# Setup progress dialog
self.builder.get_object("progress_dialog").set_transient_for(component.get("MainWindow").window)
self.builder.get_object("progress_dialog").show_all()
self.dialog.destroy()
def create_torrent(self, path, tracker, piece_length, progress, comment, target,
webseeds, private, created_by, trackers, add_to_session):
import deluge.metafile
deluge.metafile.make_meta_file(
path,
tracker,
piece_length,
progress=progress,
comment=comment,
target=target,
webseeds=webseeds,
private=private,
created_by=created_by,
trackers=trackers)
if add_to_session:
client.core.add_torrent_file(
os.path.split(target)[-1],
base64.encodestring(open(target, "rb").read()),
{"download_location": os.path.split(path)[0]})
def _on_create_torrent_progress(self, value, num_pieces):
percent = float(value)/float(num_pieces)
def update_pbar_with_gobject(percent):
pbar = self.builder.get_object("progressbar")
pbar.set_text("%.2f%%" % (percent*100))
pbar.set_fraction(percent)
return False
if percent >= 0 and percent <= 1.0:
# Make sure there are no threads race conditions that can
# crash the UI while updating it.
gobject.idle_add(update_pbar_with_gobject, percent)
def _on_button_up_clicked(self, widget):
log.debug("_on_button_up_clicked")
row = self.builder.get_object("tracker_treeview").get_selection().get_selected()[1]
if row is None:
return
if self.trackers_liststore[row][0] == 0:
return
else:
self.trackers_liststore[row][0] -= 1
def _on_button_down_clicked(self, widget):
log.debug("_on_button_down_clicked")
row = self.builder.get_object("tracker_treeview").get_selection().get_selected()[1]
if row is None:
return
self.trackers_liststore[row][0] += 1
def _on_button_add_clicked(self, widget):
log.debug("_on_button_add_clicked")
builder = gtk.Builder()
builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "edit_trackers.ui")
))
dialog = builder.get_object("add_tracker_dialog")
dialog.set_transient_for(self.dialog)
textview = builder.get_object("textview_trackers")
if self.config["createtorrent.trackers"]:
textview.get_buffer().set_text("\n".join(self.config["createtorrent.trackers"]))
else:
textview.get_buffer().set_text("")
textview.grab_focus()
response = dialog.run()
if response == gtk.RESPONSE_OK:
# Create a list of trackers from the textview buffer
trackers = []
b = textview.get_buffer()
lines = b.get_text(b.get_start_iter(), b.get_end_iter()).strip().split("\n")
self.config["createtorrent.trackers"] = lines
log.debug("lines: %s", lines)
for l in lines:
if deluge.common.is_url(l):
trackers.append(l)
            # Add these trackers to the highest existing tier
tier = 0
for row in self.trackers_liststore:
if row[0] > tier:
tier = row[0]
for tracker in trackers:
self.trackers_liststore.append([tier, tracker])
dialog.destroy()
def _on_button_remove_clicked(self, widget):
log.debug("_on_button_remove_clicked")
row = self.builder.get_object("tracker_treeview").get_selection().get_selected()[1]
if row is None:
return
self.trackers_liststore.remove(row)
| gpl-3.0 | -3,731,345,788,369,979,000 | 37.455508 | 111 | 0.590381 | false |
partofthething/laserComm | laserComm/receiver.py | 1 | 2975 | '''
receiver runs the ADC and photoresistor to receive an input signal.
USes MCP3008 ADC via the hardware SPI interface.
Connections are:
MCP3008 VDD -> 3.3V (red)
MCP3008 VREF -> 3.3V (red)
MCP3008 AGND -> GND (orange)
MCP3008 CLK -> SCLK (yellow)
MCP3008 DOUT -> MISO (green)
MCP3008 DIN -> MOSI (yellow)
MCP3008 CS -> CE0 (red)
MCP3008 DGND -> GND (orange)
The photoresistor ranges from about 4 kOhms (dark) down to roughly 90 Ohms (flashlight).
Output is 1024*Vin/Vref.
Build a voltage divider with a 200 Ohm resistor in series with the photoresistor and measure
Vout between them. I put the photoresistor between Vout and ground.
The signal is intended to be processed using signal_processor
'''
import time
import numpy
import matplotlib
matplotlib.use('Agg') # works headless (e.g. on Raspberry Pi)
import matplotlib.pyplot as plt
try:
import spidev
except ImportError:
print('no spidev')
GAP = 0.001
class ADC(object):
"""
The Analog-to-digital converter
"""
def __init__(self):
self.adc = None
def __enter__(self):
self.adc = spidev.SpiDev()
self.adc.open(0, 0)
def __exit__(self, exc_type, exc_value, traceback):
self.adc.close()
def read(self, input_number):
"""
read SPI data from MCP3008 chip
There are 8 possible channels (0 through 7)
Will return value between 0 and 1023
"""
if ((input_number > 7) or (input_number < 0)):
return -1
r = self.adc.xfer2([1, (8 + input_number) << 4, 0])
adcValue = ((r[1] & 3) << 8) + r[2]
return adcValue
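# Usage sketch for turning a raw ADC reading back into a voltage and a
# photoresistor value (hypothetical numbers; assumes Vref = 3.3 V and the
# 200 Ohm series resistor described in the module docstring):
#
#   with adc:
#       raw = adc.read(0)                      # 0..1023
#       vout = raw * 3.3 / 1024.0              # voltage across the photoresistor
#       r_photo = 200.0 * vout / (3.3 - vout)  # voltage-divider equation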
class Receiver(object):
"""
Stream processor that uses adc
"""
@property
def times(self):
        return numpy.linspace(0, 30, len(self.vals))
def receive(self, adc):
self.vals = []
        # receive for 30 seconds
print('Receiving')
start = time.time()
while time.time() - start < 30.0:
self.vals.append(adc.read(0))
time.sleep(GAP / 10)
def plot(self, fname='adc.pdf'):
print('Plotting')
t = self.times
plt.figure(figsize=(12, 10))
plt.plot(t, self.vals, '-')
plt.xlabel('Time (s)')
plt.ylabel('ADC signal')
plt.title('ADC Signal Trace')
plt.grid(color='0.7')
if fname:
plt.savefig(fname)
def save(self, fname='adc.txt'):
"""
Save results to file
"""
print('Saving')
with open(fname, 'w') as f:
f.writelines(['{0:04d}\n'.format(vi) for vi in self.vals])
def load(self, fname='adc.txt'):
print('Loading')
with open(fname) as f:
vals = f.readlines()
self.vals = [float(vi) for vi in vals]
if __name__ == '__main__':
adc = ADC()
receiver = Receiver()
with adc:
vals = receiver.receive(adc)
receiver.plot()
receiver.save()
| mit | -8,309,013,175,638,978,000 | 23.791667 | 88 | 0.572437 | false |
gajim/python-nbxmpp | test/unit/test_xml_vulnerability.py | 1 | 2389 | import unittest
from unittest.mock import Mock
from nbxmpp import dispatcher
class XMLVulnerability(unittest.TestCase):
def setUp(self):
self.stream = Mock()
self.stream.is_websocket = False
self.dispatcher = dispatcher.StanzaDispatcher(self.stream)
self._error_handler = Mock()
self.dispatcher.subscribe('parsing-error', self._error_handler)
self.dispatcher.reset_parser()
def test_exponential_entity_expansion(self):
bomb = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE bomb [
<!ENTITY a "test">
<!ENTITY b "&a;&a;&a;&a;&a;&a;&a;&a;&a;&a;&a;&a;&a;&a;&a;&a;">
<!ENTITY c "&b;&b;&b;&b;&b;&b;&b;&b;&b;&b;&b;&b;&b;&b;&b;&b;">
]>
<bomb>&c;</bomb>"""
self.dispatcher.process_data(bomb)
self._error_handler.assert_called()
def test_quadratic_blowup(self):
bomb = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE bomb [
<!ENTITY a "xxxxxxx... a couple of ten thousand chars">
]>
<bomb>&a;&a;&a;... repeat</bomb>"""
self.dispatcher.process_data(bomb)
self._error_handler.assert_called()
def test_external_entity_expansion(self):
bomb = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE external [
<!ENTITY ee SYSTEM "http://www.python.org/some.xml">
]>
        <root>&ee;</root>"""
self.dispatcher.process_data(bomb)
self._error_handler.assert_called()
def test_external_local_entity_expansion(self):
bomb = """<?xml version="1.0" encoding="utf-8"?>
<stream:stream xmlns:stream='http://etherx.jabber.org/streams' xmlns='jabber:client'>
<!DOCTYPE external [
<!ENTITY ee SYSTEM "file:///PATH/TO/simple.xml">
]>
        <root>&ee;</root>"""
self.dispatcher.process_data(bomb)
self._error_handler.assert_called()
def test_dtd_retrival(self):
bomb = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head/>
<body>text</body>
</html>"""
self.dispatcher.process_data(bomb)
self._error_handler.assert_called()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -27,018,462,317,749,748 | 31.283784 | 93 | 0.564253 | false |
mikeh77/mi-instrument | mi/core/instrument/instrument_driver.py | 1 | 46251 | #!/usr/bin/env python
"""
@package ion.services.mi.instrument_driver Instrument driver structures
@file ion/services/mi/instrument_driver.py
@author Edward Hunter
@brief Instrument driver classes that provide structure towards interaction
with individual instruments in the system.
"""
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
import time
import json
from threading import Thread
from mi.core.common import BaseEnum
from mi.core.exceptions import TestModeException
from mi.core.exceptions import NotImplementedException
from mi.core.exceptions import InstrumentException
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentConnectionException
from mi.core.instrument.instrument_fsm import InstrumentFSM, ThreadSafeFSM
from mi.core.instrument.port_agent_client import PortAgentClient
from mi.core.log import get_logger,LoggerManager
log = get_logger()
class ConfigMetadataKey(BaseEnum):
"""
Keys used in the metadata structure that describes the driver, commands,
and parameters used in the driver and protocol.
"""
DRIVER = 'driver'
COMMANDS = 'commands'
PARAMETERS = 'parameters'
class DriverConfigKey(BaseEnum):
"""
Dictionary keys for driver config objects
"""
PARAMETERS = 'parameters'
SCHEDULER = 'scheduler'
# This is a copy since we can't import from pyon.
class ResourceAgentState(BaseEnum):
"""
Resource agent common states.
"""
POWERED_DOWN = 'RESOURCE_AGENT_STATE_POWERED_DOWN'
UNINITIALIZED = 'RESOURCE_AGENT_STATE_UNINITIALIZED'
INACTIVE = 'RESOURCE_AGENT_STATE_INACTIVE'
IDLE = 'RESOURCE_AGENT_STATE_IDLE'
STOPPED = 'RESOURCE_AGENT_STATE_STOPPED'
COMMAND = 'RESOURCE_AGENT_STATE_COMMAND'
STREAMING = 'RESOURCE_AGENT_STATE_STREAMING'
TEST = 'RESOURCE_AGENT_STATE_TEST'
CALIBRATE = 'RESOURCE_AGENT_STATE_CALIBRATE'
DIRECT_ACCESS = 'RESOUCE_AGENT_STATE_DIRECT_ACCESS'
BUSY = 'RESOURCE_AGENT_STATE_BUSY'
LOST_CONNECTION = 'RESOURCE_AGENT_STATE_LOST_CONNECTION'
ACTIVE_UNKNOWN = 'RESOURCE_AGENT_STATE_ACTIVE_UNKNOWN'
class ResourceAgentEvent(BaseEnum):
"""
Resource agent common events.
"""
ENTER = 'RESOURCE_AGENT_EVENT_ENTER'
EXIT = 'RESOURCE_AGENT_EVENT_EXIT'
POWER_UP = 'RESOURCE_AGENT_EVENT_POWER_UP'
POWER_DOWN = 'RESOURCE_AGENT_EVENT_POWER_DOWN'
INITIALIZE = 'RESOURCE_AGENT_EVENT_INITIALIZE'
RESET = 'RESOURCE_AGENT_EVENT_RESET'
GO_ACTIVE = 'RESOURCE_AGENT_EVENT_GO_ACTIVE'
GO_INACTIVE = 'RESOURCE_AGENT_EVENT_GO_INACTIVE'
RUN = 'RESOURCE_AGENT_EVENT_RUN'
CLEAR = 'RESOURCE_AGENT_EVENT_CLEAR'
PAUSE = 'RESOURCE_AGENT_EVENT_PAUSE'
RESUME = 'RESOURCE_AGENT_EVENT_RESUME'
GO_COMMAND = 'RESOURCE_AGENT_EVENT_GO_COMMAND'
GO_DIRECT_ACCESS = 'RESOURCE_AGENT_EVENT_GO_DIRECT_ACCESS'
GET_RESOURCE = 'RESOURCE_AGENT_EVENT_GET_RESOURCE'
SET_RESOURCE = 'RESOURCE_AGENT_EVENT_SET_RESOURCE'
EXECUTE_RESOURCE = 'RESOURCE_AGENT_EVENT_EXECUTE_RESOURCE'
GET_RESOURCE_STATE = 'RESOURCE_AGENT_EVENT_GET_RESOURCE_STATE'
GET_RESOURCE_CAPABILITIES = 'RESOURCE_AGENT_EVENT_GET_RESOURCE_CAPABILITIES'
DONE = 'RESOURCE_AGENT_EVENT_DONE'
PING_RESOURCE = 'RESOURCE_AGENT_PING_RESOURCE'
LOST_CONNECTION = 'RESOURCE_AGENT_EVENT_LOST_CONNECTION'
AUTORECONNECT = 'RESOURCE_AGENT_EVENT_AUTORECONNECT'
GET_RESOURCE_SCHEMA = 'RESOURCE_AGENT_EVENT_GET_RESOURCE_SCHEMA'
CHANGE_STATE_ASYNC = 'RESOURCE_AGENT_EVENT_CHANGE_STATE_ASYNC'
class DriverState(BaseEnum):
"""Common driver state enum"""
UNCONFIGURED = 'DRIVER_STATE_UNCONFIGURED'
DISCONNECTED = 'DRIVER_STATE_DISCONNECTED'
CONNECTING = 'DRIVER_STATE_CONNECTING'
DISCONNECTING = 'DRIVER_STATE_DISCONNECTING'
CONNECTED = 'DRIVER_STATE_CONNECTED'
ACQUIRE_SAMPLE = 'DRIVER_STATE_ACQUIRE_SAMPLE'
UPDATE_PARAMS = 'DRIVER_STATE_UPDATE_PARAMS'
SET = 'DRIVER_STATE_SET'
SLEEP = 'DRIVER_STATE_SLEEP'
class DriverProtocolState(BaseEnum):
"""
Base states for driver protocols. Subclassed for specific driver
protocols.
"""
AUTOSAMPLE = 'DRIVER_STATE_AUTOSAMPLE'
TEST = 'DRIVER_STATE_TEST'
CALIBRATE = 'DRIVER_STATE_CALIBRATE'
COMMAND = 'DRIVER_STATE_COMMAND'
DIRECT_ACCESS = 'DRIVER_STATE_DIRECT_ACCESS'
UNKNOWN = 'DRIVER_STATE_UNKNOWN'
POLL = 'DRIVER_STATE_POLL'
class DriverConnectionState(BaseEnum):
"""
Base states for driver connections.
"""
UNCONFIGURED = 'DRIVER_STATE_UNCONFIGURED'
DISCONNECTED = 'DRIVER_STATE_DISCONNECTED'
CONNECTED = 'DRIVER_STATE_CONNECTED'
class DriverEvent(BaseEnum):
"""
Base events for driver state machines. Commands and other events
are transformed into state machine events for handling.
"""
ENTER = 'DRIVER_EVENT_ENTER'
EXIT = 'DRIVER_EVENT_EXIT'
INITIALIZE = 'DRIVER_EVENT_INITIALIZE'
CONFIGURE = 'DRIVER_EVENT_CONFIGURE'
CONNECT = 'DRIVER_EVENT_CONNECT'
CONNECTION_LOST = 'DRIVER_CONNECTION_LOST'
DISCONNECT = 'DRIVER_EVENT_DISCONNECT'
SET = 'DRIVER_EVENT_SET'
GET = 'DRIVER_EVENT_GET'
DISCOVER = 'DRIVER_EVENT_DISCOVER'
EXECUTE = 'DRIVER_EVENT_EXECUTE'
ACQUIRE_SAMPLE = 'DRIVER_EVENT_ACQUIRE_SAMPLE'
START_AUTOSAMPLE = 'DRIVER_EVENT_START_AUTOSAMPLE'
STOP_AUTOSAMPLE = 'DRIVER_EVENT_STOP_AUTOSAMPLE'
TEST = 'DRIVER_EVENT_TEST'
RUN_TEST = 'DRIVER_EVENT_RUN_TEST'
STOP_TEST = 'DRIVER_EVENT_STOP_TEST'
CALIBRATE = 'DRIVER_EVENT_CALIBRATE'
RESET = 'DRIVER_EVENT_RESET'
UPDATE_PARAMS = 'DRIVER_EVENT_UPDATE_PARAMS'
BREAK = 'DRIVER_EVENT_BREAK'
EXECUTE_DIRECT = 'EXECUTE_DIRECT'
START_DIRECT = 'DRIVER_EVENT_START_DIRECT'
STOP_DIRECT = 'DRIVER_EVENT_STOP_DIRECT'
PING_DRIVER = 'DRIVER_EVENT_PING_DRIVER'
FORCE_STATE = 'DRIVER_FORCE_STATE'
CLOCK_SYNC = 'DRIVER_EVENT_CLOCK_SYNC'
SCHEDULED_CLOCK_SYNC = 'DRIVER_EVENT_SCHEDULED_CLOCK_SYNC'
ACQUIRE_STATUS = 'DRIVER_EVENT_ACQUIRE_STATUS'
INIT_PARAMS = 'DRIVER_EVENT_INIT_PARAMS'
GAP_RECOVERY = 'DRIVER_EVENT_GAP_RECOVERY'
GAP_RECOVERY_COMPLETE = 'DRIVER_EVENT_GAP_RECOVERY_COMPLETE'
class DriverAsyncEvent(BaseEnum):
"""
Asynchronous driver event types.
"""
STATE_CHANGE = 'DRIVER_ASYNC_EVENT_STATE_CHANGE'
CONFIG_CHANGE = 'DRIVER_ASYNC_EVENT_CONFIG_CHANGE'
SAMPLE = 'DRIVER_ASYNC_EVENT_SAMPLE'
ERROR = 'DRIVER_ASYNC_EVENT_ERROR'
RESULT = 'DRIVER_ASYNC_RESULT'
DIRECT_ACCESS = 'DRIVER_ASYNC_EVENT_DIRECT_ACCESS'
AGENT_EVENT = 'DRIVER_ASYNC_EVENT_AGENT_EVENT'
class DriverParameter(BaseEnum):
"""
Base driver parameters. Subclassed by specific drivers with device
specific parameters.
"""
ALL = 'DRIVER_PARAMETER_ALL'
class InstrumentDriver(object):
"""
Base class for instrument drivers.
"""
def __init__(self, event_callback):
"""
Constructor.
@param event_callback The driver process callback used to send
        asynchronous driver events to the agent.
"""
LoggerManager()
self._send_event = event_callback
self._test_mode = False
#############################################################
# Device connection interface.
#############################################################
def set_test_mode(self, mode):
"""
        Enable test mode for the driver. If this mode is invoked
then the user has access to test_ commands.
@param mode: test mode state
"""
self._test_mode = True if mode else False
def initialize(self, *args, **kwargs):
"""
Initialize driver connection, bringing communications parameters
into unconfigured state (no connection object).
@raises InstrumentStateException if command not allowed in current state
@raises NotImplementedException if not implemented by subclass.
"""
raise NotImplementedException('initialize() not implemented.')
def configure(self, *args, **kwargs):
"""
Configure the driver for communications with the device via
port agent / logger (valid but unconnected connection object).
@param arg[0] comms config dict.
@raises InstrumentStateException if command not allowed in current state
@throws InstrumentParameterException if missing comms or invalid config dict.
@raises NotImplementedException if not implemented by subclass.
"""
raise NotImplementedException('configure() not implemented.')
def connect(self, *args, **kwargs):
"""
Establish communications with the device via port agent / logger
(connected connection object).
@raises InstrumentStateException if command not allowed in current state
@throws InstrumentConnectionException if the connection failed.
@raises NotImplementedException if not implemented by subclass.
"""
raise NotImplementedException('connect() not implemented.')
def disconnect(self, *args, **kwargs):
"""
Disconnect from device via port agent / logger.
@raises InstrumentStateException if command not allowed in current state
@raises NotImplementedException if not implemented by subclass.
"""
raise NotImplementedException('disconnect() not implemented.')
#############################################################
# Command and control interface.
#############################################################
def discover_state(self, *args, **kwargs):
"""
Determine initial state upon establishing communications.
@param timeout=timeout Optional command timeout.
@retval Current device state.
@raises InstrumentTimeoutException if could not wake device.
@raises InstrumentStateException if command not allowed in current state or if
device state not recognized.
@raises NotImplementedException if not implemented by subclass.
"""
raise NotImplementedException('discover_state() is not implemented.')
def get_resource_capabilities(self, *args, **kwargs):
"""
Return driver commands and parameters.
@param current_state True to retrieve commands available in current
state, otherwise reutrn all commands.
@retval list of AgentCapability objects representing the drivers
capabilities.
@raises NotImplementedException if not implemented by subclass.
"""
raise NotImplementedException('get_resource_capabilities() is not implemented.')
def get_resource_state(self, *args, **kwargs):
"""
Return the current state of the driver.
@retval str current driver state.
@raises NotImplementedException if not implemented by subclass.
"""
raise NotImplementedException('get_resource_state() is not implemented.')
def get_resource(self, *args, **kwargs):
"""
Retrieve device parameters.
        @param args[0] DriverParameter.ALL or a list of parameters to retrieve.
@retval parameter : value dict.
@raises InstrumentParameterException if missing or invalid get parameters.
@raises InstrumentStateException if command not allowed in current state
@raises NotImplementedException if not implemented by subclass.
"""
raise NotImplementedException('get_resource() is not implemented.')
def set_resource(self, *args, **kwargs):
"""
Set device parameters.
@param args[0] parameter : value dict of parameters to set.
@param timeout=timeout Optional command timeout.
@raises InstrumentParameterException if missing or invalid set parameters.
@raises InstrumentTimeoutException if could not wake device or no response.
@raises InstrumentProtocolException if set command not recognized.
@raises InstrumentStateException if command not allowed in current state.
@raises NotImplementedException if not implemented by subclass.
"""
raise NotImplementedException('set_resource() not implemented.')
def execute_resource(self, *args, **kwargs):
"""
Execute a driver command.
@param timeout=timeout Optional command timeout.
        @retval Command specific.
@raises InstrumentTimeoutException if could not wake device or no response.
@raises InstrumentProtocolException if command not recognized.
@raises InstrumentStateException if command not allowed in current state.
@raises NotImplementedException if not implemented by subclass.
"""
raise NotImplementedException('execute_resource() not implemented.')
def start_direct(self, *args, **kwargs):
"""
Start direct access mode
@param timeout=timeout Optional command timeout.
        @retval Command specific.
@raises NotImplementedException if not implemented by subclass.
"""
        raise NotImplementedException('start_direct() not implemented.')
def stop_direct(self, *args, **kwargs):
"""
Stop direct access mode
@param timeout=timeout Optional command timeout.
        @retval Command specific.
@raises NotImplementedException if not implemented by subclass.
"""
        raise NotImplementedException('stop_direct() not implemented.')
########################################################################
# Event interface.
########################################################################
def _driver_event(self, type, val=None):
"""
Construct and send an asynchronous driver event.
@param type a DriverAsyncEvent type specifier.
@param val event value for sample and test result events.
"""
event = {
'type' : type,
'value' : None,
'time' : time.time()
}
if type == DriverAsyncEvent.STATE_CHANGE:
state = self.get_resource_state()
event['value'] = state
self._send_event(event)
elif type == DriverAsyncEvent.CONFIG_CHANGE:
config = self.get_resource(DriverParameter.ALL)
event['value'] = config
self._send_event(event)
elif type == DriverAsyncEvent.SAMPLE:
event['value'] = val
self._send_event(event)
elif type == DriverAsyncEvent.ERROR:
event['value'] = val
self._send_event(event)
elif type == DriverAsyncEvent.RESULT:
event['value'] = val
self._send_event(event)
elif type == DriverAsyncEvent.DIRECT_ACCESS:
event['value'] = val
self._send_event(event)
elif type == DriverAsyncEvent.AGENT_EVENT:
event['value'] = val
self._send_event(event)
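        # For example (hypothetical), a protocol implementation would publish a
        # parsed sample with:
        #
        #   self._driver_event(DriverAsyncEvent.SAMPLE, val=parsed_sample_dict)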
########################################################################
# Test interface.
########################################################################
def driver_ping(self, msg):
"""
Echo a message.
@param msg the message to prepend and echo back to the caller.
"""
reply = 'driver_ping: '+msg
return reply
def test_exceptions(self, msg):
"""
Test exception handling in the driver process.
@param msg message string to put in a raised exception to be caught in
a test.
        @raises InstrumentException always.
"""
raise InstrumentException(msg)
class SingleConnectionInstrumentDriver(InstrumentDriver):
"""
Base class for instrument drivers with a single device connection.
    Provides connection state logic for single connection drivers. This is
the base class for the majority of driver implementation classes.
"""
def __init__(self, event_callback):
"""
Constructor for singly connected instrument drivers.
@param event_callback Callback to the driver process to send asynchronous
driver events back to the agent.
"""
InstrumentDriver.__init__(self, event_callback)
        # The one and only instrument connection.
# Exists in the connected state.
self._connection = None
# The one and only instrument protocol.
self._protocol = None
# Build connection state machine.
self._connection_fsm = ThreadSafeFSM(DriverConnectionState,
DriverEvent,
DriverEvent.ENTER,
DriverEvent.EXIT)
# Add handlers for all events.
self._connection_fsm.add_handler(DriverConnectionState.UNCONFIGURED, DriverEvent.ENTER, self._handler_unconfigured_enter)
self._connection_fsm.add_handler(DriverConnectionState.UNCONFIGURED, DriverEvent.EXIT, self._handler_unconfigured_exit)
self._connection_fsm.add_handler(DriverConnectionState.UNCONFIGURED, DriverEvent.INITIALIZE, self._handler_unconfigured_initialize)
self._connection_fsm.add_handler(DriverConnectionState.UNCONFIGURED, DriverEvent.CONFIGURE, self._handler_unconfigured_configure)
self._connection_fsm.add_handler(DriverConnectionState.DISCONNECTED, DriverEvent.ENTER, self._handler_disconnected_enter)
self._connection_fsm.add_handler(DriverConnectionState.DISCONNECTED, DriverEvent.EXIT, self._handler_disconnected_exit)
self._connection_fsm.add_handler(DriverConnectionState.DISCONNECTED, DriverEvent.INITIALIZE, self._handler_disconnected_initialize)
self._connection_fsm.add_handler(DriverConnectionState.DISCONNECTED, DriverEvent.CONFIGURE, self._handler_disconnected_configure)
self._connection_fsm.add_handler(DriverConnectionState.DISCONNECTED, DriverEvent.CONNECT, self._handler_disconnected_connect)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.ENTER, self._handler_connected_enter)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.EXIT, self._handler_connected_exit)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.DISCONNECT, self._handler_connected_disconnect)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.CONNECTION_LOST, self._handler_connected_connection_lost)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.DISCOVER, self._handler_connected_protocol_event)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.GET, self._handler_connected_protocol_event)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.SET, self._handler_connected_protocol_event)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.EXECUTE, self._handler_connected_protocol_event)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.FORCE_STATE, self._handler_connected_protocol_event)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.START_DIRECT, self._handler_connected_start_direct_event)
self._connection_fsm.add_handler(DriverConnectionState.CONNECTED, DriverEvent.STOP_DIRECT, self._handler_connected_stop_direct_event)
# Start state machine.
self._connection_fsm.start(DriverConnectionState.UNCONFIGURED)
self._pre_da_config = {}
self._startup_config = {}
# Idempotency flag for lost connections.
        # This is set to False when a connection is established, allowing the
        # lost-connection callback to be activated.
self._connection_lost = True
#############################################################
# Device connection interface.
#############################################################
def initialize(self, *args, **kwargs):
"""
Initialize driver connection, bringing communications parameters
into unconfigured state (no connection object).
@raises InstrumentStateException if command not allowed in current state
"""
# Forward event and argument to the connection FSM.
return self._connection_fsm.on_event(DriverEvent.INITIALIZE, *args, **kwargs)
def configure(self, *args, **kwargs):
"""
Configure the driver for communications with the device via
port agent / logger (valid but unconnected connection object).
@param arg[0] comms config dict.
@raises InstrumentStateException if command not allowed in current state
@throws InstrumentParameterException if missing comms or invalid config dict.
"""
# Forward event and argument to the connection FSM.
return self._connection_fsm.on_event(DriverEvent.CONFIGURE, *args, **kwargs)
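    # Illustrative example (not part of the original module): the comms config
    # passed here is the dict consumed by _build_connection() below, e.g.
    #   driver.configure(config={'addr': 'localhost', 'port': 4001, 'cmd_port': 4002})
    # The host and port values are placeholders, not defaults.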
def connect(self, *args, **kwargs):
"""
Establish communications with the device via port agent / logger
(connected connection object).
@raises InstrumentStateException if command not allowed in current state
@throws InstrumentConnectionException if the connection failed.
"""
# Forward event and argument to the connection FSM.
result = self._connection_fsm.on_event(DriverEvent.CONNECT, *args, **kwargs)
init_config = {}
if len(args) > 0 and isinstance(args[0], dict):
init_config = args[0]
self.set_init_params(init_config)
return result
def disconnect(self, *args, **kwargs):
"""
Disconnect from device via port agent / logger.
@raises InstrumentStateException if command not allowed in current state
"""
# Forward event and argument to the connection FSM.
return self._connection_fsm.on_event(DriverEvent.DISCONNECT, *args, **kwargs)
#############################################################
# Configuration logic
#############################################################
def get_init_params(self):
"""
get the driver initialization parameters
@return driver configuration dictionary
"""
return self._startup_config
def set_init_params(self, config):
"""
Set the initialization parameters down in the protocol and store the
driver configuration in the driver.
        If the protocol hasn't been set up yet, cache the config. The next time
        this method is called with an empty config, the cached configuration is
        used instead.
        @param config This default implementation assumes a standard driver
        configuration dict with keys named in DriverConfigKey.
        Non-standard parameters can be handled by overriding this method.
@raise InstrumentParameterException If the config cannot be applied
"""
if not isinstance(config, dict):
raise InstrumentParameterException("Incompatible initialization parameters")
if self._protocol:
param_config = None
if config:
param_config = config
elif self._startup_config:
param_config = self._startup_config
if param_config:
self._protocol.set_init_params(param_config)
self._protocol.initialize_scheduler()
if config:
self._startup_config = config
def apply_startup_params(self):
"""
Apply the startup values previously stored in the protocol to
the running config of the live instrument. The startup values are the
values that are (1) marked as startup parameters and are (2) the "best"
value to use at startup. Preference is given to the previously-set init
value, then the default value, then the currently used value.
This default implementation simply pushes the logic down into the protocol
for processing should the action be better accomplished down there.
The driver writer can decide to overload this method in the derived
driver class and apply startup parameters in the driver (likely calling
some get and set methods for the resource). If the driver does not
implement an apply_startup_params() method in the driver, this method
will call into the protocol. Deriving protocol classes are expected to
implement an apply_startup_params() method lest they get the exception
from the base InstrumentProtocol implementation.
"""
log.debug("Base driver applying startup params...")
self._protocol.apply_startup_params()
def get_cached_config(self):
"""
Return the configuration object that shows the instrument's
configuration as cached in the protocol parameter dictionary.
        @retval The running configuration in the instrument's config format. By
default, it is a dictionary of parameter names and values.
"""
if self._protocol:
return self._protocol.get_cached_config()
def get_config_metadata(self):
"""
Return the configuration metadata object in JSON format
@retval The description of the parameters, commands, and driver info
in a JSON string
@see https://confluence.oceanobservatories.org/display/syseng/CIAD+MI+SV+Instrument+Driver-Agent+parameter+and+command+metadata+exchange
"""
log.debug("Getting metadata from driver...")
protocol = self._protocol
# Because the config requires information from the protocol param dict
# we temporarily instantiate a protocol object to get at the static
# information.
if not protocol:
self._build_protocol()
log.debug("Getting metadata from protocol...")
return self._protocol.get_config_metadata_dict()
def restore_direct_access_params(self, config):
"""
Restore the correct values out of the full config that is given when
returning from direct access. By default, this takes a simple dict of
        param name and value. Override this method as needed as it makes some
simple assumptions about how your instrument sets things.
@param config The configuration that was previously saved (presumably
to disk somewhere by the driver that is working with this protocol)
"""
vals = {}
# for each parameter that is read only, restore
da_params = self._protocol.get_direct_access_params()
for param in da_params:
vals[param] = config[param]
log.debug("Restore DA Parameters: %s" % vals)
self.set_resource(vals, True)
#############################################################
    # Command and control interface.
#############################################################
def discover_state(self, *args, **kwargs):
"""
Determine initial state upon establishing communications.
@param timeout=timeout Optional command timeout.
@retval Current device state.
@raises InstrumentTimeoutException if could not wake device.
@raises InstrumentStateException if command not allowed in current state or if
device state not recognized.
@raises NotImplementedException if not implemented by subclass.
"""
# Forward event and argument to the protocol FSM.
return self._connection_fsm.on_event(DriverEvent.DISCOVER, DriverEvent.DISCOVER, *args, **kwargs)
def get_resource_capabilities(self, current_state=True, *args, **kwargs):
"""
Return driver commands and parameters.
@param current_state True to retrieve commands available in current
            state, otherwise return all commands.
@retval list of AgentCapability objects representing the drivers
capabilities.
@raises NotImplementedException if not implemented by subclass.
"""
if self._protocol:
return self._protocol.get_resource_capabilities(current_state)
else:
return [['foobb'], ['fooaa']]
def get_resource_state(self, *args, **kwargs):
"""
Return the current state of the driver.
@retval str current driver state.
@raises NotImplementedException if not implemented by subclass.
"""
connection_state = self._connection_fsm.get_current_state()
if connection_state == DriverConnectionState.CONNECTED:
return self._protocol.get_current_state()
else:
return connection_state
def get_resource(self, *args, **kwargs):
"""
Retrieve device parameters.
        @param args[0] DriverParameter.ALL or a list of parameters to retrieve.
@retval parameter : value dict.
@raises InstrumentParameterException if missing or invalid get parameters.
@raises InstrumentStateException if command not allowed in current state
@raises NotImplementedException if not implemented by subclass.
"""
# Forward event and argument to the protocol FSM.
return self._connection_fsm.on_event(DriverEvent.GET, DriverEvent.GET, *args, **kwargs)
def set_resource(self, *args, **kwargs):
"""
Set device parameters.
@param args[0] parameter : value dict of parameters to set.
@param timeout=timeout Optional command timeout.
@raises InstrumentParameterException if missing or invalid set parameters.
@raises InstrumentTimeoutException if could not wake device or no response.
@raises InstrumentProtocolException if set command not recognized.
@raises InstrumentStateException if command not allowed in current state.
@raises NotImplementedException if not implemented by subclass.
"""
# Forward event and argument to the protocol FSM.
return self._connection_fsm.on_event(DriverEvent.SET, DriverEvent.SET, *args, **kwargs)
def execute_resource(self, resource_cmd, *args, **kwargs):
"""
Poll for a sample.
@param timeout=timeout Optional command timeout.
        @retval Device sample dict.
@raises InstrumentTimeoutException if could not wake device or no response.
@raises InstrumentProtocolException if acquire command not recognized.
@raises InstrumentStateException if command not allowed in current state.
@raises NotImplementedException if not implemented by subclass.
"""
# Forward event and argument to the protocol FSM.
return self._connection_fsm.on_event(DriverEvent.EXECUTE, resource_cmd, *args, **kwargs)
def start_direct(self, *args, **kwargs):
"""
start direct access mode
@param timeout=timeout Optional command timeout.
        @retval Device sample dict.
@raises InstrumentTimeoutException if could not wake device or no response.
@raises InstrumentProtocolException if acquire command not recognized.
@raises InstrumentStateException if command not allowed in current state.
@raises NotImplementedException if not implemented by subclass.
"""
# Need to pass the event as a parameter because the event handler to capture the current
# pre-da config requires it.
return self._connection_fsm.on_event(DriverEvent.START_DIRECT, DriverEvent.START_DIRECT)
def execute_direct(self, *args, **kwargs):
"""
        execute direct access command
@param timeout=timeout Optional command timeout.
        @retval Device sample dict.
@raises InstrumentTimeoutException if could not wake device or no response.
@raises InstrumentProtocolException if acquire command not recognized.
@raises InstrumentStateException if command not allowed in current state.
@raises NotImplementedException if not implemented by subclass.
"""
return self.execute_resource(DriverEvent.EXECUTE_DIRECT, *args, **kwargs)
def stop_direct(self, *args, **kwargs):
"""
stop direct access mode
@param timeout=timeout Optional command timeout.
        @retval Device sample dict.
@raises InstrumentTimeoutException if could not wake device or no response.
@raises InstrumentProtocolException if acquire command not recognized.
@raises InstrumentStateException if command not allowed in current state.
@raises NotImplementedException if not implemented by subclass.
"""
return self._connection_fsm.on_event(DriverEvent.STOP_DIRECT, DriverEvent.STOP_DIRECT)
def test_force_state(self, *args, **kwargs):
"""
Force driver into a given state for the purposes of unit testing
@param state=desired_state Required desired state to change to.
@raises InstrumentParameterException if no state parameter.
@raises TestModeException if not in test mode
"""
        if not self._test_mode:
            raise TestModeException()
# Get the required param
state = kwargs.get('state', None) # via kwargs
if state is None:
raise InstrumentParameterException('Missing state parameter.')
# We are mucking with internal FSM parameters which may be bad.
        # The alternative was to raise an event to change the state. Don't
# know which is better.
self._protocol._protocol_fsm.current_state = state
########################################################################
# Unconfigured handlers.
########################################################################
def _handler_unconfigured_enter(self, *args, **kwargs):
"""
Enter unconfigured state.
"""
# Send state change event to agent.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unconfigured_exit(self, *args, **kwargs):
"""
Exit unconfigured state.
"""
pass
def _handler_unconfigured_initialize(self, *args, **kwargs):
"""
Initialize handler. We are already in unconfigured state, do nothing.
@retval (next_state, result) tuple, (None, None).
"""
next_state = None
result = None
return (next_state, result)
def _handler_unconfigured_configure(self, *args, **kwargs):
"""
Configure driver for device comms.
        @param args[0] Communications config dictionary.
@retval (next_state, result) tuple, (DriverConnectionState.DISCONNECTED,
None) if successful, (None, None) otherwise.
@raises InstrumentParameterException if missing or invalid param dict.
"""
next_state = None
result = None
# Get the required param dict.
config = kwargs.get('config', None) # via kwargs
# TODO use kwargs as the only mechanism
if config is None:
try:
config = args[0] # via first argument
except IndexError:
pass
if config is None:
raise InstrumentParameterException('Missing comms config parameter.')
# Verify dict and construct connection client.
self._connection = self._build_connection(config)
next_state = DriverConnectionState.DISCONNECTED
return (next_state, result)
########################################################################
# Disconnected handlers.
########################################################################
def _handler_disconnected_enter(self, *args, **kwargs):
"""
Enter disconnected state.
"""
# Send state change event to agent.
self._connection_lost = True
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_disconnected_exit(self, *args, **kwargs):
"""
Exit disconnected state.
"""
pass
def _handler_disconnected_initialize(self, *args, **kwargs):
"""
Initialize device communications. Causes the connection parameters to
be reset.
@retval (next_state, result) tuple, (DriverConnectionState.UNCONFIGURED,
None).
"""
next_state = None
result = None
self._connection = None
next_state = DriverConnectionState.UNCONFIGURED
return (next_state, result)
def _handler_disconnected_configure(self, *args, **kwargs):
"""
Configure driver for device comms.
        @param args[0] Communications config dictionary.
@retval (next_state, result) tuple, (None, None).
@raises InstrumentParameterException if missing or invalid param dict.
"""
next_state = None
result = None
# Get required config param dict.
config = kwargs.get('config', None) # via kwargs
# TODO use kwargs as the only mechanism
if config is None:
try:
config = args[0] # via first argument
except IndexError:
pass
if config is None:
raise InstrumentParameterException('Missing comms config parameter.')
# Verify configuration dict, and update connection if possible.
self._connection = self._build_connection(config)
return (next_state, result)
def _handler_disconnected_connect(self, *args, **kwargs):
"""
Establish communications with the device via port agent / logger and
        construct and initialize a protocol FSM for device interaction.
@retval (next_state, result) tuple, (DriverConnectionState.CONNECTED,
None) if successful.
@raises InstrumentConnectionException if the attempt to connect failed.
"""
next_state = None
result = None
self._build_protocol()
try:
self._connection.init_comms(self._protocol.got_data,
self._protocol.got_raw,
self._got_exception,
self._lost_connection_callback)
self._protocol._connection = self._connection
next_state = DriverConnectionState.CONNECTED
except InstrumentConnectionException as e:
log.error("Connection Exception: %s", e)
log.error("Instrument Driver remaining in disconnected state.")
# Re-raise the exception
raise
return (next_state, result)
########################################################################
# Connected handlers.
########################################################################
def _handler_connected_enter(self, *args, **kwargs):
"""
Enter connected state.
"""
# Send state change event to agent.
self._connection_lost = False
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_connected_exit(self, *args, **kwargs):
"""
Exit connected state.
"""
pass
def _handler_connected_disconnect(self, *args, **kwargs):
"""
        Disconnect from the device via port agent / logger and destroy the
protocol FSM.
@retval (next_state, result) tuple, (DriverConnectionState.DISCONNECTED,
None) if successful.
"""
next_state = None
result = None
log.info("_handler_connected_disconnect: invoking stop_comms().")
self._connection.stop_comms()
self._protocol = None
next_state = DriverConnectionState.DISCONNECTED
return (next_state, result)
def _handler_connected_connection_lost(self, *args, **kwargs):
"""
The device connection was lost. Stop comms, destroy protocol FSM and
revert to disconnected state.
@retval (next_state, result) tuple, (DriverConnectionState.DISCONNECTED,
None).
"""
next_state = None
result = None
log.info("_handler_connected_connection_lost: invoking stop_comms().")
self._connection.stop_comms()
self._protocol = None
# Send async agent state change event.
log.info("_handler_connected_connection_lost: sending LOST_CONNECTION " \
"event, moving to DISCONNECTED state.")
self._driver_event(DriverAsyncEvent.AGENT_EVENT,
ResourceAgentEvent.LOST_CONNECTION)
next_state = DriverConnectionState.DISCONNECTED
return (next_state, result)
def _handler_connected_protocol_event(self, event, *args, **kwargs):
"""
Forward a driver command event to the protocol FSM.
@param args positional arguments to pass on.
@param kwargs keyword arguments to pass on.
@retval (next_state, result) tuple, (None, protocol result).
"""
next_state = None
result = self._protocol._protocol_fsm.on_event(event, *args, **kwargs)
return (next_state, result)
def _handler_connected_start_direct_event(self, event, *args, **kwargs):
"""
Stash the current config first, then forward a driver command event
to the protocol FSM.
@param args positional arguments to pass on.
@param kwargs keyword arguments to pass on.
@retval (next_state, result) tuple, (None, protocol result).
"""
next_state = None
# Get the value for all direct access parameters and store them in the protocol
self._pre_da_config = self.get_resource(self._protocol.get_direct_access_params())
self._protocol.store_direct_access_config(self._pre_da_config)
self._protocol.enable_da_initialization()
log.debug("starting DA. Storing DA parameters for restore: %s", self._pre_da_config)
result = self._protocol._protocol_fsm.on_event(event, *args, **kwargs)
return (next_state, result)
def _handler_connected_stop_direct_event(self, event, *args, **kwargs):
"""
Restore previous config first, then forward a driver command event
to the protocol FSM.
@param args positional arguments to pass on.
@param kwargs keyword arguments to pass on.
@retval (next_state, result) tuple, (None, protocol result).
"""
next_state = None
result = self._protocol._protocol_fsm.on_event(event, *args, **kwargs)
# Moving the responsibility for applying DA parameters to the
# protocol.
#self.restore_direct_access_params(self._pre_da_config)
return (next_state, result)
########################################################################
# Helpers.
########################################################################
def _build_connection(self, config):
"""
Constructs and returns a Connection object according to the given
configuration. The connection object is a LoggerClient instance in
this base class. Subclasses can overwrite this operation as needed.
The value returned by this operation is assigned to self._connection
and also to self._protocol._connection upon entering in the
DriverConnectionState.CONNECTED state.
@param config configuration dict
@retval a Connection instance, which will be assigned to
self._connection
@throws InstrumentParameterException Invalid configuration.
"""
if 'mock_port_agent' in config:
mock_port_agent = config['mock_port_agent']
# check for validity here...
if (mock_port_agent is not None):
return mock_port_agent
try:
addr = config['addr']
port = config['port']
cmd_port = config.get('cmd_port')
if isinstance(addr, str) and isinstance(port, int) and len(addr)>0:
return PortAgentClient(addr, port, cmd_port)
else:
raise InstrumentParameterException('Invalid comms config dict.')
except (TypeError, KeyError):
raise InstrumentParameterException('Invalid comms config dict.')
def _got_exception(self, exception):
"""
Callback for the client for exception handling with async data. Exceptions
are wrapped in an event and sent up to the agent.
"""
try:
log.error("ASYNC Data Exception Detected: %s (%s)", exception.__class__.__name__, str(exception))
finally:
self._driver_event(DriverAsyncEvent.ERROR, exception)
def _lost_connection_callback(self, error_string):
"""
        A callback invoked by the port agent client when it loses
connectivity to the port agent.
"""
if not self._connection_lost:
log.info("_lost_connection_callback: starting thread to send " \
"CONNECTION_LOST event to instrument driver.")
self._connection_lost = True
lost_comms_thread = Thread(
target=self._connection_fsm.on_event,
args=(DriverEvent.CONNECTION_LOST, ))
lost_comms_thread.start()
else:
log.info("_lost_connection_callback: connection_lost flag true.")
def _build_protocol(self):
"""
Construct device specific single connection protocol FSM.
Overridden in device specific subclasses.
"""
pass
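# --- Usage sketch (illustrative only, not part of the original module) -------
# A concrete driver subclasses SingleConnectionInstrumentDriver and supplies a
# device-specific protocol in _build_protocol(). "MyProtocol" and the comms
# values below are hypothetical placeholders.
#
#     class MyInstrumentDriver(SingleConnectionInstrumentDriver):
#         def _build_protocol(self):
#             self._protocol = MyProtocol(self._driver_event)
#
#     driver = MyInstrumentDriver(event_callback)
#     driver.configure(config={'addr': 'localhost', 'port': 4001})
#     driver.connect()
#     current_state = driver.discover_state()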
| bsd-2-clause | 7,355,743,505,821,260,000 | 41.122951 | 144 | 0.632224 | false |
fvbock/gDBPool | gdbpool/connection_pool.py | 1 | 6127 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Florian von Bock (f at vonbock dot info)
#
# gDBPool - db connection pooling for gevent
__author__ = "Florian von Bock"
__email__ = "f at vonbock dot info"
__version__ = "0.1.3"
import gevent
from gevent import monkey; monkey.patch_all()
import psycopg2
import sys, traceback
from psyco_ge import make_psycopg_green; make_psycopg_green()
assert 'gdbpool.psyco_ge' in sys.modules.keys()
from gevent.queue import Queue, Empty as QueueEmptyException
from time import time
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_UNCOMMITTED, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ, ISOLATION_LEVEL_SERIALIZABLE
psycopg2.extensions.register_type( psycopg2.extensions.UNICODE )
psycopg2.extensions.register_type( psycopg2.extensions.UNICODEARRAY )
from psycopg2 import InterfaceError
from pool_connection import PoolConnection
from gdbpool_error import DBInteractionException, DBPoolConnectionException, PoolConnectionException, StreamEndException
class DBConnectionPool( object ):
"""
The Connection Pool
"Classic" pool of connections with connection lifecycle management
"""
def __init__( self, dsn, db_module = 'psycopg2', pool_size = 10,
conn_lifetime = 600, do_log = False ):
"""
:param string dsn: DSN for the default `class:DBConnectionPool`
:param string db_module: name of the DB-API module to use
:param int pool_size: Poolsize of the first/default `class:DBConnectionPool`
:param int conn_lifetime: Number of seconds after which a connection will be recycled when :meth:`.put` back
:param bool do_log: Log to the console or not
"""
if do_log:
import logging
logging.basicConfig( level = logging.INFO, format = "%(asctime)s %(message)s" )
self.logger = logging.getLogger()
self.do_log = do_log
self.dsn = dsn
self.db_module = db_module
self.pool_size = pool_size
self.CONN_RECYCLE_AFTER = conn_lifetime if conn_lifetime is not None else 0
self.pool = Queue( self.pool_size )
__import__( db_module )
self.connection_jobs = map( lambda x: gevent.spawn( self.create_connection ), xrange( self.pool_size ) )
try:
gevent.joinall( self.connection_jobs, timeout = 10 )
assert self.pool_size == self.pool.qsize()
if self.do_log:
self.logger.info( "$ poolsize: %i" % self.pool.qsize() )
self.ready = True
except AssertionError, e:
raise DBPoolConnectionException( "Could not get %s connections for the pool as requested. %s" % ( self.pool_size, e.message ) )
except Exception, e:
raise e
def __del__( self ):
while not self.pool.empty():
conn = self.pool.get()
conn.close()
def create_connection( self ):
"""
Try to open a new connection to the database and put it on the pool
"""
try:
self.pool.put( PoolConnection( self.db_module, self.dsn ) )
except PoolConnectionException, e:
raise e
def resize( self, new_size ):
"""
        Resize the pool (number of connections in the pool)
        :param int new_size: number of connections the pool should be resized to
"""
while self.qsize != new_size:
if self.qsize < new_size:
self.create_connection()
else:
conn = self.pool.get()
conn.close()
def get( self, timeout = None, iso_level = ISOLATION_LEVEL_READ_COMMITTED ):
"""
Get a connection from the pool
:param int timeout: seconds to wait for a connection or None
:param iso_level: transaction isolation level to be set on the connection. Must be one of psycopg2.extensions ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_UNCOMMITTED, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ, ISOLATION_LEVEL_SERIALIZABLE
:returns: -- a :class:`PoolConnection`
"""
try:
conn = self.pool.get( timeout = timeout )
if iso_level != ISOLATION_LEVEL_READ_COMMITTED:
conn.set_isolation_level( iso_level )
return conn
except gevent.queue.Empty, e:
raise PoolConnectionException( e )
def put( self, conn, timeout = 1, force_recycle = False ):
"""
Put a connection back onto the pool
:param conn: The :class:`PoolConnection` object to be put back onto the pool
        :param int timeout: timeout in seconds to put the connection onto the pool
:param bool force_recycle: Force connection recycling independent from the pool wide connection lifecycle
"""
if isinstance( conn, PoolConnection ):
if ( self.CONN_RECYCLE_AFTER != 0 and time() - conn.PoolConnection_initialized_at < self.CONN_RECYCLE_AFTER ) and force_recycle == False:
try:
conn.reset()
if conn.isolation_level != ISOLATION_LEVEL_READ_COMMITTED:
if self.do_log:
self.logger.info( "set ISOLATION_LEVEL_READ_COMMITTED." )
conn.set_isolation_level( ISOLATION_LEVEL_READ_COMMITTED )
conn.commit()
self.pool.put( conn, timeout = timeout )
except gevent.queue.Full, e:
raise PoolConnectionException( e )
else:
if self.do_log:
self.logger.info( "recycling conn." )
try:
conn.reset() # ?
conn.close()
except InterfaceError:
pass
del conn
gevent.spawn( self.create_connection ).join()
else:
raise PoolConnectionException( "Passed object %s is not a PoolConnection." % ( conn, ) )
@property
def qsize( self ):
return self.pool.qsize()
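# --- Usage sketch (illustrative only, not part of the original module) -------
# The DSN below is a placeholder; gDBPool expects a psycopg2-style DSN string,
# and the returned PoolConnection is assumed to be used via the usual DB-API
# calls before being handed back.
#
#     pool = DBConnectionPool( "host=localhost dbname=mydb user=me password=secret",
#                              pool_size = 5, conn_lifetime = 600 )
#     conn = pool.get()    # returns a PoolConnection
#     ...                  # run queries on the connection
#     pool.put( conn )     # hand it back; recycled after conn_lifetime seconds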
| mit | -6,869,467,594,450,040,000 | 40.120805 | 273 | 0.610903 | false |
pawelmhm/scrapy | scrapy/contracts/__init__.py | 3 | 6122 | import re
import sys
from functools import wraps
from inspect import getmembers
from typing import Dict
from unittest import TestCase
from scrapy.http import Request
from scrapy.utils.python import get_spec
from scrapy.utils.spider import iterate_spider_output
class Contract:
""" Abstract class for contracts """
request_cls = None
def __init__(self, method, *args):
self.testcase_pre = _create_testcase(method, f'@{self.name} pre-hook')
self.testcase_post = _create_testcase(method, f'@{self.name} post-hook')
self.args = args
def add_pre_hook(self, request, results):
if hasattr(self, 'pre_process'):
cb = request.callback
@wraps(cb)
def wrapper(response, **cb_kwargs):
try:
results.startTest(self.testcase_pre)
self.pre_process(response)
results.stopTest(self.testcase_pre)
except AssertionError:
results.addFailure(self.testcase_pre, sys.exc_info())
except Exception:
results.addError(self.testcase_pre, sys.exc_info())
else:
results.addSuccess(self.testcase_pre)
finally:
return list(iterate_spider_output(cb(response, **cb_kwargs)))
request.callback = wrapper
return request
def add_post_hook(self, request, results):
if hasattr(self, 'post_process'):
cb = request.callback
@wraps(cb)
def wrapper(response, **cb_kwargs):
output = list(iterate_spider_output(cb(response, **cb_kwargs)))
try:
results.startTest(self.testcase_post)
self.post_process(output)
results.stopTest(self.testcase_post)
except AssertionError:
results.addFailure(self.testcase_post, sys.exc_info())
except Exception:
results.addError(self.testcase_post, sys.exc_info())
else:
results.addSuccess(self.testcase_post)
finally:
return output
request.callback = wrapper
return request
def adjust_request_args(self, args):
return args
class ContractsManager:
contracts: Dict[str, Contract] = {}
def __init__(self, contracts):
for contract in contracts:
self.contracts[contract.name] = contract
def tested_methods_from_spidercls(self, spidercls):
is_method = re.compile(r"^\s*@", re.MULTILINE).search
methods = []
for key, value in getmembers(spidercls):
if callable(value) and value.__doc__ and is_method(value.__doc__):
methods.append(key)
return methods
def extract_contracts(self, method):
contracts = []
for line in method.__doc__.split('\n'):
line = line.strip()
if line.startswith('@'):
name, args = re.match(r'@(\w+)\s*(.*)', line).groups()
args = re.split(r'\s+', args)
contracts.append(self.contracts[name](method, *args))
return contracts
def from_spider(self, spider, results):
requests = []
for method in self.tested_methods_from_spidercls(type(spider)):
bound_method = spider.__getattribute__(method)
try:
requests.append(self.from_method(bound_method, results))
except Exception:
case = _create_testcase(bound_method, 'contract')
results.addError(case, sys.exc_info())
return requests
def from_method(self, method, results):
contracts = self.extract_contracts(method)
if contracts:
request_cls = Request
for contract in contracts:
if contract.request_cls is not None:
request_cls = contract.request_cls
# calculate request args
args, kwargs = get_spec(request_cls.__init__)
# Don't filter requests to allow
# testing different callbacks on the same URL.
kwargs['dont_filter'] = True
kwargs['callback'] = method
for contract in contracts:
kwargs = contract.adjust_request_args(kwargs)
args.remove('self')
# check if all positional arguments are defined in kwargs
if set(args).issubset(set(kwargs)):
request = request_cls(**kwargs)
# execute pre and post hooks in order
for contract in reversed(contracts):
request = contract.add_pre_hook(request, results)
for contract in contracts:
request = contract.add_post_hook(request, results)
self._clean_req(request, method, results)
return request
def _clean_req(self, request, method, results):
""" stop the request from returning objects and records any errors """
cb = request.callback
@wraps(cb)
def cb_wrapper(response, **cb_kwargs):
try:
output = cb(response, **cb_kwargs)
output = list(iterate_spider_output(output))
except Exception:
case = _create_testcase(method, 'callback')
results.addError(case, sys.exc_info())
def eb_wrapper(failure):
case = _create_testcase(method, 'errback')
exc_info = failure.type, failure.value, failure.getTracebackObject()
results.addError(case, exc_info)
request.callback = cb_wrapper
request.errback = eb_wrapper
def _create_testcase(method, desc):
spider = method.__self__.name
class ContractTestCase(TestCase):
def __str__(_self):
return f"[{spider}] {method.__name__} ({desc})"
name = f'{spider}_{method.__name__}'
setattr(ContractTestCase, name, lambda x: x)
return ContractTestCase(name)
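# Example (illustrative, not one of scrapy's built-in contracts): a minimal
# custom contract subclasses Contract, sets a name, and implements pre_process
# and/or post_process. A spider callback opts in by listing "@status_ok" in its
# docstring, and the contract is registered through the SPIDER_CONTRACTS setting.
class StatusOkContract(Contract):
    """Assert that the response has HTTP status 200 before the callback runs."""

    name = "status_ok"

    def pre_process(self, response):
        # An AssertionError raised here is reported as a contract failure.
        assert response.status == 200, f"expected 200, got {response.status}"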
| bsd-3-clause | 8,974,094,250,658,661,000 | 33.011111 | 81 | 0.563868 | false |
nachowski/csv2sqlite | csv2sqlite.py | 1 | 3373 | # Converts a CSV file (typically exported from individual .xls sheets) to a sqlite db as a new table
#
# - The CSV filename (without .csv extension) is used as the table name.
# - The columns from the first row are used as column names in the table.
# - You can store multiple tables (from multiple csv files) in the same sqlite3 db
# - The table will contain an __id primary key, and the db an android_metadata table, to make it android-compatible.
# - Typically the .db file should be zipped and copied to <android-project>/assets
#
# Usage: python2 csv2sqlite.py my_fabulous_data.csv db.sqlite
#
# Author: Nachiket Apte <[email protected]>
import csv, sqlite3, sys, os
try:
filename = sys.argv[1]
except IndexError:
print 'Missing argument: python csv2sqlite.py <tablename.csv>'
sys.exit(2)
try:
sqlitefilename = sys.argv[2]
except IndexError:
print 'Using default name for db: mydb.sqlite'
sqlitefilename = "mydb.sqlite"
# open our csv file for parsing. We use a standard db filename which may contain
# multiple tables from different csv files
reader = csv.reader(open(filename, 'r'), delimiter=';')
table, fileExtension = os.path.splitext(filename)
conn = sqlite3.connect(sqlitefilename)
curs = conn.cursor()
# Android-specific shizz. Remove if not relevant
curs.execute("DROP TABLE IF EXISTS android_metadata")
curs.execute("CREATE TABLE IF NOT EXISTS android_metadata (locale TEXT DEFAULT 'en_US')")
curs.execute("INSERT INTO 'android_metadata' VALUES ('en_US')")
##
counter = 0
# Screw fancy functions, I'm a python noob
tableInsertValues = "?";
tableInsertSql = "INSERT INTO " + table + " (__id"
for row in reader:
if counter == 0:
# first row of csv, create table based on columns
colNameCreateString = ""
for colName in row:
# No spaces in column names. All other formatting is preserved
colName = colName.replace(' ', '')
# All columns are strings, good luck future developers
colNameCreateString += ", " + colName + " TEXT"
# Magic here
tableInsertSql += ", " + colName
tableInsertValues += ", ?"
# make our insert statement based on the column values
tableInsertSql += ") VALUES (" + tableInsertValues + ");"
# make and execute our create statement
curs.execute("DROP TABLE IF EXISTS " + table)
print "Making table " + table + " with " + str(len(row)) + " columns"
try:
curs.execute("CREATE TABLE IF NOT EXISTS " + table + " ( __id INTEGER PRIMARY KEY" + colNameCreateString + ");")
except sqlite3.OperationalError:
# Some .xls files might be missing a title row
print "First row must contain headers! This one contains " + str(row)
sys.exit(2)
else:
# insert row as data
to_db = [counter]
for colVal in row:
colVal = colVal.strip(); # trim
if len(colVal) == 0:
# excel is dumb sometimes, convert empty strings to null values
to_db.append(None)
else:
to_db.append(unicode(colVal.strip(), "utf8"))
curs.execute(tableInsertSql, to_db)
counter += 1
conn.commit()
print "Imported " + str(counter - 1) + " rows into " + sqlitefilename | mit | -521,818,109,497,795,300 | 37.781609 | 124 | 0.636525 | false |
ShaunRW/keyModder | KeycodeTranslator.py | 1 | 2907 | #! /usr/bin/python2.6
# KEYCODE TRANSLATOR
#-----------------------------------------#
# Filename: KeycodeTranslator.py
# Belongs To: keyModder
# Usage: from KeycodeTranslator import KeycodeTranslator
# Description: Offers function to convert key codes to the key name and vice versa.
# Created: 19 Nov 2014
# Modified: 19 Nov 2014
# Author: Shaun Wilesmith
# Notes:
#
#
#-----------------------------------------#
class KeycodeTranslator:
def __init__(self):
self.KeycodeLookup = {
'up':126,
'down':125,
'left':123,
'right':124,
'backspace':117,
'enter':76,
'home':115,
'end':119,
'pagedown':121,
'pageup':116,
'return':36,
'delete':51,
'tab':48,
'spacebar':49,
'shift':56,
'control':59,
'menu':58,
'escape':53,
'capslock':57,
'help':114,
'f1':122,
'f2':120,
'f3':99,
'f4':118,
'f5':96,
'f6':97,
'f7':98,
'f8':100,
'f9':101,
'f10':109,
'f11':103,
'f12':111,
'fn':63,
'option':58,
'command':55,
'q':12,
'w':13,
'e':14,
'r':15,
't':17,
'y':16,
'u':32,
'i':34,
'o':31,
'p':35,
'a':0,
's':1,
'd':2,
'f':3,
'g':5,
'h':4,
'j':38,
'k':40,
'l':37,
'z':6,
'x':7,
'c':8,
'v':9,
'b':11,
'n':45,
'm':46,
'0':29,
'1':18,
'2':19,
'3':20,
'4':21,
'5':23,
'6':22,
'7':26,
'8':28,
'9':25,
'period':47,
'comma':43,
'slash':44,
'num0':82,
'num1':83,
'num2':84,
'num3':85,
'num4':86,
'num5':87,
'num6':88,
'num7':89,
'num8':91,
'num9':92,
'multiply':67,
'add':69,
'subtract':78,
'divide':75,
'decimal':65,
'numequal':81
}
def KeycodeFromName(self, name):
if name.lower() in self.KeycodeLookup:
return self.KeycodeLookup[name.lower()]
else:
return False
def NameFromKeycode(self, keycode):
for k, v in self.KeycodeLookup.items():
if keycode==v:
return k
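if __name__ == "__main__":
    # Small illustrative self-check (not part of the original module).
    translator = KeycodeTranslator()
    print translator.KeycodeFromName("escape")  # 53
    print translator.NameFromKeycode(53)        # escape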
| gpl-2.0 | 7,995,939,785,253,920,000 | 22.443548 | 88 | 0.325421 | false |
razziel89/ManipulateAggregates | ManipulateAggregates/collection/gnuplot/__init__.py | 1 | 33249 | """Class definition to control Gnuplot from Python.
"""
# This file is part of ManipulateAggregates.
#
# Copyright (C) 2016 by Torsten Sachse
#
# ManipulateAggregates is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ManipulateAggregates is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ManipulateAggregates. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import itertools
from subprocess import Popen, PIPE
from shutil import move
import tempfile as tf
import distutils.spawn as ds
from . import gpcsv
from . import postprocess
# default values for gnuplot
GNUPLOT_DEFAULT = {
"dash": "AUTO",
"color": "AUTO",
"point": "AUTO",
"lines": True,
"points": False,
"title": "AUTO",
"xcol": 1,
"ycol": 2,
"zcol": 3,
"type": "UNKNOWN",
"head": False,
"linewidth": 1.0,
"pointsize": 1.0,
"solid": True,
"opacity": 1.0,
}
def _which(executable):
"""Return whether or not an executable was found.
Args:
executable: (string) the name of the executable
Returns:
whether or not the specified executable was found
"""
return ds.find_executable(executable) is not None
def _mv(src, dest):
"""Move a file from src to dest.
Args:
src: (string) source path of the file
dest: (string) destination path of the file
"""
move(src, dest)
class gnuplot:
"""Controller class for gnuplot.
Attributes:
correct_mark: (bool) whether or not marks should be slightly displaced if they
overlap
correct_mark_dist: (float) by how much to displace overlaping marks
dict: (dictionary) holds configuration parameters
dimensions: (int, 2 or 3): the dimensionality of the plot
        f: (pipe) STDIN of GP to which the controlling commands are written.
font: (string) font to use
fontsize: (int) fontsize to use
GP: (process) an instance of Gnuplot opened via Popen
rectanglecount: (int) how many rectangles were already drawn
tempfiles: list of temporary files and whether they shall be auto-deleted
xmarks: (dictionary) used to store marks in x direction - check for overlaps
xrange: (tuple of 2 floats) range for x axis
ymarks: (dictionary) used to store marks in y direction - check for overlaps
yrange: (tuple of 2 floats) range for y axis
        zrange: (tuple of 2 floats) range for z axis if the plot is in 3 dimensions
"""
class gp_dict:
"""A dummy class to hold the description of the config dictionaries.
All members of this class are actually keys (of type string) that can
be in each config dictionary and the given type is that of the
associated value. The two letter abbreviations lc, lw and dt are
Gnuplot commands. Please see Gnuplot's documentation.
Attributes:
lines: (none) if the key is set, plot a line
points: (none) if the key is set, plot points
type: (string) type of plot: 'file' (raw data) or 'function' (analytic function)
function: (string) description of the analytic function
filename: (string) the file that contains the raw data
xcol: (int) coloumn in the file that contains x data
ycol: (int) coloumn in the file that contains y data
zcol: (int) coloumn in the file that contains z data
border: (none) when plotting a rectange, it will have a border if this is set
color: (int) colour as in 'lc INT'. Special value: 'AUTO'
dash: (int) dash type as in 'dt INT'. Special value: 'AUTO'
point: (int) point type as in 'dt INT'. Special value: 'AUTO'
head: (none) whether arrows shall have heads
linewidth: (float) line width as in 'lw FLOAT'
opacity: (float) opacity of the rectangle (if solid is set)
pointsize: (float) size of the points (if points is declared)
solid: (none) if set, plot a solid rectangle (not just border)
title: (string) the name of the plotted function/data
"""
def __init__(self):
"""Dummy constructor, do not use."""
raise Exception("This dummy class shall not be used directly")
self.lines = ""
self.points = ""
self.type = ""
self.function = ""
self.filename = ""
self.xcol = ""
self.ycol = ""
self.zcol = ""
self.border = ""
self.color = ""
self.dash = ""
self.point = ""
self.head = ""
self.linewidth = ""
self.opacity = ""
self.pointsize = ""
self.solid = ""
self.title = ""
def __init__(
self,
filename,
size=(20, 12),
linewidth=1,
xrange=None,
yrange=None,
correct_mark=True,
correct_mark_dist=0.001,
fontsize=10,
xlog=False,
ylog=False,
classic_colors=True,
dimensions=2,
font="Helvetica",
verbose=False,
):
"""Constructor.
Args:
filename: (string) name of the output file that will be created
size: (tuple of 2 ints) the (x,y) size of the output in cm
linewidth: (float) lines are scaled by this factor
xrange: (tuple of 2 floats) range for x axis
yrange: (tuple of 2 floats) range for y axis
correct_mark: (bool) whether or not marks should be slightly displaced if
they overlap
correct_mark_dist: (float) by how much to displace overlaping marks
fontsize: (int) fontsize to use
xlog: (bool) whether or not the x axis is on a logarithmic scale
ylog: (bool) whether or not the y axis is on a logarithmic scale
classic_colors: (bool) whether or not to use Gnuplot's classic color scheme
dimensions: (int, 2 or 3): the dimensionality of the plot
font: (string) font to use
verbose: (bool) if True, echo everything that is being passed to Gnuplot to
stderr
"""
self.tempfiles = []
self.GP = Popen(["gnuplot"], stdin=PIPE, stdout=sys.stderr, stderr=sys.stderr)
self.cat = Popen(["cat"], stdin=PIPE, stdout=sys.stderr, stderr=sys.stderr)
self.f = self.GP.stdin
self.rectanglecount = 1
self.dimensions = dimensions
self.fontsize = fontsize
self.font = font
self.filename = filename
self.verbose = verbose
if self.dimensions not in (2, 3):
raise ValueError("Wrong number of dimensions provided.")
self._write(
'set term pdf enhanced colour font "%s,%d" size %dcm,%dcm linewidth %d\n'
% (font, fontsize, size[0], size[1], linewidth)
)
if classic_colors:
self._write("set colors classic\n")
if xlog:
self._write("set logscale x\n")
if ylog:
self._write("set logscale y\n")
self.correct_mark = correct_mark
if correct_mark:
self.xmarks = {}
self.ymarks = {}
self.correct_mark_dist = correct_mark_dist
self.xrange = xrange
if self.xrange is not None:
self._write("set xrange [%f:%f]\n" % (xrange[0], xrange[1]))
self.yrange = yrange
if self.yrange is not None:
self._write("set yrange [%f:%f]\n" % (yrange[0], yrange[1]))
self._write('set output "%s.pdf"\n' % (filename))
def _write(self, s):
"""Write something to Gnuplot but also print it to
stderr if verbose output is requested.
Args:
s (string): string to be passed to Gnuplot
"""
self.f.write(s)
if self.verbose:
self.cat.stdin.write(s)
def _set_dict(self, dict):
"""Set the dictionary holding config parameters.
Each function to be plotted has a certain set of possible config
options. See gnuplot.gp_dict for all possible options.
Args:
dict: (dictionary) holds configuration parameters in the form of
key-value pairs.
"""
self.dict = dict
def _get(self, *args, **kwargs):
"""Retrieve a value from the dictionary.
Args:
`*args`: (strings) the config options whose associated value shall be
retrieved.
`**kwargs`: (dictionary) key strict: whether or not to raise an Error
if the key cannot be found in the current dictionary or the
default one. If False then None is returned in such cases.
"""
for k in args:
result = self.dict.get(k, GNUPLOT_DEFAULT.get(k, None))
if result is not None:
break
if kwargs.get("strict", False) and result is None:
raise KeyError(
"Key %s not provided by the current dictionary and no default set."
                % (k)
)
return result
def set_title(self, title):
"""Set the title of the plot.
Args:
title: (string) the title of the generated plot
"""
self._write('set title "%s"\n' % (title))
def emptyplot(self):
"""Create an empty plot.
This is useful if only marks or arrows shall be plotted. Normally,
Gnuplot would not create a plot under such conditions.
Requires xrange and yrange to be set using set_xrange and
set_yrange.
"""
if self.xrange is None or self.yrange is None:
            raise RuntimeError(
                "Cannot perform empty plot if either xrange or yrange is not set."
            )
self._write("plot NaN notitle\n")
self.postplot()
def postplot(self):
"""Unset arrows and labels.
This is required when creating a plot that contains multiple pages since
otherwise the labels and arrows/marks would be repeated on every page.
"""
self._write("unset arrow\n")
self._write("unset label\n")
def unset(self, prop, oneprop=True):
"""Send an arbitrary 'unset' command to Gnuplot.
Args:
prop: (string or iterable of strings) if oneprop is True (the
default), unset the one property given via prop. Otherwise
unset all properties in the iterable prop.
oneprop: (bool) whether or not prop is not an iterable
"""
if oneprop:
iterable = [prop]
else:
iterable = prop
for p in iterable:
self._write("unset %s\n" % (p))
def set(self, prop, oneprop=True):
"""Send an arbitrary 'set' command to Gnuplot.
Args:
prop: (string or iterable of strings) if oneprop is True (the
                default), set the one property given via prop. Otherwise set
all properties in the iterable prop.
oneprop: (bool) whether or not prop is not an iterable
"""
if oneprop:
iterable = [prop]
else:
iterable = prop
for p in iterable:
self._write("set %s\n" % (p))
def lineplot(self, data):
"""Plot one or several functions (can also be raw data).
Each function has a certain set of possible config options. See
gnuplot.gp_dict for all possible options.
Args:
data: (dictionary or list of dictionaries) each dictionary contains
a set of key-value pairs that define the function to be plotted
and how it shall be formated.
"""
if isinstance(data, dict):
dict_list = [data]
else:
try:
if False in (isinstance(d, dict) for d in data):
raise TypeError("")
else:
dict_list = data
except TypeError:
raise TypeError(
"Data for lineplot is neither a dictionary nor a list of dictionaries."
)
if len(dict_list) == 0:
print("WARNING: cannot plot since no data was passed over", file=sys.stderr)
return
breakchar = ", "
if self.dimensions == 2:
self._write("plot ")
elif self.dimensions == 3:
self._write("splot ")
count = 1
for f in dict_list:
self._set_dict(f)
if f == dict_list[-1]:
breakchar = "\n"
if not self._get("lines") and not self._get("points"):
raise ValueError(
"At least one of 'lines' or 'points' has to be declared otherwise nothing would be plotted."
)
if self._get("type") == "function":
self._write("%s " % (self._get("function")))
elif self._get("type") == "filename" or self._get("type") == "file":
self._write('"%s" u ' % (self._get("filename", "file", strict=False)))
                # x column
if isinstance(self._get("xcol"), int):
self._write("($%d):" % (self._get("xcol")))
else:
self._write("(%s):" % (self._get("xcol")))
                # y column
if isinstance(self._get("ycol"), int):
self._write("($%d)" % (self._get("ycol")))
else:
self._write("(%s)" % (self._get("ycol")))
                # z column, if present
if self.dimensions == 3:
if isinstance(self._get("zcol"), int):
self._write(":($%d) " % (self._get("zcol")))
else:
self._write(":(%s) " % (self._get("zcol")))
self._write(" ")
elif self._get("type") == "UNKNOWN":
raise ValueError(
"No plot type provided. Missing key 'type' from dictionary."
)
else:
raise ValueError("Unknown plot type: %s" % (f["type"]))
if self._set_style(count, "line"):
count += 1
self._write(breakchar)
if self.correct_mark:
self.xmarks = {}
self.ymarks = {}
self.postplot()
def data_to_file(self, data, formatstring=None, delete=True):
"""Convert some data (given as x-y value pairs) to a format for Gnuplot.
Args:
data: (list of pairs of floats) the data to be plotted
formatstring: (string) a printf-type string that will be used to
convert each element of data to a string. Gnuplot will plot
what's left after the conversion.
delete: (bool) whether or not the temporary file that is created
shall be deleted when finalize is called
"""
tempfile = tf.NamedTemporaryFile(delete=False)
if formatstring is None:
for datum in data:
tempfile.write("\t".join(map(str, datum)) + "\n")
else:
for datum in data:
tempfile.write(formatstring % tuple(datum))
tempfile.close()
self.tempfiles.append((delete, tempfile.name))
return tempfile.name
def _set_style(self, count, type):
"""Create s string that Gnuplot understands and that describes a plot's style.
Args:
count: (int) how many times already the automatic style generation
has been used
type: (string) what kind of thing shall be plotted. Can be:
"lines", "rectangle" or "arrow".
"""
used_count = False
style = ""
if type == "line":
style += "w "
if self._get("lines"):
style += "l"
if self._get("points"):
style += "p "
style += "ps %f " % (self._get("pointsize"))
style += " "
dash = self._get("dash")
if dash == "AUTO":
style += "dt %d " % (count)
used_count = True
else:
style += "dt %d " % (dash)
point = self._get("point")
if point == "AUTO":
style += "pt %d " % (count)
used_count = True
else:
style += "pt %d " % (point)
color = self._get("color")
if color == "AUTO":
style += "lc %d " % (count)
used_count = True
else:
style += "lc %d " % (color)
width = self._get("linewidth")
style += "lw %f " % (width)
title = self._get("title")
if title == "AUTO":
pass
elif title is None:
style += "notitle "
else:
style += 'title "%s" ' % (title)
elif type == "rectangle":
color = self._get("color")
if color == "AUTO" or color is None:
style += "fc "
else:
style += "fc %s " % (color)
style += "lt -1 lw 0 "
if self._get("solid"):
style += "fs solid %f " % (self._get("opacity"))
if self._get("border") is None:
style += "noborder "
elif type == "arrow":
dash = self._get("dash")
if dash == "AUTO":
style += "dt %d " % (count)
used_count = True
else:
style += "dt %d " % (dash)
color = self._get("color")
if color == "AUTO":
style += "lc %d " % (count)
used_count = True
else:
style += "lc %d " % (color)
width = self._get("linewidth")
style += "lw %f " % (width)
if not self._get("head"):
style += "nohead "
self._write(style)
return used_count
def set_xrange(self, start, stop):
"""Set the range of the plot in x-direction.
Args:
start: (float) start of the x range
stop: (float) end of the x range
"""
self._write("set xrange [%f:%f]\n" % (start, stop))
self.xrange = (start, stop)
def set_yrange(self, start, stop):
"""Set the range of the plot in y-direction.
Args:
start: (float) start of the y range
stop: (float) end of the y range
"""
self._write("set yrange [%f:%f]\n" % (start, stop))
self.yrange = (start, stop)
def set_zrange(self, start, stop):
"""Set the range of the plot in z-direction if the plot is 3D.
Args:
start: (float) start of the z range
stop: (float) end of the z range
"""
if self.dimensions == 3:
self._write("set zrange [%f:%f]\n" % (start, stop))
self.zrange = (start, stop)
else:
raise ValueError("Cannot set zrange for non-3d plot.")
def set_stick(self, pos, height, color, base=0.0, width=0.5):
"""Create a vertical line of a certain height (i.e., stick).
Args:
pos: (float) the x position of the stick
height: (float) the height in the y direction of the stick
color: (int) the color if the stick as in 'lc INT'
base: (float) where the stick shall start (defaults to x axis)
width: (float) the width of the stick
"""
try:
if len(pos) != len(height):
                raise ValueError(
                    "To print several sticks, the positions list and the height list must have the same number of elements."
                )
else:
gen = ((p, h) for p, h in zip(pos, height))
except TypeError:
gen = [(pos, height)]
for p, h in gen:
self.mark(
p,
"x",
width=width,
color=color,
rectangle=False,
opacity=1.0,
center=True,
extent=(base, base + h),
)
def set_background(
self, opacities, colors=None, nr_fields=None, direction="x", extent=None
):
"""Create a non-white background for the plot.
You can create backgrounds of areas with alternating colors or even
checked backgrounds. This is realized as repeating a given pattern of
opacities and colours until the entirety of the plot is filled in a
certain direction. A checked pattern can be obtained by repeating a
black-and-white pattern multiple times as black-and-white ->
white-and-black -> black-and-white etc. These backgrounds are realized
via rectangles, so they support all the properties of Gnuplot's
rectangles.
Args:
opacities: (iterable of floats) pattern of opacities
colors: (iterable of ints) pattern of colors. Defaults to black for
all pattern elements.
nr_fields: (int) the number of sections in which to partition the
plot. E.g., if given a black-and-white pattern, a value of 5
would result in black->white->black->white->black.
direction: ("x" or "y") the direction of the pattern (defaults to "x")
extent: (tuple of 2 floats) how far in the other direction (that
not specified by direction) the pattern shall extent
"""
if direction == "x":
samerange = self.xrange
otherrange = self.yrange
elif direction == "y":
samerange = self.yrange
otherrange = self.xrange
else:
raise ValueError('Unknown direction "%s", must be x or y.' % (direction))
if self.dimensions != 2:
raise RuntimeError("Cannot create background for non-2d plot.")
if extent is None:
if otherrange is None:
raise ValueError(
"Cannot create background in %s-direction without other range being set."
% (direction)
)
else:
extent = otherrange
if samerange is None:
raise ValueError(
"Cannot create background in %s-direction without same range being set."
% (direction)
)
try:
if colors is None:
colors = [None] * len(opacities)
except TypeError:
opacities = [opacities]
colors = [None]
try:
if len(colors) != len(opacities):
raise ValueError(
"Cannot create background, colors and opacities do not have the same number of elements."
)
else:
iterable = [(c, o) for c, o in zip(colors, opacities)]
except TypeError:
            iterable = [(colors, opacities)]
if nr_fields is None:
nr_fields = len(colors)
result = []
width = 1.0 * (samerange[1] - samerange[0]) / (nr_fields)
pos = samerange[0]
count = 0
for color, opacity in itertools.cycle(iterable):
self.mark(
pos,
direction,
width=width,
color=color,
rectangle=True,
center=False,
opacity=opacity,
extent=extent,
)
result.append((pos, pos + width))
pos += width
count += 1
if count == nr_fields:
break
return result
def mark(
self,
pos,
direction,
width=0.5,
color=None,
rectangle=False,
opacity=1.0,
center=True,
extent=None,
label=None,
zpos=None,
dash=None,
    ):
        """Create a vertical or horizontal line on the plot.
If the plot is 3D, the position in the 3rd direction is also required.
However, the plots are still in planes parallel to the x-y plane.
Args:
pos: (float) x or y position of the mark (depending on direction)
direction: ("x" or "y") the direction of the line
width: (float) the line width
color: (int) colour as in 'lc INT'
rectangle: (bool) whether the mark is not just a line but a rectangle
opacity: (float) opacity of the mark (only used if rectangle)
center: (bool) whether or not the given position is the mark's center.
Otherwise, the pos is considered to be the left border (only
used if rectangle)
extent: (tuple of 2 floats) the startpoint and endpoint in the
direction of the line (defaults to: entire plot)
label: (dictionary) an optional description of an optional label. See
description of set_label for required and optional keys.
zpos: (float) position of the mark in a 3D plot. Required if the
dimensionality of the plot is 3.
"""
if direction == "x":
hererange = self.yrange
heremarks = self.xmarks
startpos = lambda p, e: (p, e[0])
endpos = lambda p, e: (p, e[1])
elif direction == "y":
hererange = self.xrange
heremarks = self.ymarks
startpos = lambda p, e: (e[0], p)
endpos = lambda p, e: (e[1], p)
else:
raise ValueError('Unknown direction "%s", must be x or y.' % (direction))
if self.dimensions != 2:
if rectangle:
raise RuntimeError(
"Cannot set %smark as rectangle for non-2d plot." % (direction)
)
elif self.dimensions == 3:
if zpos is None:
raise RuntimeError(
"Cannot set %smark as arrow for non-2d plot without zpos defined."
% (direction)
)
else:
raise RuntimeError(
"Fatal internal error: wrong number of dimensions set. However that happened."
)
if extent is None:
if hererange is None:
raise ValueError(
"Cannot create %smark without other range being set." % (direction)
)
else:
extent = hererange
if not rectangle:
if self.correct_mark:
if pos in heremarks:
heremarks[pos] += 1
_pos = pos + self.correct_mark_dist * (heremarks[pos])
else:
heremarks[pos] = 0
_pos = pos
self._write("set arrow from %8.7E,%8.7E" % startpos(_pos, extent))
if self.dimensions == 3:
self._write(",%8.7E" % (zpos))
self._write(" to %8.7E,%8.7E" % endpos(_pos, extent))
if self.dimensions == 3:
self._write(",%8.7E" % (zpos))
self._write(" ")
self._set_dict(
{
"head": False,
"color": color if color is not None else 0,
"linewidth": width,
"dash": 1 if dash is None else dash,
}
)
self._set_style(heremarks[pos], "arrow")
if opacity != 1.0:
print(
"WARNING: opacity unequal 100% set, but is ignored for xmark that is not a rectangle.",
file=sys.stderr,
)
else:
if center:
pos -= 0.5 * width
self._write("set obj %d rect from " % (self.rectanglecount))
self._write("%f,%f to " % startpos(pos, extent))
self._write("%f,%f " % endpos(pos + width, extent))
self._set_dict({"color": color, "opacity": opacity, "border": None})
self._set_style(self.rectanglecount, "rectangle")
self.rectanglecount += 1
self._write("\n")
if label is not None:
if isinstance(label, dict):
label = [label]
for l in label:
if "where" in l:
where = l["where"]
else:
where = "tl"
if where == "tl":
labelpos = (
startpos(pos, extent)[0] + l["offset"][0],
startpos(pos, extent)[1] + l["offset"][1],
)
l["pivot"] = "left"
elif where == "bl":
labelpos = (
startpos(pos, extent)[0] + l["offset"][0],
endpos(pos, extent)[1] + l["offset"][1],
)
l["pivot"] = "left"
elif where == "tr":
labelpos = (
endpos(pos, extent)[0] + l["offset"][0],
startpos(pos, extent)[1] + l["offset"][1],
)
l["pivot"] = "right"
elif where == "br":
labelpos = (
endpos(pos, extent)[0] + l["offset"][0],
endpos(pos, extent)[1] + l["offset"][1],
)
l["pivot"] = "right"
elif where == "c":
labelpos = (
0.5 * (startpos(pos, extent)[0] + endpos(pos, extent)[0])
+ l["offset"][0],
0.5 * (startpos(pos, extent)[1] + endpos(pos, extent)[1])
+ l["offset"][1],
)
l["pivot"] = "center"
else:
raise ValueError(
'Wrong value for "where" in dictionary. Must be one of ["tl","bl","tr","br","c"] but is %s.'
% (where)
)
l["position"] = labelpos
self.set_label(l)
def set_label(self, label):
"""Set a label on a plot.
The argument label is a dictionary whose entries for "font",
"fontsize", "pivot", "rotation" and "depth" can overwrite the defaults.
Needs to have entries for "text" (the label's text) and "position"
(tuple of floats describing the position).
Args:
label: (dictionary) a description of the label. See description of
set_label for required and optional keys.
"""
if "font" in label:
font = label["font"]
else:
font = self.font
if "fontsize" in label:
fontsize = label["fontsize"]
else:
fontsize = self.fontsize
if "pivot" in label:
pivot = label["pivot"]
else:
pivot = "center"
if "rotation" in label:
rotation = label["rotation"]
else:
rotation = 0.0
if "depth" in label:
depth = label["depth"]
else:
depth = "front"
self._write(
'set label "%s" at %f,%f font "%s,%d" %s rotate by %.2f %s\n'
% (
label["text"],
label["position"][0],
label["position"][1],
font,
fontsize,
depth,
rotation,
pivot,
)
)
def finalize(self, delete=True, convert=False):
"""Finalize the plot.
This calls a set of routines that finish the plotting procedure.
Without calling this, the plot will never actually be created.
Args:
delete: (bool) whether or not temporary files that were declared as
"to-be-deleted" shall actually be deleted.
convert: (bool) whether or not to convert the eps file to a pdf
file if the required software is installed
"""
if not self.f.closed:
self.f.close()
rc = self.GP.wait()
if delete:
for d, filename in self.tempfiles:
if d:
os.remove(filename)
return rc
| gpl-3.0 | 6,274,585,563,111,448,000 | 36.48478 | 124 | 0.512045 | false |
endthestart/tinsparrow | tinsparrow/tinsparrow/urls.py | 1 | 2739 | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from rest_framework.authtoken.views import obtain_auth_token
from .views import LibraryView, songfile, LayoutView
from .api import api_root, song_upload
from .api import ArtistList, ArtistDetail
from .api import AlbumList, AlbumDetail, ArtistAlbumList
from .api import SongList, SongDetail, ArtistSongList, AlbumSongList
from .api import QueueList
from .api import LibraryList
artist_urls = patterns(
'',
url(r'^/(?P<pk>\d+)/albums/$', ArtistAlbumList.as_view(), name='artistalbum-list'),
url(r'^/(?P<pk>\d+)/songs/$', ArtistSongList.as_view(), name='artistsong-list'),
url(r'^/(?P<pk>\d+)/$', ArtistDetail.as_view(), name='artist-detail'),
url(r'^/$', ArtistList.as_view(), name='artist-list'),
)
album_urls = patterns(
'',
url(r'^/(?P<pk>\d+)/songs/$', AlbumSongList.as_view(), name='albumsong-list'),
url(r'^/(?P<pk>\d+)/$', AlbumDetail.as_view(), name='album-detail'),
url(r'^/$', AlbumList.as_view(), name='album-list'),
)
song_urls = patterns(
'',
url(r'^/(?P<pk>\d+)/$', SongDetail.as_view(), name='song-detail'),
url(r'^/$', SongList.as_view(), name='song-list'),
)
queue_urls = patterns(
'',
url(r'^/$', QueueList.as_view(), name='queue-list'),
)
library_urls = patterns(
'',
url(r'^/$', LibraryList.as_view(), name='library-list'),
)
urlpatterns = patterns(
'',
url(r'^api/$', api_root, name='api-root'),
url(r'^api/upload/$', song_upload, name='song-upload'),
url(r'^api/artists', include(artist_urls)),
url(r'^api/albums', include(album_urls)),
url(r'^api/songs', include(song_urls)),
url(r'^api/queue', include(queue_urls)),
url(r'^api/library', include(library_urls)),
url(r'^api/token-auth/', obtain_auth_token),
url(r'^library/', login_required(LibraryView.as_view()), name='library'),
# url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^song/(?P<song_id>\d+)', songfile, name='song_file'),
url(r'^$', 'tinsparrow.views.login', name='home'),
url(r'^account/login/$', 'tinsparrow.views.login', name='login'),
url(r'^account/logout/$', 'tinsparrow.views.logout', name='logout'),
url(r'^layout/$', LayoutView.as_view(), name='layout_view'),
url(r'^admin/', include(admin.site.urls)),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns(
'',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| mit | -176,903,586,610,345,180 | 34.571429 | 87 | 0.646951 | false |
relekang/python-semantic-release | semantic_release/settings.py | 1 | 4061 | """Helpers to read settings from setup.cfg or pyproject.toml
"""
import configparser
import importlib
import logging
import os
from collections import UserDict
from functools import wraps
from os import getcwd
from typing import Callable, List
import tomlkit
from tomlkit.exceptions import TOMLKitError
from .errors import ImproperConfigurationError
logger = logging.getLogger(__name__)
def _config():
cwd = getcwd()
ini_paths = [
os.path.join(os.path.dirname(__file__), "defaults.cfg"),
os.path.join(cwd, "setup.cfg"),
]
ini_config = _config_from_ini(ini_paths)
toml_path = os.path.join(cwd, "pyproject.toml")
toml_config = _config_from_pyproject(toml_path)
# Cast to a UserDict so that we can mock the get() method.
return UserDict({**ini_config, **toml_config})
def _config_from_ini(paths):
parser = configparser.ConfigParser()
parser.read(paths)
flags = {
"changelog_capitalize",
"changelog_scope",
"check_build_status",
"commit_version_number",
"patch_without_tag",
"major_on_zero",
"remove_dist",
"upload_to_pypi",
"upload_to_release",
"tag_commit",
}
# Iterate through the sections so that default values are applied
# correctly. See:
# https://stackoverflow.com/questions/1773793/convert-configparser-items-to-dictionary
config = {}
for key, _ in parser.items("semantic_release"):
if key in flags:
config[key] = parser.getboolean("semantic_release", key)
else:
config[key] = parser.get("semantic_release", key)
return config
def _config_from_pyproject(path):
if os.path.isfile(path):
try:
with open(path, "r") as f:
pyproject = tomlkit.loads(f.read())
if pyproject:
return pyproject.get("tool", {}).get("semantic_release", {})
except TOMLKitError as e:
logger.warning(f"Could not decode pyproject.toml: {e}")
return {}
config = _config()
def current_commit_parser() -> Callable:
"""Get the currently-configured commit parser
:raises ImproperConfigurationError: if ImportError or AttributeError is raised
:returns: Commit parser
"""
try:
# All except the last part is the import path
parts = config.get("commit_parser").split(".")
module = ".".join(parts[:-1])
# The final part is the name of the parse function
return getattr(importlib.import_module(module), parts[-1])
except (ImportError, AttributeError) as error:
raise ImproperConfigurationError(f'Unable to import parser "{error}"')
def current_changelog_components() -> List[Callable]:
"""Get the currently-configured changelog components
:raises ImproperConfigurationError: if ImportError or AttributeError is raised
:returns: List of component functions
"""
component_paths = config.get("changelog_components").split(",")
components = list()
for path in component_paths:
try:
# All except the last part is the import path
parts = path.split(".")
module = ".".join(parts[:-1])
# The final part is the name of the component function
components.append(getattr(importlib.import_module(module), parts[-1]))
except (ImportError, AttributeError) as error:
raise ImproperConfigurationError(
f'Unable to import changelog component "{path}"'
)
return components
def overload_configuration(func):
"""This decorator gets the content of the "define" array and edits "config"
according to the pairs of key/value.
"""
@wraps(func)
def wrap(*args, **kwargs):
if "define" in kwargs:
for defined_param in kwargs["define"]:
pair = defined_param.split("=", maxsplit=1)
if len(pair) == 2:
config[str(pair[0])] = pair[1]
return func(*args, **kwargs)
return wrap
| mit | -702,771,670,662,746,500 | 28.860294 | 90 | 0.628417 | false |
jasuarez/minbif | tests/test.py | 1 | 14535 | # -*- coding: utf-8 -*-
"""
Minbif - IRC instant messaging gateway
Copyright(C) 2009 Romain Bignon
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from __future__ import with_statement
import sys
import os
import traceback
from select import select
from subprocess import Popen, PIPE, STDOUT
from time import sleep, time
import re
try:
import config
except ImportError:
print >>sys.stderr, 'Error: please rename config.py.example to config.py and edit it!'
sys.exit(1)
from structs import Account, Buddy
NOBUFFER_PATH = os.path.normpath(os.path.join(os.path.dirname(__file__), 'libnobuffer.so'))
MINBIF_PATH = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'build', 'src', 'minbif'))
def getBacktrace(empty="Empty backtrace."):
"""
Try to get backtrace as string.
Returns "Error while trying to get backtrace" on failure.
"""
try:
info = sys.exc_info()
trace = traceback.format_exception(*info)
sys.exc_clear()
if trace[0] != "None\n":
return "".join(trace)
except:
# No i18n here (imagine if i18n function calls error...)
return "Error while trying to get backtrace"
return empty
class Test:
NAME = ''
INSTANCES = {}
TESTS = []
PATH = '/tmp/minbif-tests'
def __getitem__(self, inst):
return self.INSTANCES[inst]
def run(self, stop_on_failure=True):
print '\nStarting test: %s' % self.NAME
ret = self._run(stop_on_failure)
for name, instance in self.INSTANCES.iteritems():
instance.stop()
if ret:
print 'End of test %s: success' % self.NAME
else:
print 'End of test %s: failed' % self.NAME
self.display_logs()
print ''
return ret
def _run(self, stop_on_failure=True):
if not self.rm_and_mkdir(self.PATH):
return False
for name, instance in self.INSTANCES.iteritems():
sys.stdout.write('\tLaunch %-25s' % (name + ':'))
inst_path = '%s/%s' % (self.PATH, name)
if not self.rm_and_mkdir(inst_path):
return False
if instance.start(inst_path):
print '[Success]'
else:
print '[Failed]'
return False
for test in self.TESTS:
if not self.run_test(test) and stop_on_failure:
return False
return True
def run_test(self, test):
sys.stdout.write('\tTest %-26s ' % (test + ':'))
if not hasattr(self, 'test_%s' % test):
print '[Not found]'
else:
func = getattr(self, 'test_%s' % test)
msg = ''
try:
ret = func()
except Exception, e:
ret = False
msg = '%s: %s' % (type(e).__name__, str(e))
for instance in self.INSTANCES.itervalues():
instance.log(getBacktrace())
if ret:
print '[Success]'
return True
else:
print '[Failed] %s' % msg
return False
def rm_and_mkdir(self, path):
try:
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
except OSError, e:
print 'Error: unable to remove directory %s: %s' % (path, e)
return False
try:
os.rmdir(path)
except:
pass
try:
os.mkdir(path)
except OSError, e:
print 'Error: unable to create directory %s: %s' % (path, e)
return False
return True
def display_logs(self):
for name, instance in self.INSTANCES.iteritems():
print '\nLog for %s:' % name
instance.display_logs()
class Message:
def __init__(self, cmd, sender=None, receiver=None, args=[]):
self.cmd = cmd
self.sender = sender
self.receiver = receiver
self.args = args
@staticmethod
def parseline(line):
args = line.split()
if not args or len(args) < 3:
return None
cmd = None
sender = None
receiver = None
if args[0][0] == ':':
sender = args.pop(0)[1:]
cmd = args.pop(0)
receiver = args.pop(0)
for i in xrange(len(args)):
if args[i][0] == ':':
args[i] = ' '.join(args[i:])[1:]
args = args[:i+1]
break
return Message(cmd, sender, receiver, args)
class Instance:
DEFAULT_CONF = {'path': {'users': ''},
'irc': {'hostname': 'im.symlink.me',
'type': 0,
'ping': 0,
},
'file_transfers': {'enabled': 'true',
'dcc': 'true',
'port_range': '1024-65535',
},
'aaa': {},
'logging': {'level': 'DESYNCH WARNING ERR INFO DEBUG',
'to_syslog': 'false'
},
}
def __init__(self, config={}):
self.config = config
self.path = ''
self.logs = []
self.process = None
def display_logs(self):
for log in self.logs:
print ' %s' % log
def log(self, s):
self.logs.append('%.3f %s' % (time(), s))
def stop(self):
if not self.process:
return
try:
while self.readline(): pass
self.write("QUIT")
except IOError:
pass
else:
self.process.wait()
self.process = None
def write(self, msg):
if self.process:
self.log("> %s" % msg)
self.process.stdin.write("%s\n" % msg)
def readline(self, timeout=0):
out = self.process.stdout
if timeout is not None:
ready = select([out.fileno()], [], [], timeout)[0]
if not ready:
return None
line = out.readline()
if not line:
return None
line = line.rstrip()
self.log("< %s" % line)
return line
def readmsg(self, cmd=None, timeout=0):
start = time()
while start + timeout >= (1 if timeout == 0 else time()):
line = self.readline(timeout)
if not line:
return None
msg = Message.parseline(line)
if not msg:
print line
continue
if not cmd or isinstance(cmd, (str,unicode)) and msg.cmd == cmd \
or msg.cmd in cmd:
return msg
def start(self, path):
try:
self.path = path
config_path = '%s/minbif.conf' % path
self.write_config(config_path)
self.process = Popen((MINBIF_PATH, config_path),
stdin=PIPE, stdout=PIPE, stderr=STDOUT,
env={"LD_PRELOAD": NOBUFFER_PATH})
return self.login()
except Exception, e:
self.process = None
self.log(getBacktrace())
sys.stdout.write("(%s) " % e)
return False
def update_config(self, config, config2):
for key, value in config2.iteritems():
if key in config and isinstance(config[key], dict):
self.update_config(config[key], value)
else:
config[key] = value
def write_config(self, filename):
config = self.DEFAULT_CONF.copy()
config['path']['users'] = '%s/users' % self.path
self.update_config(config, self.config)
self.config = config
with open(filename, 'w') as f:
self.write_config_section(f, 0, config)
def write_config_section(self, fp, depth, section):
tabs = ' ' * depth
for key, value in section.iteritems():
if isinstance(value, dict):
fp.write("%s%s {\n" % (tabs, key))
self.write_config_section(fp, depth+1, value)
fp.write("%s}\n" % tabs)
else:
fp.write("%s%s = %s\n" % (tabs, key, value))
def login(self, nickname="minbif", password="minbifrocks"):
self.write("USER minbif * * :MinBif")
self.write("PASS %s" % password)
self.write("NICK %s" % nickname)
msg = self.readmsg("001", 5)
return (msg != None)
def create_account(self, proto=None, channel='&minbif'):
account = None
if not proto:
try:
account = config.ACCOUNTS.pop()
except IndexError:
return False
else:
for acc in xrange(len(config.ACCOUNTS)):
if config.ACCOUNTS[acc].proto == proto:
account = config.ACCOUNTS.pop(acc)
break
if not account:
return False
options = ''
for key, value in account.options:
options += ' '
if isinstance(value, bool):
if value:
options += "-%s" % key
else:
options += "-!%s" % key
else:
options += "-%s \"%s\"" % (key, value)
self.write("MAP add %s %s %s %s %s" % (account.proto,
account.username,
account.password,
options,
channel))
return self.readmsg("017", 1) != None
def remove_account(self, name):
self.write("MAP delete %s" % name)
return self.readmsg("017", 1) != None
def wait_connected(self, name):
accounts = self.get_accounts()
if not name in accounts.iterkeys():
return False
acc = accounts[name]
if acc.state == 'connected':
return True
if acc.state != 'connecting':
return False
while 1:
msg = self.readmsg(('NOTICE','PRIVMSG'),5)
if not msg:
return False
m = re.match('\*\*\* Notice -- Connection to ([^ ]+):%s established!' % name, msg.args[0])
if m:
return True
if msg.sender.startswith('request!') and msg.args[0] == 'New request: SSL Certificate Verification':
self.write("PRIVMSG request :accept")
def get_accounts(self):
# flush
while self.readline():
pass
self.write("MAP")
accounts = {}
while 1:
msg = self.readmsg(("015", "017"), 2)
if not msg:
return False
if msg.cmd == "017":
break
line = msg.args[0]
# me
if not line.startswith('|') and not line.startswith('`'):
continue
m = re.match(".([- ][\*\+]*)(.+):([[a-zA-Z]+)([0-9]*)", line)
if m:
acc = Account(proto=m.group(3), username=m.group(2))
prompt2state = {'-': 'connected',
' ': 'disconnected',
'-*': 'connecting',
'-+': 'added'
}
acc.state = prompt2state[m.group(1)]
accounts['%s%s' % (m.group(3), m.group(4))] = acc
return accounts
def get_full_account(self, accname):
while self.readline():
pass
self.write("MAP edit %s" % accname)
acc = None
while 1:
msg = self.readmsg("NOTICE", 1)
if not msg:
return acc
m = re.match("-- Parameters of account (.+):([a-zA-Z]+)([0-9]*) --", msg.args[0])
if m:
acc = Account(proto=m.group(2), username=m.group(1))
else:
m = re.match("([^ ]+) = (.*)", msg.args[0])
if m:
acc.options[m.group(1)] = m.group(2)
def get_buddies(self, accname=''):
while self.readline(): pass
self.write("WHO %s" % accname)
buddies = {}
while 1:
msg = self.readmsg(('352', '315'), 3)
if not msg or msg.cmd == '315':
return buddies
servername = msg.args[3]
if servername == self.config['irc']['hostname']:
continue
nickname = msg.args[4]
username = msg.args[1]
hostname = msg.args[2]
realname = msg.args[6][2:]
buddy = Buddy(servername, nickname, username, hostname, realname)
buddies[nickname] = buddy
def request_answer(self, question, answer, timeout=1):
self.log('Wait request "%s"' % question)
while 1:
msg = self.readmsg('PRIVMSG', timeout)
if not msg:
return False
if msg.sender.startswith('request!') and msg.args[0].startswith(question):
self.write('PRIVMSG request :%s' % answer)
return True
def clean_buddies(self, accname=''):
self.request_answer('New request: Authorize buddy?', 'authorize', 0)
while 1:
buddies = self.get_buddies(accname)
if not buddies:
break
for nick, buddy in buddies.iteritems():
self.write("KILL %s" % nick)
self.request_answer('New request: Authorize buddy?', 'authorize', 0)
return True
| gpl-2.0 | -4,323,576,392,194,958,000 | 30.735808 | 112 | 0.487513 | false |
MITHyperloopTeam/software_core | software/UI/pod_tube_vis.py | 1 | 8129 | #!/usr/bin/env python
#signif reference to http://pastebin.com/k87sfiEf
import sys
import math
import signal
import time
import os
import math, random
import numpy as np
from numpy import linalg
from PIL import Image
#interface stuff
from PyQt4 import QtCore, QtGui, QtOpenGL
import pyqtgraph as pg
#comms stuff
import lcm
from mithl import floating_base_t
from mithl import particle_filter_t
from mithl import vectorXf_t
from mithl import state_estimator_particle_set
from mithl import state_estimator_particle
from lcm_utils import *
#read yaml config information
import yaml
class PodTubeVisWidget(QtGui.QWidget):
''' Pod Visualization window. Plots pod state with pyqtgraph.'''
def __init__(self, config, lc=None, parent=None, name=None):
super(PodTubeVisWidget, self).__init__(parent)
self.lc = lc
if name:
self.setObjectName(name)
self.startTime = time.time()
self.config = config
self.setMinimumHeight(200)
self.plot = pg.PlotWidget(title="State Estimation")
self.plot.setXRange(0,float(config['tube']['length']),padding=0.1)
self.plot.hideAxis("left")
img = QtGui.QImage("../models/pod.png")
img = img.convertToFormat(QtGui.QImage.Format_ARGB32_Premultiplied)
img = img.rgbSwapped()
img = img.mirrored(False, True)
imgArray = np.float64(pg.imageToArray(img, copy=True))
self.img_mle = pg.ImageItem(imgArray, opacity=0.0)
self.img_gt = pg.ImageItem(imgArray, opacity=0.9)
self.img_aspect = float(imgArray.shape[1]) / float(imgArray.shape[0])
self.pod_mle = 0.0
self.pod_gt = 0.0
self.viewBox = self.plot.getViewBox()
self.viewBox.setMouseEnabled(x=True, y=False)
self.viewBox.setYRange(-0.5, 0.5)
self.viewBox.setBackgroundColor([50, 80, 80])
# add a nice gradient background
self.gradBackground = QtGui.QGraphicsRectItem(0, -1, config["tube"]["length"], 2)
gradient = QtGui.QLinearGradient(0, -1, 0, 2)
gradient.setColorAt(0.0, QtGui.QColor(50, 50, 50))
gradient.setColorAt(1.0, QtGui.QColor(40, 40, 160))
self.gradBackground.setBrush(QtGui.QBrush(gradient))
self.viewBox.addItem(self.gradBackground)
# add the fiducial markers at half opacity
line_center = config["tube"]["length"] - config["tube"]["distance_after_last_fiducial"]
self.lines = []
self.lineColor = QtGui.QColor(200, 200, 0)
self.lineWidth = config["tube"]["fiducial_width"]
while (line_center > 0):
line = QtGui.QGraphicsLineItem(line_center, -1.0, line_center, 1.0)
self.lines.append(line)
self.viewBox.addItem(line)
line_center -= config["tube"]["fiducial_separation"]
# add the keep-outs and back and front
backZone = QtGui.QGraphicsRectItem(-50000, -1, 50000, 2)
#backZone.setPen(QtCore.Qt.NoPen)
backZone.setBrush(QtGui.QBrush(QtGui.QColor(200, 50, 50), QtCore.Qt.Dense1Pattern))
self.viewBox.addItem(backZone)
frontZone = QtGui.QGraphicsRectItem(config["tube"]["length"], -1, 50000, 2)
#backZone.setPen(QtCore.Qt.NoPen)
frontZone.setBrush(QtGui.QBrush(QtGui.QColor(200, 50, 50), QtCore.Qt.Dense1Pattern))
self.viewBox.addItem(frontZone)
self.particles = np.zeros((0, 3))
self.particles_scatter = pg.ScatterPlotItem()
self.viewBox.addItem(self.particles_scatter)
self.viewBox.addItem(self.img_mle)
self.viewBox.addItem(self.img_gt)
self.densityCurve = self.plot.plot([0], [0], pen=pg.mkPen([255, 0, 0]))
#self.setWindowTitle("BrakingSliders")
mainLayout = QtGui.QVBoxLayout()
mainLayout.addWidget(self.plot)
self.setLayout(mainLayout)
self.podse_sub = self.lc.subscribe("_FC_SE", self.handle_state_estimate)
self.podfb_sub = self.lc.subscribe("SIM_FB", self.handle_ground_truth)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update)
self.timer.start(33)
def update(self):
viewXSize = float(self.viewBox.viewRange()[0][1] - self.viewBox.viewRange()[0][0])
viewYSize = float(self.viewBox.viewRange()[1][1] - self.viewBox.viewRange()[1][0])
# given MLE, make view range track pod
xmin, xmax = self.viewBox.viewRange()[0]
borderRatio = 0.4
softxmax = borderRatio*xmin + (1.0-borderRatio)*xmax
softxmin = (1.0-borderRatio)*xmin + borderRatio*xmax
if (self.pod_gt - softxmax >= 0):
xmin += (self.pod_gt - softxmax)*0.25
xmax += (self.pod_gt - softxmax)*0.25
elif (self.pod_gt - softxmin <= 0):
xmin += (self.pod_gt - softxmin)*0.25
xmax += (self.pod_gt - softxmin)*0.25
self.viewBox.setRange(xRange=(xmin, xmax), padding=0.0)
# might need to generate these
viewXSize = xmax - xmin
minScale = 1./10.0
# draw as either 2.0 meters long, or minScale total line length, preserving
# aspect ratio in view pixel
actualViewRatio = self.viewBox.viewPixelSize() # size of one screen pixel in view coords
viewRatioAdjustment = float(actualViewRatio[0]) / float(actualViewRatio[1])
mleDrawLength = max(2.0, viewXSize * minScale)
mleDrawHeight = mleDrawLength * self.img_aspect / viewRatioAdjustment
mleDrawX = self.pod_mle - mleDrawLength / 2.
mleDrawY = viewYSize/10.0 + self.viewBox.viewRange()[1][0] - mleDrawHeight / 2.0
mleDrawRect = QtCore.QRectF(mleDrawX, mleDrawY, mleDrawLength, mleDrawHeight)
self.img_mle.setRect(mleDrawRect)
gtDrawX = self.pod_gt - mleDrawLength / 2.
gtDrawRect = QtCore.QRectF(gtDrawX, mleDrawY, mleDrawLength, mleDrawHeight)
self.img_gt.setRect(gtDrawRect)
for line in self.lines:
# lines must be at least 1 px wide
line.setPen(QtGui.QPen(self.lineColor, max(self.lineWidth, actualViewRatio[0]) , QtCore.Qt.SolidLine))
if len(self.particles) > 0:
weights = np.array([p[2] for p in self.particles])
normalized_weights = weights / np.max(weights)
self.particles_scatter.setData(np.array([p[0][0] for p in self.particles]), 0.5*normalized_weights-0.25, pen=pg.mkPen([0, 125, 255, 150], width=5))
# build up sample points
densityX = np.array([xmin, xmax])
for p in self.particles:
densityX = np.append(densityX, np.arange(p[0][0]-p[1][0][0]*4, p[0][0]+p[1][0][0]*4, max(p[1][0][0]/2, 0.01)))
densityX = np.sort(densityX)
densityY = np.zeros(densityX.shape)
for p in self.particles:
densityY += p[2] * np.exp( - (densityX - p[0][0])**2 / p[1][0][0]**2) / np.sqrt(2 * math.pi * max(p[1][0][0]/2, 0.01)**2)
densityY /= np.max(densityY)*1.5
densityY -= -mleDrawY
self.densityCurve.setData(densityX, densityY)
def handle_state_estimate(self, channel, data):
msg = state_estimator_particle_set.decode(data)
self.pod_mle = msg.particles[0].mu[0]
particles = []
for i in range(msg.n_particles):
if msg.particles[i].id >= 0 and msg.particles[i].weight > 0.:
particles.append([msg.particles[i].mu,
msg.particles[i].Sigma,
msg.particles[i].weight])
self.particles = particles
self.pod_gt = msg.particles[0].mu[0]
def handle_ground_truth(self, channel, data):
msg = floating_base_t.decode(data)
#self.pod_gt = msg.q[0]
if __name__ == '__main__':
# hook up interrupt signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
with open('../config/simConfig.yaml', 'r') as f:
config = yaml.load(f)
lc = create_lcm()
app = QtGui.QApplication(sys.argv)
window = PodTubeVisWidget(config, lc=lc)
window.show()
start_lcm(lc)
sys.exit(app.exec_())
| lgpl-3.0 | 106,389,324,820,216,420 | 36.634259 | 159 | 0.62111 | false |
Azure/WALinuxAgent | tests/protocol/mocks.py | 1 | 9717 | # Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import contextlib
import re
from azurelinuxagent.common.protocol.wire import WireProtocol
from azurelinuxagent.common.utils import restutil
from tests.tools import patch
from tests.protocol import mockwiredata
# regex used to determine whether to use the mock wireserver data
_USE_MOCK_WIRE_DATA_RE = re.compile(
r'https?://(mock-goal-state|{0}).*'.format(restutil.KNOWN_WIRESERVER_IP.replace(r'.', r'\.')), re.IGNORECASE)
@contextlib.contextmanager
def mock_wire_protocol(mock_wire_data_file, http_get_handler=None, http_post_handler=None, http_put_handler=None, fail_on_unknown_request=True):
"""
Creates a WireProtocol object that handles requests to the WireServer and the Host GA Plugin (i.e requests on the WireServer endpoint), plus
some requests to storage (requests on the fake server 'mock-goal-state').
The data returned by those requests is read from the files specified by 'mock_wire_data_file' (which must follow the structure of the data
files defined in tests/protocol/mockwiredata.py).
The caller can also provide handler functions for specific HTTP methods using the http_*_handler arguments. The return value of the handler
function is interpreted similarly to the "return_value" argument of patch(): if it is an exception the exception is raised or, if it is
any object other than None, the value is returned by the mock. If the handler function returns None the call is handled using the mock
wireserver data or passed to the original to restutil.http_request.
The returned protocol object maintains a list of "tracked" urls. When a handler function returns a value than is not None the url for the
request is automatically added to the tracked list. The handler function can add other items to this list using the track_url() method on
the mock.
The return value of this function is an instance of WireProtocol augmented with these properties/methods:
* mock_wire_data - the WireProtocolData constructed from the mock_wire_data_file parameter.
* start() - starts the patchers for http_request and CryptUtil
* stop() - stops the patchers
* track_url(url) - adds the given item to the list of tracked urls.
* get_tracked_urls() - returns the list of tracked urls.
NOTE: This function patches common.utils.restutil.http_request and common.protocol.wire.CryptUtil; you need to be aware of this if your
tests patch those methods or others in the call stack (e.g. restutil.get, resutil._http_request, etc)
"""
tracked_urls = []
# use a helper function to keep the HTTP handlers (they need to be modified by set_http_handlers() and
# Python 2.* does not support nonlocal declarations)
def http_handlers(get, post, put):
http_handlers.get = get
http_handlers.post = post
http_handlers.put = put
del tracked_urls[:]
http_handlers(get=http_get_handler, post=http_post_handler, put=http_put_handler)
#
# function used to patch restutil.http_request
#
original_http_request = restutil.http_request
def http_request(method, url, data, **kwargs):
# if there is a handler for the request, use it
handler = None
if method == 'GET':
handler = http_handlers.get
elif method == 'POST':
handler = http_handlers.post
elif method == 'PUT':
handler = http_handlers.put
if handler is not None:
if method == 'GET':
return_value = handler(url, **kwargs)
else:
return_value = handler(url, data, **kwargs)
if return_value is not None:
tracked_urls.append(url)
if isinstance(return_value, Exception):
raise return_value
return return_value
# if the request was not handled try to use the mock wireserver data
if _USE_MOCK_WIRE_DATA_RE.match(url) is not None:
if method == 'GET':
return protocol.mock_wire_data.mock_http_get(url, **kwargs)
if method == 'POST':
return protocol.mock_wire_data.mock_http_post(url, data, **kwargs)
if method == 'PUT':
return protocol.mock_wire_data.mock_http_put(url, data, **kwargs)
# the request was not handled; fail or call the original resutil.http_request
if fail_on_unknown_request:
raise ValueError('Unknown HTTP request: {0} [{1}]'.format(url, method))
return original_http_request(method, url, data, **kwargs)
#
# functions to start/stop the mocks
#
def start():
patched = patch("azurelinuxagent.common.utils.restutil.http_request", side_effect=http_request)
patched.start()
start.http_request_patch = patched
patched = patch("azurelinuxagent.common.protocol.wire.CryptUtil", side_effect=protocol.mock_wire_data.mock_crypt_util)
patched.start()
start.crypt_util_patch = patched
start.http_request_patch = None
start.crypt_util_patch = None
def stop():
if start.crypt_util_patch is not None:
start.crypt_util_patch.stop()
if start.http_request_patch is not None:
start.http_request_patch.stop()
#
# create the protocol object
#
protocol = WireProtocol(restutil.KNOWN_WIRESERVER_IP)
protocol.mock_wire_data = mockwiredata.WireProtocolData(mock_wire_data_file)
protocol.start = start
protocol.stop = stop
protocol.track_url = lambda url: tracked_urls.append(url) # pylint: disable=unnecessary-lambda
protocol.get_tracked_urls = lambda: tracked_urls
protocol.set_http_handlers = lambda http_get_handler=None, http_post_handler=None, http_put_handler=None:\
http_handlers(get=http_get_handler, post=http_post_handler, put=http_put_handler)
# go do it
try:
protocol.start()
protocol.detect()
yield protocol
finally:
protocol.stop()
class HttpRequestPredicates(object):
"""
Utility functions to check the urls used by tests
"""
@staticmethod
def is_goal_state_request(url):
return url.lower() == 'http://{0}/machine/?comp=goalstate'.format(restutil.KNOWN_WIRESERVER_IP)
@staticmethod
def is_telemetry_request(url):
return url.lower() == 'http://{0}/machine?comp=telemetrydata'.format(restutil.KNOWN_WIRESERVER_IP)
@staticmethod
def is_health_service_request(url):
return url.lower() == 'http://{0}:80/healthservice'.format(restutil.KNOWN_WIRESERVER_IP)
@staticmethod
def is_in_vm_artifacts_profile_request(url):
return re.match(r'https://.+\.blob\.core\.windows\.net/\$system/.+\.(vmSettings|settings)\?.+', url) is not None
@staticmethod
def _get_host_plugin_request_artifact_location(url, request_kwargs):
if 'headers' not in request_kwargs:
raise ValueError('Host plugin request is missing HTTP headers ({0})'.format(url))
headers = request_kwargs['headers']
if 'x-ms-artifact-location' not in headers:
raise ValueError('Host plugin request is missing the x-ms-artifact-location header ({0})'.format(url))
return headers['x-ms-artifact-location']
@staticmethod
def is_host_plugin_health_request(url):
return url.lower() == 'http://{0}:{1}/health'.format(restutil.KNOWN_WIRESERVER_IP, restutil.HOST_PLUGIN_PORT)
@staticmethod
def is_host_plugin_extension_artifact_request(url):
return url.lower() == 'http://{0}:{1}/extensionartifact'.format(restutil.KNOWN_WIRESERVER_IP, restutil.HOST_PLUGIN_PORT)
@staticmethod
def is_host_plugin_status_request(url):
return url.lower() == 'http://{0}:{1}/status'.format(restutil.KNOWN_WIRESERVER_IP, restutil.HOST_PLUGIN_PORT)
@staticmethod
def is_host_plugin_extension_request(request_url, request_kwargs, extension_url):
if not HttpRequestPredicates.is_host_plugin_extension_artifact_request(request_url):
return False
artifact_location = HttpRequestPredicates._get_host_plugin_request_artifact_location(request_url, request_kwargs)
return artifact_location == extension_url
@staticmethod
def is_host_plugin_in_vm_artifacts_profile_request(url, request_kwargs):
if not HttpRequestPredicates.is_host_plugin_extension_artifact_request(url):
return False
artifact_location = HttpRequestPredicates._get_host_plugin_request_artifact_location(url, request_kwargs)
return HttpRequestPredicates.is_in_vm_artifacts_profile_request(artifact_location)
@staticmethod
def is_host_plugin_put_logs_request(url):
return url.lower() == 'http://{0}:{1}/vmagentlog'.format(restutil.KNOWN_WIRESERVER_IP,
restutil.HOST_PLUGIN_PORT)
class MockHttpResponse:
def __init__(self, status, body=''):
self.body = body
self.status = status
def read(self, *_):
return self.body
| apache-2.0 | -2,802,583,881,621,370,000 | 43.573394 | 144 | 0.678605 | false |
Hanaasagi/sorator | orator/query/grammars/postgres_grammar.py | 1 | 4336 | # -*- coding: utf-8 -*-
from .grammar import QueryGrammar
class PostgresQueryGrammar(QueryGrammar):
_operators = [
'=', '<', '>', '<=', '>=', '<>', '!=',
'like', 'not like', 'between', 'ilike',
'&', '|', '#', '<<', '>>'
]
marker = '%s'
def _compile_lock(self, query, value):
"""
Compile the lock into SQL
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param value: The lock value
:type value: bool or str
:return: The compiled lock
:rtype: str
"""
if isinstance(value, str):
return value
if value:
return 'FOR UPDATE'
return 'FOR SHARE'
def compile_update(self, query, values):
"""
Compile an update statement into SQL
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The update values
:type values: dict
:return: The compiled update
:rtype: str
"""
table = self.wrap_table(query.from__)
columns = self._compile_update_columns(values)
from_ = self._compile_update_from(query)
where = self._compile_update_wheres(query)
return ('UPDATE %s SET %s%s %s' %
(table, columns, from_, where)).strip()
def _compile_update_columns(self, values):
"""
Compile the columns for the update statement
:param values: The columns
:type values: dict
:return: The compiled columns
:rtype: str
"""
columns = []
for key, value in values.items():
columns.append('%s = %s' % (self.wrap(key), self.parameter(value)))
return ', '.join(columns)
def _compile_update_from(self, query):
"""
Compile the "from" clause for an update with a join.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:return: The compiled sql
:rtype: str
"""
if not query.joins:
return ''
froms = []
for join in query.joins:
froms.append(self.wrap_table(join.table))
if len(froms):
return ' FROM %s' % ', '.join(froms)
return ''
def _compile_update_wheres(self, query):
"""
Compile the additional where clauses for updates with joins.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:return: The compiled sql
:rtype: str
"""
base_where = self._compile_wheres(query)
if not query.joins:
return base_where
join_where = self._compile_update_join_wheres(query)
if not base_where.strip():
return 'WHERE %s' % self._remove_leading_boolean(join_where)
return '%s %s' % (base_where, join_where)
def _compile_update_join_wheres(self, query):
"""
Compile the "join" clauses for an update.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:return: The compiled sql
:rtype: str
"""
join_wheres = []
for join in query.joins:
for clause in join.clauses:
join_wheres.append(self._compile_join_constraints(clause))
return ' '.join(join_wheres)
def compile_insert_get_id(self, query, values, sequence=None):
"""
Compile an insert and get ID statement into SQL.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The values to insert
:type values: dict
:param sequence: The id sequence
:type sequence: str
:return: The compiled statement
:rtype: str
"""
if sequence is None:
sequence = 'id'
return '%s RETURNING %s'\
% (self.compile_insert(query, values), self.wrap(sequence))
def compile_truncate(self, query):
"""
Compile a truncate table statement into SQL.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:return: The compiled statement
:rtype: str
"""
return {
'TRUNCATE %s RESTART IDENTITY' % self.wrap_table(query.from__): {}
}
| mit | 5,993,220,880,501,831,000 | 23.777143 | 79 | 0.545895 | false |
peckhams/topoflow | topoflow/utils/ncts_files.py | 1 | 27069 |
# This new version (6/11/10) hasn't been tested yet.
# Can't run unit tests on my MacPro w/o Nio.
#---------------------------------------------------
# S.D. Peckham
# Sept 2014 (new version to use netCDF4)
# May, June 2010
import os
import sys
import time
import numpy as np
import file_utils
import netCDF4 as nc
#-------------------------------------------------------------------
# This class is for I/O of time series data to netCDF files.
#-------------------------------------------------------------------
#
# unit_test1()
# unit_test2()
# save_as_text() # (not ready yet)
#
# class ncts_file():
#
# import_netCDF4()
# open_file()
# get_dtype_map()
# open_new_file()
# update_time_index()
#-----------------------------
# add_value()
# get_value()
#-----------------------------
# values_at_IDs()
# add_values_at_IDs()
#-----------------------------
# add_series()
# get_series()
#-----------------------------
# close_file()
# close()
#
#-------------------------------------------------------------------
def unit_test1(n_values=10, VERBOSE=False,
file_name="NCTS_Series_Test.nc"):
#--------------------------------------------------------
# Notes: This test uses add_value() and get_value() to
# add and retrieve a time series to/from a file,
# one value at a time.
#--------------------------------------------------------
print ' '
print 'Running unit_test1()...'
#-------------------------------------
# Make instance of ncts_file() class
#-------------------------------------
ncts = ncts_file()
var_names = ['depth']
OK = ncts.open_new_file( file_name,
var_names=var_names,
long_names=["depth of water"],
units_names=["meters"],
dtypes=['float32'],
comment="Created by TopoFlow 3.0.")
## time_long_name='time',
## time_units_name="minutes")
###########################################################
# WHAT ABOUT UNITS AND LONG_NAME for the TIME VALUES ??
###########################################################
if not(OK):
print 'ERROR during open_new_file().'
return
series = np.sqrt(np.arange( n_values, dtype='Float32'))
times = np.arange( n_values, dtype='Float32') * 60.0
#--------------------------
# Add time series to file
#--------------------------
print 'Writing values to NCTS file...'
for time_index in xrange(n_values):
time = times[ time_index ]
value = series[ time_index ]
ncts.add_value( value, var_names[0], time )
#----------------------------------------
ncts.update_time_index()
if (VERBOSE):
        print ncts.ncts_unit   # (print a summary)
ncts.close_file()
print 'Finished writing ncts file: ' + file_name
print ' '
#--------------------------------------------
# Re-open the file and read the time series
#--------------------------------------------
OK = ncts.open_file( file_name )
if not(OK): return
print 'Reading values from ncts file: '
for time_index in xrange(n_values):
value, time = ncts.get_value(var_names[0], time_index)
ti_str = str(time_index)
t_str = 'time[' + ti_str + '], '
v_str = 'value[' + ti_str + '] = '
print (t_str + v_str), time, value
## print '-----------------------------------------------'
#-----------------
# Close the file
#-----------------
ncts.close_file()
print 'Finished reading ncts file: ' + file_name
print ' '
# unit_test1()
#-------------------------------------------------------------------
def unit_test2(n_values=10, VERBOSE=False,
file_name="NCTS_Series_Test.nc"):
#--------------------------------------------------------
# Notes: This test uses add_series() and get_series() to
# add and retrieve a time series to/from a file,
# all values at once.
#--------------------------------------------------------
print ' '
print 'Running unit_test2()...'
#-------------------------------------
# Make instance of ncts_file() class
#-------------------------------------
ncts = ncts_file()
var_name = "depth"
OK = ncts.open_new_file( file_name,
var_names=[var_name],
long_names=["depth of water"],
units_names=["meters"],
dtypes=['float32'],
time_units='minutes',
comment="Created by TopoFlow 3.0.")
###############################################
# WHAT ABOUT LONG_NAME for the TIME VALUES ??
###############################################
if not(OK):
print 'ERROR during open_new_file().'
return
series = np.sqrt(np.arange( n_values, dtype='Float32'))
times = np.arange( n_values, dtype='Float32') * 60.0
#--------------------------
# Add time series to file
#--------------------------
print 'Writing values to NCTS file...'
    ncts.add_series( series, var_name, times )
#--------------------------------------------
ncts.update_time_index( step=n_values )
if (VERBOSE):
        print ncts.ncts_unit   # (print a summary)
ncts.close_file()
print 'Finished writing ncts file: ' + file_name
print ' '
#--------------------------------------------
# Re-open the file and read the time series
#--------------------------------------------
OK = ncts.open_file( file_name )
if not(OK): return
print 'Reading values from ncts file: '
    series, times = ncts.get_series( var_name )
for n in xrange(n_values):
time = times[n]
value = series[n]
ti_str = str(n)
t_str = 'time[' + ti_str + '], '
v_str = 'value[' + ti_str + '] = '
print (t_str + v_str), time, value
## print '-----------------------------------------------'
#-----------------
# Close the file
#-----------------
ncts.close_file()
print 'Finished reading ncts file: ' + file_name
print ' '
# unit_test2()
#-------------------------------------------------------------------
def save_as_text(ncts_file_name=None, text_file_name=None):
ncts = ncts_file()
OK = ncts.open_file( ncts_file_name )
if not(OK): return
var_name = 'H'
    data, times = ncts.get_series( var_name )
    ncts.close()
    data = np.array( data )
    print 'min(data), max(data) =', data.min(), data.max()
    text_unit = open( text_file_name, 'w' )
    data.tofile( text_unit, sep='\n' )   # (nonempty sep writes values as text)
    text_unit.close()
# save_as_text()
#-------------------------------------------------------------------
class ncts_file():
#----------------------------------------------------------
# Note: ncts = NetCDF Time Series (used by CSDMS)
#----------------------------------------------------------
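    #----------------------------------------------------------
    # Typical calling pattern, shown here only as a sketch
    # (the variable name 'Q' and its units are illustrative;
    # see unit_test1() above for a complete, runnable example):
    #
    #     ncts = ncts_file()
    #     ncts.open_new_file( 'Test_0D-Q.nc', var_names=['Q'],
    #                         long_names=['discharge'],
    #                         units_names=['m^3/s'],
    #                         dtypes=['float32'] )
    #     ncts.add_value( 1.2, 'Q', time=0.0 )
    #     ncts.update_time_index()
    #     ncts.close_file()
    #----------------------------------------------------------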
def import_netCDF4(self):
try:
import netCDF4
# print 'Imported netCDF4 version: ' + netCDF4.__version__
return netCDF4
except:
## print ' '
## print 'SORRY, Cannot write netCDF files because'
## print 'the "netCDF4" package cannot be imported.'
## print ' '
## python_version = sys.version[:3]
## if (python_version != '2.6'):
## print 'Note that "PyNIO" is only installed for'
## print 'Python version 2.6 on "beach".'
## print 'The current Python version is:', python_version
## print ' '
return False
# import_netCDF4()
#----------------------------------------------------------
def open_file(self, file_name):
#-------------------------
# Open file to read only
#-------------------------
try:
ncts_unit = nc.Dataset(file_name, mode='r')
self.ncts_unit = ncts_unit
### return ncts_unit
return True
except:
return False
# open_file()
#----------------------------------------------------------
def get_dtype_map(self):
#----------------------------------------
# Possible settings for "dtype_code"
#----------------------------------------------------
# These two-char codes are used for netCDF4 package
#----------------------------------------------------
# See: http://unidata.github.io/netcdf4-python/
#----------------------------------------------------
dtype_map = {'float64':'f8', 'float32':'f4',
'int64':'i8', 'int32':'i4',
'int16':'i2', 'int8':'i1',
'S|100':'S1'} # ( ????? )
#-------------------------------------------------
# These one-char codes are used for Nio in PyNIO
#-------------------------------------------------
# dtype_code = "d" # (double, Float64)
# dtype_code = "f" # (float, Float32)
# dtype_code = "l" # (long, Int64)
# dtype_code = "i" # (int, Int32)
# dtype_code = "h" # (short, Int16)
# dtype_code = "b" # (byte, Int8)
# dtype_code = "S1" # (char)
#-------------------------------------------
# dtype_map = {'float64':'d', 'float32':'f',
# 'int64':'l', 'int32':'i',
# 'int16':'s', 'int8':'b',
# 'S|100':'S1'} # (check last entry)
return dtype_map
# get_dtype_map()
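    #----------------------------------------------------------
    # Example:  get_dtype_map()['float32'] returns 'f4', the
    # two-char type code that open_new_file() later passes to
    # createVariable() for that column.
    #----------------------------------------------------------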
#----------------------------------------------------------
def open_new_file(self, file_name,
var_names=['X'],
long_names=[None],
units_names=['None'],
dtypes=['float32'],
### dtypes=['float64'],
time_units='minutes',
comment=''):
#----------------------------
# Does file already exist ?
#----------------------------
file_name = file_utils.check_overwrite( file_name )
#---------------------------------------
# Check and store the time series info
#---------------------------------------
self.format = 'ncts'
self.file_name = file_name
self.time_index = 0
if (long_names[0] is None):
long_names = var_names
#-------------------------------------------
# We may not need to save these in self.
# I don't think they're used anywhere yet.
#-------------------------------------------
self.var_names = var_names
self.long_names = long_names
self.units_names = units_names
self.time_units = time_units
self.dtypes = dtypes
#---------------------------------------------
# Create array of dtype codes from dtypes
# for multiple time series (i.e. columns).
#---------------------------------------------
dtype_map = self.get_dtype_map()
dtype_codes = []
if (len(dtypes) == len(var_names)):
for dtype in dtypes:
dtype_code = dtype_map[ dtype.lower() ]
dtype_codes.append( dtype_code )
else:
dtype = dtypes[0]
dtype_code = dtype_map[ dtype.lower() ]
for k in xrange(len(var_names)):
dtype_codes.append( dtype_code )
self.dtype_codes = dtype_codes
#-------------------------------------
# Open a new netCDF file for writing
#--------------------------------------------------------------
# Format options are: NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET,
# NETCDF3_64BIT_DATA, NETCDF4_CLASSIC, and NETCDF4
#-----------------------------------------------------------------
# NETCDF3_CLASSIC results in MUCH SMALLER filesizes than using
# NETCDF4_CLASSIC or NETCDF4.
# NETCDF3_CLASSIC, June_20_67_0D-Q.nc, 5200 bytes
# NETCDF4_CLASSIC, June_20_67_0D-Q.nc, 4217537 Bytes
# The 2nd one is 811 TIMES BIGGER, even after setting chunksize.
#-----------------------------------------------------------------
# For more info see: http://unidata.github.io/netcdf4-python/
#-----------------------------------------------------------------
# The "nccopy" utility can convert between these formats.
#-----------------------------------------------------------------
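        #-----------------------------------------------------------------
        # For example, a shell command like the one below (assuming the
        # netCDF "nccopy" utility is installed; filenames illustrative)
        # converts a classic-format file to netCDF-4 format:
        #     nccopy -k netCDF-4 June_20_67_0D-Q.nc June_20_67_0D-Q_v4.nc
        #-----------------------------------------------------------------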
try:
format = 'NETCDF3_CLASSIC'
### format = 'NETCDF4'
### format = 'NETCDF4_CLASSIC' # (before 2/19/17)
ncts_unit = nc.Dataset(file_name, mode='w', format=format)
OK = True
except:
OK = False
return OK
#------------------------------------------------------------
# Option to pre-fill with fill values
# Set fill_value for a var with "var._Fill_Value = number"
# For Nio was: opt.PreFill = False # (for efficiency)
#------------------------------------------------------------
ncts_unit.set_fill_off()
# ncts_unit.set_fill_on()
#-------------------------------------
# Prepare and save a history string
#-------------------------------------
# Sample output from time.asctime():
# "Thu Oct 8 17:10:18 2009"
#-------------------------------------
history = "Created using netCDF4 " + nc.__version__ + " on "
history = history + time.asctime() + ". "
history = history + comment
ncts_unit.history = history
#------------------------------------------------
# Create an unlimited time dimension (via None)
#------------------------------------------------
# Without using "int()" for length, we get this:
# TypeError: size must be None or integer
#------------------------------------------------
ncts_unit.createDimension("time", None)
#-------------------------
# Create a time variable
#---------------------------------------------------
#('f' = float32; must match in add_values_at_IDs()
#---------------------------------------------------
# NB! Can't use "time" vs. "tvar" here unless we
# add "import time" inside this function.
#---------------------------------------------------
tvar = ncts_unit.createVariable('time', 'f8', ("time",))
ncts_unit.variables['time'].units = time_units
#-----------------------------------
# Create variables using var_names
#-----------------------------------
# Returns "var" as a PyNIO object
#----------------------------------------------------------
# NB! The 3rd argument here (dimension), must be a tuple.
# If there is only one dimension, then we need to add a
# comma, as shown.
#-----------------------------------------------------------
# (2/19/17) For "0D" netCDF files created by TF, the files
# are much too large with the default chunksize. By using
# chunksizes=[1], filesize for Treynor is reduced by a
# factor of 6.9 (4.25 MB vs. 29.38 MB).
#-----------------------------------------------------------
# But even this is 287.9 times bigger than the TXT file!
#-----------------------------------------------------------
# Default chunksize in NetCDF 4.4.1.1 = 4MB.
#-----------------------------------------------------------
n_vars = len( var_names )
for k in xrange( n_vars ):
var_name = var_names[k]
## var = ncts_unit.createVariable(var_name, dtype_codes[k], ("time",))
## var = ncts_unit.createVariable(var_name, dtype_codes[k], ("time",), chunksizes=[512])
## var = ncts_unit.createVariable(var_name, dtype_codes[k], ("time",), chunksizes=[1])
## var = ncts_unit.createVariable(var_name, dtype_codes[k], ("time",), chunksizes=[4000])
var = ncts_unit.createVariable(var_name, dtype_codes[k], ("time",), chunksizes=[n_vars])
#------------------------------------
# Create attributes of the variable
#------------------------------------
ncts_unit.variables[var_name].long_name = long_names[k]
ncts_unit.variables[var_name].units = units_names[k]
#----------------------------------
# Specify a "nodata" fill value ?
#----------------------------------
# var._Fill_Value = -9999.0 ## Used for pre-fill above ?
self.ncts_unit = ncts_unit
return OK
# open_new_file()
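    #----------------------------------------------------------
    # Example (illustrative names): a file holding several
    # time-series columns can be opened like this; if "dtypes"
    # has a single entry, it is applied to every column:
    #
    #     ncts.open_new_file( 'Outlets_0D-Q.nc',
    #                         var_names=['Q_24_32', 'Q_10_11'],
    #                         long_names=['discharge', 'discharge'],
    #                         units_names=['m^3/s', 'm^3/s'],
    #                         dtypes=['float32'],
    #                         time_units='minutes' )
    #----------------------------------------------------------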
#----------------------------------------------------------
def update_time_index(self, step=1):
#---------------------------------------------------
# We shouldn't update clock in every add_value()
# call because different values (for same time)
# may be written with different add_value() calls.
#---------------------------------------------------
#------------------------------------
# Increment the internal time index
#------------------------------------
self.time_index += step
# update_time_index()
#----------------------------------------------------------
def add_value(self, value, var_name, time=None,
time_index=-1):
#---------------------------------------------------
# Note: "time_index" allows insertion/overwrite
# of a value at a particular location.
#---------------------------------------------------
# This syntax works for scalars and grids
# nc_unit.variables[var_name].assign_value( value )
#---------------------------------------------------
#-------------------------------------
# Can use time_index to overwrite an
# existing grid vs. simple append.
#-------------------------------------
if (time_index == -1):
time_index = self.time_index
if (time is None):
time = np.float64( time_index )
#---------------------------------------
# Write a time to existing netCDF file
#---------------------------------------
times = self.ncts_unit.variables[ 'time' ]
times[ time_index ] = time
#---------------------------------------------
# Write a data value to existing netCDF file
#---------------------------------------------
values = self.ncts_unit.variables[ var_name ]
values[ time_index ] = value
####################################################
# We shouldn't update clock in every add_value()
# call because different values (for same time)
# may be written with different add_value() calls.
####################################################
#------------------------------------
# Increment the internal time index
#------------------------------------
# self.time_index += 1
#-------------------------------------------------
# 12/2/09: netCDF is supposed to take care of
# byteorder transparently. However, we need to
# make sure we don't byteswap in the function
# "model_output.save_value_to_file()" when the
# output format is netCDF.
#-------------------------------------------------
## if (sys.byteorder == 'big'):
## var[time_index] = value
## else:
## value2 = value.copy()
## var[time_index] = value2.byteswap()
## self.time_index += 1
# add_value()
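    #----------------------------------------------------------
    # Example calling pattern (illustrative names) showing why
    # the time index is advanced once per time step instead of
    # inside every add_value() call:
    #
    #     ncts.add_value( Q_out, 'Q', time )
    #     ncts.add_value( H_out, 'H', time )
    #     ncts.update_time_index()
    #----------------------------------------------------------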
#----------------------------------------------------------
def get_value(self, var_name, time_index):
values = self.ncts_unit.variables[ var_name ]
times = self.ncts_unit.variables[ 'time' ]
return (values[ time_index ], times[ time_index ])
# get_value()
#-------------------------------------------------------------------
def values_at_IDs(self, var, IDs):
#----------------------------------------------------------
# Notes: If "var" is a grid, subscript with self.IDs to
# get a 1D array of values. If "var" is scalar,
# return a vector with the scalar value repeated
# once for each ID in self.IDs.
#----------------------------------------------------------
#---------------------------------
# Is variable a grid or scalar ?
#---------------------------------
if (np.ndim(var) > 0):
return np.float32( var[ IDs ] )
else:
#-----------------------------------------------------
# (3/16/07) Bug fix. This gets used in case of q0,
# which is a scalar when INFIL_ALL_SCALARS is true.
# Without this, don't get a value for every ID.
#-----------------------------------------------------
n_IDs = np.size(IDs[0])
vector = np.zeros( n_IDs, dtype='Float32')
return (vector + np.float32(var))
# values_at_IDs()
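    # Sketch of the two cases described above (names and numbers are
    # illustrative only):
    ## IDs  = (np.array([1, 3]), np.array([2, 0]))      # (rows, cols)
    ## grid = np.arange(16, dtype='float64').reshape(4, 4)
    ## self.values_at_IDs( grid, IDs )   # -> array([  6.,  12.], dtype=float32)
    ## self.values_at_IDs( 2.5,  IDs )   # -> array([ 2.5,  2.5], dtype=float32)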
#-------------------------------------------------------------------
def add_values_at_IDs(self, time, var, var_name, IDs,
time_index=-1):
#---------------------------------------------------
# Note: Here "var" is typically a grid and IDs are
# (row,col) subscripts into the grid. A set
# of variable names are constructed from the
# actual "var_name" (e.g. "Q") and the
# row and column. Note that we must have
# called open_new_file() with these same
# var_names.
#---------------------------------------------------
# Note: "time_index" allows insertion/overwrite
# of a value at a particular location.
#---------------------------------------------------
# This syntax works for scalars and grids
# nc_unit.variables[var_name].assign_value( value )
#---------------------------------------------------
#-------------------------------------
# Can use time_index to overwrite an
# existing grid vs. simple append.
#-------------------------------------
if (time_index == -1):
time_index = self.time_index
#---------------------------------------------
# Write current time to existing netCDF file
#---------------------------------------------
times = self.ncts_unit.variables[ 'time' ]
times[ time_index ] = time
#--------------------------------------------
# Write data values to existing netCDF file
#--------------------------------------------
vals = self.values_at_IDs( var, IDs )
rows = IDs[0]
cols = IDs[1]
n_IDs = np.size(rows)
for k in xrange(n_IDs):
#----------------------------------------
# Construct var_name of form: Q[24,32]
# or, if necessary, Q_24_32
#----------------------------------------
row_str = '_' + str(rows[k])
col_str = '_' + str(cols[k])
#--------------------------------------------------
# Must match with model_output.open_new_ts_file()
#--------------------------------------------------
## row_str = '[' + str(rows[k]) + ','
## col_str = str(cols[k]) + ']'
vname = var_name + row_str + col_str
values = self.ncts_unit.variables[ vname ]
values[ time_index ] = vals[k]
#---------------------------
# Increment the time index
#---------------------------
self.time_index += 1
# add_values_at_IDs()
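    # Sketch of the per-pixel naming scheme described above, assuming
    # open_new_file() defined the matching name (here 'Q_24_32') and
    # that "Q_grid" is the current grid of values (illustrative only):
    ## IDs = (np.array([24]), np.array([32]))
    ## self.add_values_at_IDs( 60.0, Q_grid, 'Q', IDs )
    ## # --> writes Q_grid[24,32] to the netCDF variable 'Q_24_32'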
#-------------------------------------------------------------------
def add_series(self, values, var_name, times,
time_index=-1):
#-----------------------------------------------------
# Note: "time_index" allows insertion/overwrite
# of a time series at a particular location.
#-----------------------------------------------------
# This syntax works for scalars and grids
# nc_unit.variables[var_name].assign_value( values )
#-----------------------------------------------------
#-------------------------------------
# Can use time_index to overwrite an
# existing grid vs. simple append.
#-------------------------------------
if (time_index == -1):
time_index = self.time_index
#---------------------------------------------
# Write a data value to existing netCDF file
#---------------------------------------------
series = self.ncts_unit.variables[ var_name ]
series[:] = values
######################################################
# WE SHOULDN'T update clock in every add_value()
# call because different vars (e.g. the time)
# must be written with different add_value() calls.
######################################################
#------------------------------------
# Increment the internal time index
#------------------------------------
# self.time_index += np.size(values)
# add_series()
#----------------------------------------------------------
def get_series(self, var_name):
series = self.ncts_unit.variables[ var_name ]
times = self.ncts_unit.variables[ 'time' ]
return (series, times)
# get_series()
#-------------------------------------------------------------------
def close_file(self):
# self.ncts_unit.sync() ## (netCDF4 has no "flush")
self.ncts_unit.close()
# close_file()
#-------------------------------------------------------------------
def close(self):
# self.ncts_unit.sync() ## (netCDF4 has no "flush")
self.ncts_unit.close()
# close()
#-------------------------------------------------------------------
| mit | 8,897,951,236,339,802,000 | 38.690616 | 101 | 0.367468 | false |
compiteing/flask-ponypermission | venv/lib/python2.7/site-packages/pony/orm/tests/test_relations_one2one1.py | 1 | 4536 | from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
db = Database('sqlite', ':memory:')
class Male(db.Entity):
name = Required(unicode)
wife = Optional('Female', column='wife')
class Female(db.Entity):
name = Required(unicode)
husband = Optional('Male')
db.generate_mapping(create_tables=True)
class TestOneToOne(unittest.TestCase):
def setUp(self):
with db_session:
db.execute('delete from male')
db.execute('delete from female')
db.insert(Female, id=1, name='F1')
db.insert(Female, id=2, name='F2')
db.insert(Female, id=3, name='F3')
db.insert(Male, id=1, name='M1', wife=1)
db.insert(Male, id=2, name='M2', wife=2)
db.insert(Male, id=3, name='M3', wife=None)
db_session.__enter__()
def tearDown(self):
db_session.__exit__()
def test_1(self):
Male[3].wife = Female[3]
self.assertEqual(Male[3]._vals_[Male.wife], Female[3])
self.assertEqual(Female[3]._vals_[Female.husband], Male[3])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([1, 2, 3], wives)
def test_2(self):
Female[3].husband = Male[3]
self.assertEqual(Male[3]._vals_[Male.wife], Female[3])
self.assertEqual(Female[3]._vals_[Female.husband], Male[3])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([1, 2, 3], wives)
def test_3(self):
Male[1].wife = None
self.assertEqual(Male[1]._vals_[Male.wife], None)
self.assertEqual(Female[1]._vals_[Female.husband], None)
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([None, 2, None], wives)
def test_4(self):
Female[1].husband = None
self.assertEqual(Male[1]._vals_[Male.wife], None)
self.assertEqual(Female[1]._vals_[Female.husband], None)
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([None, 2, None], wives)
def test_5(self):
Male[1].wife = Female[3]
self.assertEqual(Male[1]._vals_[Male.wife], Female[3])
self.assertEqual(Female[1]._vals_[Female.husband], None)
self.assertEqual(Female[3]._vals_[Female.husband], Male[1])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([3, 2, None], wives)
def test_6(self):
Female[3].husband = Male[1]
self.assertEqual(Male[1]._vals_[Male.wife], Female[3])
self.assertEqual(Female[1]._vals_[Female.husband], None)
self.assertEqual(Female[3]._vals_[Female.husband], Male[1])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([3, 2, None], wives)
def test_7(self):
Male[1].wife = Female[2]
self.assertEqual(Male[1]._vals_[Male.wife], Female[2])
self.assertEqual(Male[2]._vals_[Male.wife], None)
self.assertEqual(Female[1]._vals_[Female.husband], None)
self.assertEqual(Female[2]._vals_[Female.husband], Male[1])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([2, None, None], wives)
def test_8(self):
Female[2].husband = Male[1]
self.assertEqual(Male[1]._vals_[Male.wife], Female[2])
self.assertEqual(Male[2]._vals_[Male.wife], None)
self.assertEqual(Female[1]._vals_[Female.husband], None)
self.assertEqual(Female[2]._vals_[Female.husband], Male[1])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([2, None, None], wives)
def test_to_dict_1(self):
m = Male[1]
d = m.to_dict()
self.assertEqual(d, dict(id=1, name='M1', wife=1))
def test_to_dict_2(self):
m = Male[3]
d = m.to_dict()
self.assertEqual(d, dict(id=3, name='M3', wife=None))
def test_to_dict_3(self):
f = Female[1]
d = f.to_dict()
self.assertEqual(d, dict(id=1, name='F1', husband=1))
def test_to_dict_4(self):
f = Female[3]
d = f.to_dict()
self.assertEqual(d, dict(id=3, name='F3', husband=None))
if __name__ == '__main__':
unittest.main()
| mit | -6,078,875,790,468,676,000 | 31.850746 | 67 | 0.564374 | false |
jianghuaw/nova | nova/tests/unit/api/openstack/compute/test_block_device_mapping.py | 1 | 17244 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import range
from webob import exc
from nova.api.openstack.compute import block_device_mapping
from nova.api.openstack.compute import servers as servers_v21
from nova import block_device
from nova.compute import api as compute_api
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
CONF = cfg.CONF
class BlockDeviceMappingTestV21(test.TestCase):
validation_error = exception.ValidationError
def _setup_controller(self):
self.controller = servers_v21.ServersController()
def setUp(self):
super(BlockDeviceMappingTestV21, self).setUp()
fakes.stub_out_nw_api(self)
self._setup_controller()
fake.stub_out_image_service(self)
self.bdm = [{
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'uuid': 'fake',
'device_name': 'vdb',
'delete_on_termination': False,
}]
def _get_servers_body(self, no_image=False):
body = {
'server': {
'name': 'server_test',
'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'flavorRef': 'http://localhost/123/flavors/3',
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
if no_image:
del body['server']['imageRef']
return body
def _test_create(self, params, no_image=False):
body = self._get_servers_body(no_image)
body['server'].update(params)
req = fakes.HTTPRequest.blank('/v2/fake/servers')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dump_as_bytes(body)
self.controller.create(req, body=body).obj['server']
def test_create_instance_with_volumes_enabled_no_image(self):
"""Test that the create will fail if there is no image
and no bdms supplied in the request
"""
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn('imageRef', kwargs)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self.assertRaises(exc.HTTPBadRequest,
self._test_create, {}, no_image=True)
@mock.patch.object(compute_api.API, '_validate_bdm')
@mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
def test_create_instance_with_bdms_and_no_image(
self, mock_bdm_image_metadata, mock_validate_bdm):
mock_bdm_image_metadata.return_value = {}
mock_validate_bdm.return_value = True
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertThat(
block_device.BlockDeviceDict(self.bdm[0]),
matchers.DictMatches(kwargs['block_device_mapping'][0])
)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self._test_create(params, no_image=True)
mock_validate_bdm.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, mock.ANY)
mock_bdm_image_metadata.assert_called_once_with(
mock.ANY, mock.ANY, False)
@mock.patch.object(compute_api.API, '_validate_bdm')
@mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
def test_create_instance_with_bdms_and_empty_imageRef(
self, mock_bdm_image_metadata, mock_validate_bdm):
mock_bdm_image_metadata.return_value = {}
mock_validate_bdm.return_value = True
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertThat(
block_device.BlockDeviceDict(self.bdm[0]),
matchers.DictMatches(kwargs['block_device_mapping'][0])
)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm,
'imageRef': ''}
self._test_create(params)
def test_create_instance_with_imageRef_as_full_url(self):
bdm = [{'device_name': 'foo'}]
image_href = ('http://localhost/v2/fake/images/'
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
params = {block_device_mapping.ATTRIBUTE_NAME: bdm,
'imageRef': image_href}
self.assertRaises(exception.ValidationError,
self._test_create, params)
def test_create_instance_with_non_uuid_imageRef(self):
bdm = [{'device_name': 'foo'}]
params = {block_device_mapping.ATTRIBUTE_NAME: bdm,
'imageRef': '123123abcd'}
self.assertRaises(exception.ValidationError,
self._test_create, params)
def test_create_instance_with_invalid_bdm_in_2nd_dict(self):
bdm_1st = {"source_type": "image", "delete_on_termination": True,
"boot_index": 0,
"uuid": "2ff3a1d3-ed70-4c3f-94ac-941461153bc0",
"destination_type": "local"}
bdm_2nd = {"source_type": "volume",
"uuid": "99d92140-3d0c-4ea5-a49c-f94c38c607f0",
"destination_type": "invalid"}
bdm = [bdm_1st, bdm_2nd]
params = {block_device_mapping.ATTRIBUTE_NAME: bdm,
'imageRef': '2ff3a1d3-ed70-4c3f-94ac-941461153bc0'}
self.assertRaises(exception.ValidationError,
self._test_create, params)
def test_create_instance_with_boot_index_none_ok(self):
"""Tests creating a server with two block devices. One is the boot
device and the other is a non-bootable device.
"""
# From the docs:
# To disable a device from booting, set the boot index to a negative
# value or use the default boot index value, which is None. The
        # simplest usage is to set the boot index of the boot device to 0 and use
# the default boot index value, None, for any other devices.
bdms = [
# This is the bootable device that would create a 20GB cinder
# volume from the given image.
{
'source_type': 'image',
'destination_type': 'volume',
'boot_index': 0,
'uuid': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'volume_size': 20
},
# This is the non-bootable 10GB ext4 ephemeral block device.
{
'source_type': 'blank',
'destination_type': 'local',
'boot_index': None,
# If 'guest_format' is 'swap' then a swap device is created.
'guest_format': 'ext4'
}
]
params = {block_device_mapping.ATTRIBUTE_NAME: bdms}
self._test_create(params, no_image=True)
def test_create_instance_with_boot_index_none_image_local_fails(self):
"""Tests creating a server with a local image-based block device which
has a boot_index of None which is invalid.
"""
bdms = [{
'source_type': 'image',
'destination_type': 'local',
'boot_index': None,
'uuid': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
}]
params = {block_device_mapping.ATTRIBUTE_NAME: bdms}
self.assertRaises(exc.HTTPBadRequest, self._test_create,
params, no_image=True)
def test_create_instance_with_invalid_boot_index(self):
bdm = [{"source_type": "image", "delete_on_termination": True,
"boot_index": 'invalid',
"uuid": "2ff3a1d3-ed70-4c3f-94ac-941461153bc0",
"destination_type": "local"}]
params = {block_device_mapping.ATTRIBUTE_NAME: bdm,
'imageRef': '2ff3a1d3-ed70-4c3f-94ac-941461153bc0'}
self.assertRaises(exception.ValidationError,
self._test_create, params)
def test_create_instance_with_device_name_not_string(self):
self.bdm[0]['device_name'] = 123
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(self.validation_error,
self._test_create, params, no_image=True)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_bdm_param_not_list(self, mock_create):
self.params = {'block_device_mapping': '/dev/vdb'}
self.assertRaises(self.validation_error,
self._test_create, self.params)
def test_create_instance_with_device_name_empty(self):
self.bdm[0]['device_name'] = ''
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(self.validation_error,
self._test_create, params, no_image=True)
def test_create_instance_with_device_name_too_long(self):
self.bdm[0]['device_name'] = 'a' * 256
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(self.validation_error,
self._test_create, params, no_image=True)
def test_create_instance_with_space_in_device_name(self):
self.bdm[0]['device_name'] = 'v da'
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertTrue(kwargs['legacy_bdm'])
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(self.validation_error,
self._test_create, params, no_image=True)
def test_create_instance_with_invalid_size(self):
self.bdm[0]['volume_size'] = 'hello world'
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(self.validation_error,
self._test_create, params, no_image=True)
def _test_create_instance_with_destination_type_error(self,
destination_type):
self.bdm[0]['destination_type'] = destination_type
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(self.validation_error,
self._test_create, params, no_image=True)
def test_create_instance_with_destination_type_empty_string(self):
self._test_create_instance_with_destination_type_error('')
def test_create_instance_with_invalid_destination_type(self):
self._test_create_instance_with_destination_type_error('fake')
@mock.patch.object(compute_api.API, '_validate_bdm')
def test_create_instance_bdm(self, mock_validate_bdm):
bdm = [{
'source_type': 'volume',
'device_name': 'fake_dev',
'uuid': 'fake_vol'
}]
bdm_expected = [{
'source_type': 'volume',
'device_name': 'fake_dev',
'volume_id': 'fake_vol'
}]
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertFalse(kwargs['legacy_bdm'])
for expected, received in zip(bdm_expected,
kwargs['block_device_mapping']):
self.assertThat(block_device.BlockDeviceDict(expected),
matchers.DictMatches(received))
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
self._test_create(params, no_image=True)
mock_validate_bdm.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY)
@mock.patch.object(compute_api.API, '_validate_bdm')
def test_create_instance_bdm_missing_device_name(self, mock_validate_bdm):
del self.bdm[0]['device_name']
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertFalse(kwargs['legacy_bdm'])
self.assertNotIn(None,
kwargs['block_device_mapping'][0]['device_name'])
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self._test_create(params, no_image=True)
mock_validate_bdm.assert_called_once_with(mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY)
@mock.patch.object(
block_device.BlockDeviceDict, '_validate',
side_effect=exception.InvalidBDMFormat(details='Wrong BDM'))
def test_create_instance_bdm_validation_error(self, mock_validate):
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
self.assertRaises(exc.HTTPBadRequest,
self._test_create, params, no_image=True)
@mock.patch('nova.compute.api.API._get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
self.assertRaises(exc.HTTPBadRequest, self._test_create, params,
no_image=True)
def test_create_instance_bdm_api_validation_fails(self):
self.validation_fail_test_validate_called = False
self.validation_fail_instance_destroy_called = False
bdm_exceptions = ((exception.InvalidBDMSnapshot, {'id': 'fake'}),
(exception.InvalidBDMVolume, {'id': 'fake'}),
(exception.InvalidBDMImage, {'id': 'fake'}),
(exception.InvalidBDMBootSequence, {}),
(exception.InvalidBDMLocalsLimit, {}))
ex_iter = iter(bdm_exceptions)
def _validate_bdm(*args, **kwargs):
self.validation_fail_test_validate_called = True
ex, kargs = next(ex_iter)
raise ex(**kargs)
def _instance_destroy(*args, **kwargs):
self.validation_fail_instance_destroy_called = True
self.stub_out('nova.compute.api.API._validate_bdm', _validate_bdm)
self.stub_out('nova.objects.Instance.destroy', _instance_destroy)
for _unused in range(len(bdm_exceptions)):
params = {block_device_mapping.ATTRIBUTE_NAME:
[self.bdm[0].copy()]}
self.assertRaises(exc.HTTPBadRequest,
self._test_create, params)
self.assertTrue(self.validation_fail_test_validate_called)
self.assertFalse(self.validation_fail_instance_destroy_called)
self.validation_fail_test_validate_called = False
self.validation_fail_instance_destroy_called = False
| apache-2.0 | -440,329,227,146,488,260 | 39.478873 | 79 | 0.587799 | false |
math-a3k/django-ai | tests/apps/bayesian_networks/test_bns.py | 1 | 27510 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-ai
------------
Tests for `django-ai` models module.
"""
import random
import numpy as np
from bayespy.nodes import Gaussian
from django.test import (TestCase, )
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
# from django.urls import reverse
from django.contrib.auth.models import User
from django_ai.bayesian_networks import models
from django_ai.bayesian_networks.bayespy_constants import (
DIST_GAUSSIAN_ARD, DIST_GAMMA, DIST_GAUSSIAN, DET_ADD,
DIST_DIRICHLET, DIST_WISHART, DIST_CATEGORICAL, DIST_MIXTURE, )
from django_ai.bayesian_networks.utils import (
parse_node_args, mahalanobis_distance, )
from tests.test_models import models as test_models
class TestBN(TestCase):
def setUp(self):
# Set the seeds
random.seed(123456)
np.random.seed(123456)
# Set up the user
self.user, _ = User.objects.get_or_create(
username='testadmin', email='[email protected]',
is_superuser=True
)
self.user.set_password("12345")
self.user.save()
self.client.login(username='testadmin', password='12345')
# BN 1
self.bn1, _ = models.BayesianNetwork.objects.get_or_create(
name="BN for tests - 1")
self.mu, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn1,
name="mu",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_GAUSSIAN_ARD,
distribution_params="0, 1e-6",
graph_interval="-10, 20"
)
self.tau, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn1,
name="tau",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_GAMMA,
distribution_params="1e-6, 1e-6",
graph_interval="1e-6, 0.1"
)
self.ui_avg1, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn1,
name="userinfo.avg1",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=True,
distribution=DIST_GAUSSIAN_ARD,
distribution_params="mu, tau",
)
self.ui_avg1_col, _ = \
models.BayesianNetworkNodeColumn.objects.get_or_create(
node=self.ui_avg1,
ref_model=ContentType.objects.get(model="userinfo",
app_label="test_models"),
ref_column="avg1",
)
self.e1, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn1,
description="mu -> userinfo.avg1",
parent=self.mu,
child=self.ui_avg1
)
self.e2, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn1,
description="tau -> userinfo.avg1",
parent=self.tau,
child=self.ui_avg1
)
# BN 2
self.bn2, _ = models.BayesianNetwork.objects.get_or_create(
name="BN for tests - 2")
self.x1, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn2,
name="x1",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_GAUSSIAN,
distribution_params="[0, 0], [[1, 0], [0,1]]",
)
self.x2, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn2,
name="x2",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_GAUSSIAN,
distribution_params="[1, 1], [[1, 0], [0,1]]",
)
self.z, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn2,
name="z",
node_type=models.BayesianNetworkNode.NODE_TYPE_DETERMINISTIC,
is_observable=False,
deterministic=DET_ADD,
deterministic_params="x1, x2",
)
self.bn2e1, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn2,
description="x1 -> z",
parent=self.x1,
child=self.z
)
self.bn2e2, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn2,
description="x2 -> z",
parent=self.x2,
child=self.z
)
# BN 3 (Clustering)
self.bn3, _ = models.BayesianNetwork.objects.get_or_create(
name="Clustering (testing)",
network_type=models.BayesianNetwork.BN_TYPE_CLUSTERING,
engine_meta_iterations=10,
results_storage="dmf:test_models.userinfo.cluster_1",
counter_threshold=2,
threshold_actions=":recalculate",
)
self.alpha, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn3,
name="alpha",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_DIRICHLET,
distribution_params="numpy.full(10, 1e-05)",
)
self.Z, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn3,
name="Z",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_CATEGORICAL,
distribution_params="alpha, plates=(:dl_Y, ), :ifr",
)
self.mu_c, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn3,
name="mu",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_GAUSSIAN,
distribution_params=("numpy.zeros(2), [[1e-5,0], [0, 1e-5]], "
"plates=(10, )"),
)
self.Lambda, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn3,
name="Lambda",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=False,
distribution=DIST_WISHART,
distribution_params="2, [[1e-5,0], [0, 1e-5]], plates=(10, )",
)
self.Y, _ = models.BayesianNetworkNode.objects.get_or_create(
network=self.bn3,
name="Y",
node_type=models.BayesianNetworkNode.NODE_TYPE_STOCHASTIC,
is_observable=True,
distribution=DIST_MIXTURE,
distribution_params=("Z, @bayespy.nodes.Gaussian(), "
"mu, Lambda, :noplates"),
)
#
self.Y_col_avg_logged, _ = \
models.BayesianNetworkNodeColumn.objects.get_or_create(
node=self.Y,
ref_model=ContentType.objects.get(
model="userinfo", app_label="test_models"),
ref_column="avg_time_pages"
)
self.Y_col_avg_pages_a, _ = \
models.BayesianNetworkNodeColumn.objects.get_or_create(
node=self.Y,
ref_model=ContentType.objects.get(
model="userinfo", app_label="test_models"),
ref_column="avg_time_pages_a"
)
#
self.alpha_to_Z, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn3,
description="alpha -> Z",
parent=self.alpha,
child=self.Z
)
self.Z_to_Y, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn3,
description="Z -> Y",
parent=self.Z,
child=self.Y
)
self.mu_to_Y, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn3,
description="mu -> Y",
parent=self.mu_c,
child=self.Y
)
self.Lambda_to_Y, _ = models.BayesianNetworkEdge.objects.get_or_create(
network=self.bn3,
description="Lambda -> Y",
parent=self.Lambda,
child=self.Y
)
def test_bn_inference(self):
self.bn1.perform_inference(recalculate=True)
Q = self.bn1.engine_object
mu = Q['mu'].get_moments()[0]
tau = Q['tau'].get_moments()[0]
# For avoiding rounding and float differences
self.assertEqual(str(mu)[:5], '9.809')
self.assertEqual(str(tau)[:5], '0.039')
def test_bn_cached_eo(self):
self.bn1.get_engine_object()
expected_output = self.bn1.engine_object
actual_output = self.bn1.get_engine_object()
self.assertEqual(expected_output, actual_output)
def test_ww_bn_reset_inference(self):
"""
Django parallel test running has issues, the 'ww' in the test name
        is to make it run at the end, where no problems arise
"""
self.setUp()
expected_clean_metadata = {
"clusters_labels": {},
"prev_clusters_labels": {},
"clusters_means": {},
"prev_clusters_means": {},
"clusters_sizes": {},
"prev_clusters_sizes": {},
"columns": [],
}
        # Avoid unnecessary calculation
self.bn3.engine_meta_iterations = 1
#
self.bn3.perform_inference()
self.assertTrue(self.bn3.engine_object is not None)
self.assertTrue(self.bn3.engine_object_timestamp is not None)
self.assertTrue(self.bn3.metadata != expected_clean_metadata)
results = test_models.UserInfo.objects.values_list(
"cluster_1", flat=True)
self.assertTrue(any(list(results)))
self.bn3.reset_inference()
self.assertTrue(self.bn3.engine_object is None)
self.assertTrue(self.bn3.engine_object_timestamp is None)
self.assertTrue(self.bn3.metadata == expected_clean_metadata)
results = test_models.UserInfo.objects.values_list(
"cluster_1", flat=True)
self.assertTrue(not any(list(results)))
def test_bn_deterministic_nodes(self):
# Initialize the EO
self.bn2.get_engine_object(reconstruct=True, save=True)
self.z.refresh_from_db()
z_eo = self.z.get_engine_object()
expected_moments = [np.array([1., 1.]),
np.array([[3., 1.], [1., 3.]])]
moments = z_eo.get_moments()
self.assertTrue(all(expected_moments[0] == moments[0]))
self.assertTrue(all(expected_moments[1][0] == moments[1][0]))
self.assertTrue(all(expected_moments[1][1] == moments[1][1]))
def test_bn_validation(self):
# Test invalid syntax
with self.assertRaises(ValidationError):
self.bn3.results_storage = "drf-examples.models.blabla"
self.bn3.full_clean()
# Test invalid engine
with self.assertRaises(ValidationError):
self.bn3.results_storage = "drf:examples.models.blabla"
self.bn3.full_clean()
# Test 'dfm' invalid path
with self.assertRaises(ValidationError):
self.bn3.results_storage = "drf:examples.models"
self.bn3.full_clean()
# Test 'dfm' invalid model
with self.assertRaises(ValidationError):
self.bn3.results_storage = "drf:tests.non-existant-model"
self.bn3.full_clean()
# Test 'dfm' invalid field
with self.assertRaises(ValidationError):
self.bn3.results_storage = "drf:tests.UserInfo.n-e-field"
self.bn3.full_clean()
# Test 'dfm' correct content
self.bn3.results_storage = "dmf:test_models.UserInfo.cluster_1"
self.assertEqual(self.bn3.full_clean(), None)
def test_bn_node_validation(self):
# Test First Step: fields corresponds to Node type
with self.assertRaises(ValidationError):
self.mu.deterministic_params = "a, b"
self.mu.full_clean()
self.setUp()
with self.assertRaises(ValidationError):
self.mu.node_type = \
models.BayesianNetworkNode.NODE_TYPE_DETERMINISTIC
self.mu.full_clean()
# Test Second Step: Validations on Stochastic Types
# Stochastic Nodes must have a Distribution
self.setUp()
with self.assertRaises(ValidationError):
self.mu.distribution = None
self.mu.full_clean()
# Stochastic Nodes must have a Distribution Params
self.setUp()
with self.assertRaises(ValidationError):
self.mu.distribution_params = None
self.mu.full_clean()
# Test Third Step: Validations on Deterministic Types
# Deterministic Nodes must have a function
self.setUp()
with self.assertRaises(ValidationError):
self.z.deterministic = None
self.z.full_clean()
# Deterministic Nodes must have function parameters
self.setUp()
with self.assertRaises(ValidationError):
self.z.deterministic_params = None
self.z.full_clean()
# Test Fourth Step: Arg parsing
self.setUp()
with self.assertRaises(ValidationError):
self.mu.distribution_params = '#my_param1, %my_param2'
self.mu.full_clean()
# Test Final Step: BayesPy initialization
self.setUp()
with self.assertRaises(ValidationError):
self.mu.distribution_params = "1, 2, 3, 4, 5"
self.mu.full_clean()
def test_node_column_validation(self):
# Node Columns must reference a model
self.setUp()
with self.assertRaises(ValidationError):
self.ui_avg1_col.ref_model = None
self.ui_avg1_col.full_clean()
# Node Columns must be linked to a field or a callable of a model
self.setUp()
with self.assertRaises(ValidationError):
self.ui_avg1_col.ref_column = None
self.ui_avg1_col.full_clean()
# Node Columns must be linked to an existing fields of a model
self.setUp()
with self.assertRaises(ValidationError):
self.ui_avg1_col.ref_column = "non-existant-field"
self.ui_avg1_col.full_clean()
def test_bn_get_nodes_names(self):
expected_output = ['mu', 'tau', 'userinfo.avg1']
actual_output = list(self.bn1.get_nodes_names())
self.assertEqual(expected_output, actual_output)
def test_node_get_data(self):
# Test no columns assigned
self.setUp()
with self.assertRaises(ValueError):
self.ui_avg1.data_columns.all().delete()
self.ui_avg1.get_data()
# Test not-matching column lengths
self.setUp()
smaller_data_column, _ = \
models.BayesianNetworkNodeColumn.objects.get_or_create(
node=self.ui_avg1,
ref_model=ContentType.objects.get(
model="userinfo2", app_label="test_models"),
ref_column="avg2"
)
with self.assertRaises(ValidationError):
self.ui_avg1.data_columns.add(smaller_data_column)
self.ui_avg1.get_data()
smaller_data_column.delete()
# Test correct functioning
self.setUp()
expected_output = list(test_models.UserInfo.objects.values_list(
"avg1", flat=True))
actual_output = list(self.ui_avg1.get_data())
self.assertEqual(expected_output, actual_output)
def test_node_get_params_type(self):
self.assertEqual(self.mu.get_params_type(), "distribution")
self.assertEqual(self.z.get_params_type(), "deterministic")
def test_node_reset_engine_object(self):
self.bn1.perform_inference(recalculate=True)
self.ui_avg1 = self.bn1.nodes.last()
self.assertTrue(self.ui_avg1.engine_object is not None)
self.assertTrue(self.ui_avg1.engine_object_timestamp is not None)
self.ui_avg1.reset_engine_object()
self.assertTrue(self.ui_avg1.engine_object is None)
self.assertTrue(self.ui_avg1.engine_object_timestamp is None)
def test_node_get_engine_inferred_object(self):
self.bn1.perform_inference(recalculate=True)
expected_output = self.bn1.engine_object['userinfo.avg1']
actual_output = self.ui_avg1.get_engine_inferred_object()
self.assertEqual(expected_output, actual_output)
def test_node_resolve_eos_in_params(self):
self.z.deterministic_params = "x1, x2, x3"
with self.assertRaises(ValueError):
self.z.get_engine_object()
self.z.deterministic_params = "x1, x2, kp=x3"
with self.assertRaises(ValueError):
self.z.get_engine_object()
def test_bn_meta_iterations(self):
self.setUp()
self.bn1.engine_meta_iterations = 5
self.bn1.perform_inference(recalculate=True)
# There must be a dict of size 5
self.assertTrue(len(self.bn1._eo_meta_iterations) == 5)
# containing the same likelihood as there isn't random initialization
for iteration in self.bn1._eo_meta_iterations:
self.assertEqual(
str(self.bn1._eo_meta_iterations[iteration]["L"])[:7],
"-630.42"
)
def test_bn_engine_iterations(self):
self.setUp()
self.bn1.engine_iterations = 1
self.bn1.perform_inference(recalculate=True)
# There must be a dict of size 1, as engine_meta_iterations defaults to
# 1
self.assertTrue(len(self.bn1._eo_meta_iterations) == 1)
# containing the likelihood of the second iteration
self.assertTrue(
str(self.bn1._eo_meta_iterations[0]["eo"].L[0]) != "nan"
)
self.assertEqual(
str(self.bn1._eo_meta_iterations[0]["eo"].L[1]),
"nan"
)
def test_bn_update_eos_struct(self):
bn1_eos_struct = {n.name: {"dm": n, "eo": None}
for n in self.bn1.nodes.all()}
node = self.bn1.nodes.get(name="userinfo.avg1")
models.BayesianNetwork.update_eos_struct(bn1_eos_struct, node)
self.assertTrue('Gamma' in
str(bn1_eos_struct['tau']['eo'].__class__))
self.assertTrue('GaussianARD' in
str(bn1_eos_struct['mu']['eo'].__class__))
def test_node_children(self):
expected_output = [self.ui_avg1]
actual_output = list(self.mu.children())
self.assertEqual(expected_output, actual_output)
def test_bn_whole_clustering(self):
self.setUp()
# Test metadata initialization
expected_initial_metadata = {
"clusters_labels": {},
"prev_clusters_labels": {},
"clusters_means": {},
"prev_clusters_means": {},
"clusters_sizes": {},
"prev_clusters_sizes": {},
"columns": [],
}
self.assertEqual(self.bn3.metadata, expected_initial_metadata)
# Test inference and clustering methods through metadata
self.bn3.perform_inference(recalculate=True)
expected_metadata = {
'prev_clusters_labels': {},
'prev_clusters_means': {},
'clusters_means': {
'A': np.array([0., 0.]),
'B': np.array([16., 16.]),
'C': np.array([20., 20.]),
'D': np.array([20., 20.]),
'E': np.array([25., 25.]),
},
'clusters_labels': {'4': 'E', '1': 'A', '5': 'A', '3': 'A',
'2': 'B', '8': 'A', '7': 'A', '0': 'C',
'6': 'D', '9': 'A'},
'clusters_sizes': {'A': 0, 'B': 50, 'C': 51, 'D': 49, 'E': 50},
'columns': ['avg_time_pages', 'avg_time_pages_a']
}
output_metadata = self.bn3.metadata
self.assertEqual(
output_metadata["prev_clusters_labels"],
expected_metadata["prev_clusters_labels"]
)
self.assertEqual(
output_metadata["prev_clusters_means"],
expected_metadata["prev_clusters_means"]
)
# Test BN.metadata_update_clusters_sizes()
self.assertEqual(
output_metadata["clusters_sizes"],
expected_metadata["clusters_sizes"]
)
# Test BN.assign_clusters_labels()
for cluster in expected_metadata["clusters_means"]:
o_cm = output_metadata["clusters_means"][cluster]
e_cm = expected_metadata["clusters_means"][cluster]
# Check that the cluster means are 'reasonably close' to
# the original ones
self.assertTrue(np.linalg.norm(e_cm - o_cm) ** 2 < 1)
del(output_metadata["clusters_means"][cluster])
self.assertEqual(
output_metadata["clusters_means"],
{}
)
self.assertEqual(
output_metadata["clusters_labels"],
expected_metadata["clusters_labels"],
)
# Test BN.columns_names_to_metadata()
self.assertEqual(
output_metadata["columns"],
expected_metadata["columns"]
)
# Test Results Storage
# BN.get_results()
results = self.bn3.get_results()
        # Test results are OK (omitting the rest to avoid pasting a
# list of size 200)
self.assertEqual(results[150:], ["B" for x in range(50)])
# Edge case
self.assertFalse(self.bn1.get_results())
# BN.store_results()
self.bn3.store_results()
stored_results = test_models.UserInfo.objects.all().values_list(
'cluster_1', flat=True)
# Test results are stored OK
self.assertEqual(list(results), list(stored_results))
# -> Test BN.threshold_actions validations
self.bn3.threshold_actions = ":recalculate :not-allowed-action"
with self.assertRaises(ValidationError):
self.bn3.full_clean()
# -> Test BN.counter, BN.counter_threshold and BN.threshold_actions
# Test Triggering an inference
self.threshold_actions = ":recalculate"
prev_timestamp = self.bn3.engine_object_timestamp
self.bn3.counter = 2
self.bn3.save()
# Test the inference has been run by the timestamp
self.assertTrue(self.bn3.engine_object_timestamp > prev_timestamp)
# Test the counter was reset
self.assertEqual(self.bn3.counter, 0)
# Test BN.assign_cluster()
self.assertEqual(
self.bn3.assign_cluster([10, 10]),
"B"
)
self.bn3.reset_inference()
self.assertFalse(
self.bn3.assign_cluster([10, 10])
)
self.assertFalse(
self.bn1.assign_cluster([10, 10])
)
def test_node_args_parsing(self):
# Test "general" parsing
args_string = ('True, :ifr, numpy.ones(2), [[1,2], [3,4]], '
'type=rect, sizes=[3, 4,], coords = ([1,2],[3,4]), '
'func=numpy.zeros(2), plates=:no')
expected_output = {
'args': [
True,
':ifr',
np.array([1., 1.]),
[[1, 2], [3, 4]]
],
'kwargs': {
'type': 'rect',
'sizes': [3, 4],
'coords': ([1, 2], [3, 4]),
'func': np.array([0., 0.]),
'plates': ':no',
}
}
output = parse_node_args(args_string)
# "np.array == np.array" does not return a single bool in NumPy,
# then the comparison "output == expected_output" does not work
# with Django tests. I think I also hit a bug, because for some
# reason, the comparison function that unittest uses for nested
# lists is the array comparison of NumPy and not the standard list
# comparison of Python.
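        # For instance, np.array([1, 2]) == np.array([1, 2]) evaluates to
        # array([ True,  True]) rather than a single bool, so it cannot be
        # fed directly to assertEqual on the whole nested structure.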
# Test Positional Args
positions_tested = []
for position, arg in enumerate(output["args"]):
# For nested lists, don't know why but it keeps using the
            # NumPy array comparison despite not being of its class
if isinstance(arg, np.ndarray) or isinstance(arg, list):
comp = (expected_output["args"][position] ==
output["args"][position])
if not isinstance(comp, bool):
comp = all(comp)
self.assertEqual(comp, True)
else:
self.assertEqual(
expected_output["args"][position],
output["args"][position]
)
positions_tested.insert(0, position)
# Remove the tested elements from output
for pt in positions_tested:
del(output['args'][pt])
# Test Keyword Args
for kw in expected_output['kwargs'].keys():
if (isinstance(expected_output['kwargs'][kw], np.ndarray) or
isinstance(expected_output['kwargs'][kw], list)):
comp = (expected_output['kwargs'][kw] == output["kwargs"][kw])
if not isinstance(comp, bool):
comp = all(comp)
self.assertEqual(comp, True)
else:
self.assertEqual(
expected_output['kwargs'][kw],
output["kwargs"][kw]
)
# Remove the tested element from output
del(output['kwargs'][kw])
# Check there is nothing left in the output
self.assertEqual(output, {"args": [], "kwargs": {}})
# Test not allowed functions
with self.assertRaises(ValueError):
parse_node_args("shutil.rmtree('/')")
with self.assertRaises(ValueError):
parse_node_args("eval('<malicious_code>')")
# Test referencing to a function
args_string = ('@bayespy.nodes.Gaussian()')
expected_output = {
'args': [Gaussian],
'kwargs': {}
}
output = parse_node_args(args_string)
self.assertEqual(output, expected_output)
        # Test invalid function invocation
with self.settings(DJANGO_AI_WHITELISTED_MODULES=["numpy", ]):
# Reimport the function with the new settings
from django_ai.bayesian_networks.utils import \
parse_node_args as pnn
with self.assertRaises(ValueError):
pnn("numpy.ones(k)")
# Test flat output in args parsing
expected_output = [np.array([1., 1.])]
output = parse_node_args("numpy.ones(2)", flat=True)
self.assertTrue(all(output[0] == expected_output[0]))
output.pop(0)
self.assertEqual(output, [])
def test_utils_misc(self):
# Test Mahalanobis Distance
self.assertEqual(
mahalanobis_distance([0, 1], [1, 0], [[2, 0], [0, 2]]), 1.0
)
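        # (diff = [-1, 1]; with inverse covariance diag(0.5, 0.5) the
        # quadratic form is 0.5 + 0.5 = 1, so the distance is sqrt(1) = 1.0)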
def tearDown(self):
self.bn1.image.delete()
self.mu.image.delete()
self.tau.image.delete()
test_models.UserInfo.objects.all().update(cluster_1=None)
| lgpl-3.0 | 6,808,538,192,501,962,000 | 38.927431 | 79 | 0.564995 | false |
spadae22/odoo | addons/stock_account/wizard/stock_invoice_onshipping.py | 1 | 6916 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
JOURNAL_TYPE_MAP = {
('outgoing', 'customer'): ['sale'],
('outgoing', 'supplier'): ['purchase_refund'],
('outgoing', 'transit'): ['sale', 'purchase_refund'],
('incoming', 'supplier'): ['purchase'],
('incoming', 'customer'): ['sale_refund'],
('incoming', 'transit'): ['purchase', 'sale_refund'],
}
class stock_invoice_onshipping(osv.osv_memory):
def _get_journal(self, cr, uid, context=None):
journal_obj = self.pool.get('account.journal')
journal_type = self._get_journal_type(cr, uid, context=context)
journals = journal_obj.search(cr, uid, [('type', '=', journal_type)])
return journals and journals[0] or False
def _get_journal_type(self, cr, uid, context=None):
if context is None:
context = {}
res_ids = context and context.get('active_ids', [])
pick_obj = self.pool.get('stock.picking')
pickings = pick_obj.browse(cr, uid, res_ids, context=context)
pick = pickings and pickings[0]
if not pick or not pick.move_lines:
return 'sale'
type = pick.picking_type_id.code
usage = pick.move_lines[0].location_id.usage if type == 'incoming' else pick.move_lines[0].location_dest_id.usage
return JOURNAL_TYPE_MAP.get((type, usage), ['sale'])[0]
_name = "stock.invoice.onshipping"
_description = "Stock Invoice Onshipping"
_columns = {
'journal_id': fields.many2one('account.journal', 'Destination Journal', required=True),
'journal_type': fields.selection([('purchase_refund', 'Refund Purchase'), ('purchase', 'Create Vendor Invoice'),
('sale_refund', 'Refund Sale'), ('sale', 'Create Customer Invoice')], 'Journal Type', readonly=True),
'group': fields.boolean("Group by partner"),
'invoice_date': fields.date('Invoice Date'),
}
_defaults = {
'journal_type': _get_journal_type,
'journal_id' : _get_journal,
}
def onchange_journal_id(self, cr, uid, ids, journal_id, context=None):
if context is None:
context = {}
domain = {}
value = {}
active_id = context.get('active_id')
if active_id:
picking = self.pool['stock.picking'].browse(cr, uid, active_id, context=context)
type = picking.picking_type_id.code
usage = picking.move_lines[0].location_id.usage if type == 'incoming' else picking.move_lines[0].location_dest_id.usage
journal_types = JOURNAL_TYPE_MAP.get((type, usage), ['sale', 'purchase', 'sale_refund', 'purchase_refund'])
domain['journal_id'] = [('type', 'in', journal_types)]
if journal_id:
journal = self.pool['account.journal'].browse(cr, uid, journal_id, context=context)
value['journal_type'] = journal.type
return {'value': value, 'domain': domain}
def view_init(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
res = super(stock_invoice_onshipping, self).view_init(cr, uid, fields_list, context=context)
pick_obj = self.pool.get('stock.picking')
count = 0
active_ids = context.get('active_ids',[])
for pick in pick_obj.browse(cr, uid, active_ids, context=context):
if pick.invoice_state != '2binvoiced':
count += 1
if len(active_ids) == count:
raise osv.except_osv(_('Warning!'), _('None of these picking lists require invoicing.'))
return res
def open_invoice(self, cr, uid, ids, context=None):
if context is None:
context = {}
invoice_ids = self.create_invoice(cr, uid, ids, context=context)
if not invoice_ids:
raise osv.except_osv(_('Error!'), _('No invoice created!'))
data = self.browse(cr, uid, ids[0], context=context)
action_model = False
action = {}
journal2type = {'sale':'out_invoice', 'purchase':'in_invoice' , 'sale_refund':'out_refund', 'purchase_refund':'in_refund'}
inv_type = journal2type.get(data.journal_type) or 'out_invoice'
data_pool = self.pool.get('ir.model.data')
if inv_type == "out_invoice":
action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree1')
elif inv_type == "in_invoice":
action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree2')
elif inv_type == "out_refund":
action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree3')
elif inv_type == "in_refund":
action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree4')
if action_id:
action_pool = self.pool['ir.actions.act_window']
action = action_pool.read(cr, uid, action_id, context=context)
action['domain'] = "[('id','in', ["+','.join(map(str,invoice_ids))+"])]"
return action
return True
def create_invoice(self, cr, uid, ids, context=None):
context = dict(context or {})
picking_pool = self.pool.get('stock.picking')
data = self.browse(cr, uid, ids[0], context=context)
journal2type = {'sale':'out_invoice', 'purchase':'in_invoice', 'sale_refund':'out_refund', 'purchase_refund':'in_refund'}
context['date_inv'] = data.invoice_date
acc_journal = self.pool.get("account.journal")
inv_type = journal2type.get(data.journal_type) or 'out_invoice'
context['inv_type'] = inv_type
active_ids = context.get('active_ids', [])
res = picking_pool.action_invoice_create(cr, uid, active_ids,
journal_id = data.journal_id.id,
group = data.group,
type = inv_type,
context=context)
return res
| agpl-3.0 | -8,529,741,307,361,504,000 | 45.106667 | 143 | 0.590081 | false |
Ecogenomics/GTDBNCBI | scripts_dev/lpsn_scrape/lpsn_to_database.py | 1 | 4370 | #!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__prog_name__ = 'update_database_from_ftp.py'
__prog_desc__ = ('Update the LPSN tables in GTDB. ' +
                 'LPSN tables are independent of the metadata GTDB information')
__author__ = 'Pierre Chaumeil'
__copyright__ = 'Copyright 2016'
__credits__ = ['Pierre Chaumeil']
__license__ = 'GPL3'
__version__ = '0.0.1'
__maintainer__ = 'Pierre Chaumeil'
__email__ = '[email protected]'
__status__ = 'Development'
import os
import argparse
import sys
from database_configuration import GenomeDatabaseConnectionLPSNUpdate
class UpdateLPSNDatabase(object):
def __init__(self, path):
self.path = path
self.lpsn_genera_file = os.path.join(path, 'lpsn_genera.tsv')
self.lpsn_strains_file = os.path.join(path, 'lpsn_strains.tsv')
self.lpsn_species_file = os.path.join(path, 'lpsn_species.tsv')
self.temp_con = GenomeDatabaseConnectionLPSNUpdate.GenomeDatabaseConnectionLPSNUpdate()
self.temp_con.MakePostgresConnection()
self.temp_cur = self.temp_con.cursor()
def runUpdate(self):
# Check if the files exist:
if os.path.isfile(self.lpsn_genera_file) and os.path.isfile(self.lpsn_strains_file) and os.path.isfile(self.lpsn_species_file):
self.temp_cur.execute('TRUNCATE lpsn_genera;')
print "Deletion lpsn_genera done"
fr = open(self.lpsn_genera_file)
fr.readline()
self.temp_cur.copy_from(fr, 'lpsn_genera')
print 'Copy lpsn_genera done'
self.temp_con.commit()
self.temp_cur.execute('TRUNCATE lpsn_species;')
print "Deletion lpsn_species done"
fr = open(self.lpsn_species_file)
fr.readline()
self.temp_cur.copy_from(fr, 'lpsn_species')
print 'Copy lpsn_species done'
self.temp_con.commit()
fr = open(self.lpsn_strains_file)
fr.readline()
self.temp_cur.execute('TRUNCATE lpsn_strains;')
print "Deletion lpsn_strains done"
self.temp_cur.copy_from(fr, 'lpsn_strains')
print 'Copy lpsn_strains done'
self.temp_con.commit()
else:
print 'Some files are missing in {0}'.format(self.path)
self.temp_con.ClosePostgresConnection()
if __name__ == "__main__":
print __prog_name__ + ' v' + __version__ + ': ' + __prog_desc__
print ' by ' + __author__ + ' (' + __email__ + ')' + '\n'
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lpsn_dir', dest="lpsn_dir",
required=True, help='Directory to the LPSN files lpsn_genera.tsv lpsn_species.tsv lpsn_strains.tsv')
args = parser.parse_args()
try:
update_lpsn_mngr = UpdateLPSNDatabase(args.lpsn_dir)
update_lpsn_mngr.runUpdate()
except SystemExit:
print "\nControlled exit resulting from an unrecoverable error or warning."
except:
print "\nUnexpected error:", sys.exc_info()[0]
raise
| gpl-3.0 | 6,039,277,418,561,814,000 | 41.019231 | 135 | 0.545538 | false |
openvenues/libpostal | scripts/geodata/osm/definitions.py | 1 | 2511 | import os
import re
import six
from collections import defaultdict
from geodata.graph.topsort import topsort
this_dir = os.path.realpath(os.path.dirname(__file__))
DEFAULT_SCRIPT_PATH = os.path.join(this_dir, 'fetch_osm_address_data.sh')
valid_key_regex = re.compile('VALID_(.*?)_KEYS="(.*)"')
variable_regex = re.compile(r'\$VALID_(.*?)_KEYS(?=\b)')
kv_regex = re.compile('([^\s]*)=([^\s]*)')
class OSMDefinitions(object):
ALL = '*'
ADMIN_BORDER = 'admin_border'
ADMIN_NODE = 'admin_node'
AEROWAY = 'aeroway'
AMENITY = 'amenity'
BUILDING = 'building'
HISTORIC = 'historic'
LANDUSE = 'landuse'
NATURAL = 'natural'
LOCALITY = 'locality'
NEIGHBORHOOD = 'neighborhood'
EXTENDED_NEIGHBORHOOD = 'extended_neighborhood'
OFFICE = 'office'
PLACE = 'place'
POPULATED_PLACE = 'populated_place'
SHOP = 'shop'
TOURISM = 'tourism'
VENUE = 'venue'
WATERWAY = 'waterway'
def __init__(self, filename=DEFAULT_SCRIPT_PATH):
script = open(filename).read()
dependencies = defaultdict(list)
definitions = {}
matches = valid_key_regex.findall(script)
match_text = {d.lower(): t for d, t in matches}
for definition, text in matches:
variables = variable_regex.findall(text)
if not variables:
dependencies[definition.lower()] = []
for v in variables:
dependencies[definition.lower()].append(v.lower())
for definition in topsort(dependencies):
definition = definition.lower()
text = match_text[definition]
variables = variable_regex.findall(text)
for v in variables:
v = v.lower()
text = text.replace('$VALID_{}_KEYS'.format(v.upper()), match_text[v])
kvs = defaultdict(set)
for k, v in kv_regex.findall(text):
if v != '':
kvs[k].add(v.lower())
else:
kvs[k].add(self.ALL)
definitions[definition] = kvs
self.definitions = definitions
def meets_definition(self, props, category):
defs = self.definitions.get(category, {})
if not defs:
return False
elif self.ALL in defs:
return True
for k, v in six.iteritems(props):
if v.lower() in defs.get(k.lower(), set()):
return True
return False
osm_definitions = OSMDefinitions()
| mit | -6,333,495,864,088,283,000 | 27.213483 | 86 | 0.571485 | false |
allanlei/django-saas | saas/multidb/models.py | 1 | 4469 | from django.db import models
from django.conf import settings
from django.utils import simplejson as json
from django.db import connections
from django.core.exceptions import ValidationError
from django.db.utils import ConnectionDoesNotExist
import managers
from signals import db_pre_load, db_post_load, db_pre_unload, db_post_unload
DEFAULT = settings.DATABASES['default']
def validate_json(value):
try:
json.loads(value)
except ValueError:
raise ValidationError('Database extra is not JSON serializable')
class Database(models.Model):
ENGINES = (
('django.db.backends.postgresql_psycopg2', 'django.db.backends.postgresql_psycopg2'),
('django.db.backends.postgresql', 'django.db.backends.postgresql'),
('django.db.backends.mysql', 'django.db.backends.mysql'),
('django.db.backends.sqlite3', 'django.db.backends.sqlite3'),
('django.db.backends.oracle', 'django.db.backends.oracle'),
)
db = models.CharField(max_length=256, unique=True, help_text='The database name that goes into Django settings')
engine = models.CharField(max_length=48, default=DEFAULT['ENGINE'], choices=ENGINES, help_text='Django database engine type')
name = models.CharField(max_length=256, null=False, blank=False, help_text='The name of the database')
user = models.CharField(max_length=24, blank=True, help_text='The database user')
password = models.CharField(max_length=512, blank=True, help_text='The password for the database user. Encrypted')
host = models.CharField(max_length=96, blank=True, default=DEFAULT['HOST'], help_text='The hostname of the database server')
port = models.CharField(max_length=24, blank=True, default=DEFAULT['PORT'], help_text='The port of the database server')
extra = models.TextField(default='{}', validators=[validate_json])
objects = managers.DatabaseManager()
def __unicode__(self):
return u'%s(%s)' % (self.db, self.engine.split('.')[-1])
@property
def settings(self):
return {
'ENGINE': self.engine,
'NAME': self.name,
'USER': self.user,
'PASSWORD': self.password,
'HOST': self.host,
'PORT': self.port,
'OPTIONS': self.options,
}
@property
def options(self):
try:
return json.loads(self.extra)
except json.JSONDecodeError:
self.extra = '{}'
return json.loads(self.extra)
@options.setter
def options(self, value):
self.extra = json.dumps(value)
def is_loaded(self):
return self.db in settings.DATABASES
def load(self):
db_pre_load.send(sender=self.__class__, instance=self)
loaded = False
if not self.is_loaded():
settings.DATABASES[self.db] = self.settings
loaded = True
db_post_load.send(sender=self.__class__, instance=self, loaded=loaded)
def unload(self):
db_pre_unload.send(sender=self.__class__, instance=self)
if self.is_loaded():
del settings.DATABASES[self.db]
self.disconnect()
db_post_unload.send(sender=self.__class__, instance=self)
def disconnect(self):
try:
connections[self.db].close()
except ConnectionDoesNotExist:
pass
if not self.is_loaded() and self.db in connections._connections:
del connections._connections[self.db]
def __enter__(self):
self.load()
def __exit__(self, *args, **exceptions):
self.unload()
from signals import create_db, drop_db, unload_db, startup_db
from django.db.backends.signals import connection_created
#if getattr(settings, 'SAAS_MULTIDB_STARTUP', True): connection_created.connect(startup_db, dispatch_uid='db_autoload')
if getattr(settings, 'SAAS_MULTIDB_AUTOCREATE', True): models.signals.post_save.connect(create_db, sender=Database)
if getattr(settings, 'SAAS_MULTIDB_AUTODROP', True): models.signals.post_delete.connect(drop_db, sender=Database)
if getattr(settings, 'SAAS_MULTIDB_AUTOUNLOAD', True): models.signals.post_delete.connect(unload_db, sender=Database)
def conn_created(sender, connection, **kwargs):
for key, conn in connections._connections.items():
if conn == connection: print 'Connected to %s' % key
#connection_created.connect(conn_created)
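# Illustrative usage sketch: 'tenant1' is a hypothetical database record.
# Loading registers the connection in settings.DATABASES; leaving the
# "with" block unloads it and closes any open connection.
def _example_database_usage():
    tenant = Database.objects.get(db='tenant1')
    with tenant:
        # while loaded, queries can be routed to it, e.g.
        # SomeModel.objects.using(tenant.db).count()
        pass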
| bsd-3-clause | 302,155,965,839,929,400 | 34.188976 | 129 | 0.653166 | false |
opencorato/sayit | speeches/south_migrations/0054__rename_popolospeaker.py | 1 | 19144 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_table('speeches_popolospeaker', 'speeches_speaker')
db.alter_column('speeches_recordingtimestamp', 'speaker_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['speeches.Speaker'], null=True, on_delete=models.SET_NULL))
db.alter_column('speeches_speech', 'speaker_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['speeches.Speaker'], null=True, on_delete=models.SET_NULL))
if not db.dry_run:
orm['contenttypes.contenttype'].objects.filter(app_label='speeches', model='speaker').update(model='oldspeaker')
orm['contenttypes.contenttype'].objects.filter(app_label='speeches', model='popolospeaker').update(model='speaker')
def backwards(self, orm):
db.rename_table('speeches_speaker', 'speeches_popolospeaker')
db.alter_column('speeches_recordingtimestamp', 'speaker_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['speeches.PopoloSpeaker'], null=True, on_delete=models.SET_NULL))
db.alter_column('speeches_speech', 'speaker_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['speeches.PopoloSpeaker'], null=True, on_delete=models.SET_NULL))
if not db.dry_run:
orm['contenttypes.contenttype'].objects.filter(app_label='speeches', model='speaker').update(model='popolospeaker')
orm['contenttypes.contenttype'].objects.filter(app_label='speeches', model='oldspeaker').update(model='speaker')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'instances.instance': {
'Meta': {'object_name': 'Instance'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_instances'", 'null': 'True', 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('instances.fields.DNSLabelField', [], {'unique': 'True', 'max_length': '63', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'instances'", 'blank': 'True', 'to': "orm['auth.User']"})
},
'popolo.contactdetail': {
'Meta': {'object_name': 'ContactDetail'},
'contact_type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created_at': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'end_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'start_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'updated_at': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'popolo.identifier': {
'Meta': {'object_name': 'Identifier'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'scheme': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
'popolo.link': {
'Meta': {'object_name': 'Link'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'popolo.othername': {
'Meta': {'object_name': 'OtherName'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'end_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'start_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'popolo.person': {
'Meta': {'object_name': 'Person'},
'additional_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'birth_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'created_at': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'death_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'family_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'given_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'honorific_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'honorific_suffix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'patronymic_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'sort_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'start_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'updated_at': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
'popolo.source': {
'Meta': {'object_name': 'Source'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'speeches.recording': {
'Meta': {'object_name': 'Recording'},
'audio': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'audio_duration': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['instances.Instance']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'speeches.recordingtimestamp': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'RecordingTimestamp'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['instances.Instance']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'recording': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'related_name': "'timestamps'", 'to': "orm['speeches.Recording']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['speeches.Speaker']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'speech': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['speeches.Speech']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'speeches.section': {
'Meta': {'ordering': "('id',)", 'unique_together': "(('parent', 'slug', 'instance'),)", 'object_name': 'Section'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['instances.Instance']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['speeches.Section']"}),
'slug': ('sluggable.fields.SluggableField', [], {'unique_with': "('parent', 'instance')", 'max_length': '50', 'populate_from': "'title'"}),
'source_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'speeches.slug': {
'Meta': {'object_name': 'Slug'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'redirect': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'speeches.speaker': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('instance', 'slug'),)", 'object_name': 'Speaker', '_ormbases': ['popolo.Person']},
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['instances.Instance']"}),
'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['popolo.Person']", 'unique': 'True', 'primary_key': 'True'}),
'slug': ('sluggable.fields.SluggableField', [], {'unique_with': "('instance',)", 'max_length': '50', 'populate_from': "'name'"})
},
'speeches.speech': {
'Meta': {'ordering': "('start_date', 'start_time', 'id')", 'object_name': 'Speech'},
'audio': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'blank': 'True'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'end_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['instances.Instance']"}),
'location': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['speeches.Section']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'source_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['speeches.Speaker']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'speaker_display': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['speeches.Tag']", 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'speeches.tag': {
'Meta': {'object_name': 'Tag'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['instances.Instance']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['speeches']
| agpl-3.0 | 8,611,772,012,662,599,000 | 84.464286 | 195 | 0.558765 | false |
RNAcentral/rnacentral-import-pipeline | rnacentral_pipeline/databases/genecards_suite/core/helpers.py | 1 | 1351 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rnacentral_pipeline.databases.helpers import phylogeny as phy
from . import data
def primary_id(context: data.Context, row) -> str:
gene = context.gene(row)
rna_id = context.urs(row)
return "%s:%s:%s" % (context.database, gene, rna_id)
def accession(context: data.Context, row) -> str:
return primary_id(context, row)
def taxid(context, row) -> int:
rna_id = context.urs(row)
return int(rna_id.split("_", 1)[1])
def species(context: data.Context, row) -> str:
return phy.species(taxid(context, row))
def lineage(context: data.Context, row) -> str:
return phy.lineage(taxid(context, row))
def common_name(context: data.Context, row) -> str:
return phy.common_name(taxid(context, row))
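# Illustrative note with hypothetical values: if context.database were
# "GENECARDS", context.gene(row) returned "BRCA1" and context.urs(row)
# returned "URS0000000001_9606", then primary_id()/accession() would yield
# "GENECARDS:BRCA1:URS0000000001_9606" and taxid() would recover 9606 from
# the text after the underscore in the URS id.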
| apache-2.0 | 924,909,323,599,753,000 | 29.022222 | 72 | 0.723168 | false |
hcs/mailman | src/mailman/core/switchboard.py | 1 | 11246 | # Copyright (C) 2001-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Queuing and dequeuing message/metadata pickle files.
Messages are represented as email.message.Message objects (or an instance of a
subclass). Metadata is represented as a Python dictionary. For every
message/metadata pair in a queue, a single file containing two pickles is
written. First, the message is written to the pickle, then the metadata
dictionary is written.
"""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'Switchboard',
'handle_ConfigurationUpdatedEvent',
]
import os
import time
import email
import pickle
import cPickle
import hashlib
import logging
from zope.interface import implementer
from mailman.config import config
from mailman.email.message import Message
from mailman.interfaces.configuration import ConfigurationUpdatedEvent
from mailman.interfaces.switchboard import ISwitchboard
from mailman.utilities.filesystem import makedirs
from mailman.utilities.string import expand
# 20 bytes of all bits set, maximum hashlib.sha.digest() value.
shamax = 0xffffffffffffffffffffffffffffffffffffffffL
# Small increment to add to time in case two entries have the same time. This
# prevents skipping one of two entries with the same time until the next pass.
DELTA = .0001
# We count the number of times a file has been moved to .bak and recovered.
# In order to prevent loops and a message flood, when the count reaches this
# value, we move the file to the bad queue as a .psv.
MAX_BAK_COUNT = 3
elog = logging.getLogger('mailman.error')
@implementer(ISwitchboard)
class Switchboard:
"""See `ISwitchboard`."""
def __init__(self, name, queue_directory,
slice=None, numslices=1, recover=False):
"""Create a switchboard object.
:param name: The queue name.
:type name: str
:param queue_directory: The queue directory.
:type queue_directory: str
:param slice: The slice number for this switchboard, or None. If not
None, it must be [0..`numslices`).
:type slice: int or None
:param numslices: The total number of slices to split this queue
directory into. It must be a power of 2.
:type numslices: int
:param recover: True if backup files should be recovered.
:type recover: bool
"""
assert (numslices & (numslices - 1)) == 0, (
'Not a power of 2: {0}'.format(numslices))
self.name = name
self.queue_directory = queue_directory
# If configured to, create the directory if it doesn't yet exist.
if config.create_paths:
makedirs(self.queue_directory, 0770)
# Fast track for no slices
self._lower = None
self._upper = None
# BAW: test performance and end-cases of this algorithm
if numslices <> 1:
self._lower = ((shamax + 1) * slice) / numslices
self._upper = (((shamax + 1) * (slice + 1)) / numslices) - 1
if recover:
self.recover_backup_files()
def enqueue(self, _msg, _metadata=None, **_kws):
"""See `ISwitchboard`."""
if _metadata is None:
_metadata = {}
# Calculate the SHA hexdigest of the message to get a unique base
# filename. We're also going to use the digest as a hash into the set
# of parallel runner processes.
data = _metadata.copy()
data.update(_kws)
listname = data.get('listname', '--nolist--')
# Get some data for the input to the sha hash.
now = time.time()
if data.get('_plaintext'):
protocol = 0
msgsave = cPickle.dumps(str(_msg), protocol)
else:
protocol = pickle.HIGHEST_PROTOCOL
msgsave = cPickle.dumps(_msg, protocol)
# listname is unicode but the input to the hash function must be an
# 8-bit string (eventually, a bytes object).
hashfood = msgsave + listname.encode('utf-8') + repr(now)
# Encode the current time into the file name for FIFO sorting. The
# file name consists of two parts separated by a '+': the received
# time for this message (i.e. when it first showed up on this system)
# and the sha hex digest.
filebase = repr(now) + '+' + hashlib.sha1(hashfood).hexdigest()
filename = os.path.join(self.queue_directory, filebase + '.pck')
tmpfile = filename + '.tmp'
# Always add the metadata schema version number
data['version'] = config.QFILE_SCHEMA_VERSION
# Filter out volatile entries. Use .keys() so that we can mutate the
# dictionary during the iteration.
for k in data.keys():
if k.startswith('_'):
del data[k]
# We have to tell the dequeue() method whether to parse the message
# object or not.
data['_parsemsg'] = (protocol == 0)
# Write to the pickle file the message object and metadata.
with open(tmpfile, 'w') as fp:
fp.write(msgsave)
cPickle.dump(data, fp, protocol)
fp.flush()
os.fsync(fp.fileno())
os.rename(tmpfile, filename)
return filebase
def dequeue(self, filebase):
"""See `ISwitchboard`."""
# Calculate the filename from the given filebase.
filename = os.path.join(self.queue_directory, filebase + '.pck')
backfile = os.path.join(self.queue_directory, filebase + '.bak')
# Read the message object and metadata.
with open(filename) as fp:
# Move the file to the backup file name for processing. If this
# process crashes uncleanly the .bak file will be used to
# re-instate the .pck file in order to try again.
os.rename(filename, backfile)
msg = cPickle.load(fp)
data = cPickle.load(fp)
if data.get('_parsemsg'):
# Calculate the original size of the text now so that we won't
# have to generate the message later when we do size restriction
# checking.
original_size = len(msg)
msg = email.message_from_string(msg, Message)
msg.original_size = original_size
data['original_size'] = original_size
return msg, data
def finish(self, filebase, preserve=False):
"""See `ISwitchboard`."""
bakfile = os.path.join(self.queue_directory, filebase + '.bak')
try:
if preserve:
bad_dir = config.switchboards['bad'].queue_directory
psvfile = os.path.join(bad_dir, filebase + '.psv')
os.rename(bakfile, psvfile)
else:
os.unlink(bakfile)
except EnvironmentError:
elog.exception(
'Failed to unlink/preserve backup file: %s', bakfile)
@property
def files(self):
"""See `ISwitchboard`."""
return self.get_files()
def get_files(self, extension='.pck'):
"""See `ISwitchboard`."""
times = {}
lower = self._lower
upper = self._upper
for f in os.listdir(self.queue_directory):
# By ignoring anything that doesn't end in .pck, we ignore
# tempfiles and avoid a race condition.
filebase, ext = os.path.splitext(f)
if ext <> extension:
continue
when, digest = filebase.split('+', 1)
# Throw out any files which don't match our bitrange. BAW: test
# performance and end-cases of this algorithm. MAS: both
# comparisons need to be <= to get complete range.
if lower is None or (lower <= long(digest, 16) <= upper):
key = float(when)
while key in times:
key += DELTA
times[key] = filebase
# FIFO sort
return [times[key] for key in sorted(times)]
def recover_backup_files(self):
"""See `ISwitchboard`."""
# Move all .bak files in our slice to .pck. It's impossible for both
# to exist at the same time, so the move is enough to ensure that our
# normal dequeuing process will handle them. We keep count in
# _bak_count in the metadata of the number of times we recover this
# file. When the count reaches MAX_BAK_COUNT, we move the .bak file
# to a .psv file in the bad queue.
for filebase in self.get_files('.bak'):
src = os.path.join(self.queue_directory, filebase + '.bak')
dst = os.path.join(self.queue_directory, filebase + '.pck')
with open(src, 'rb+') as fp:
try:
msg = cPickle.load(fp)
data_pos = fp.tell()
data = cPickle.load(fp)
except Exception as error:
# If unpickling throws any exception, just log and
# preserve this entry
elog.error('Unpickling .bak exception: %s\n'
'Preserving file: %s', error, filebase)
self.finish(filebase, preserve=True)
else:
data['_bak_count'] = data.get('_bak_count', 0) + 1
fp.seek(data_pos)
if data.get('_parsemsg'):
protocol = 0
else:
protocol = 1
cPickle.dump(data, fp, protocol)
fp.truncate()
fp.flush()
os.fsync(fp.fileno())
if data['_bak_count'] >= MAX_BAK_COUNT:
elog.error('.bak file max count, preserving file: %s',
filebase)
self.finish(filebase, preserve=True)
else:
os.rename(src, dst)
def handle_ConfigurationUpdatedEvent(event):
"""Initialize the global switchboards for input/output."""
if not isinstance(event, ConfigurationUpdatedEvent):
return
config = event.config
for conf in config.runner_configs:
name = conf.name.split('.')[-1]
assert name not in config.switchboards, (
'Duplicate runner name: {0}'.format(name))
substitutions = config.paths
substitutions['name'] = name
path = expand(conf.path, substitutions)
config.switchboards[name] = Switchboard(name, path)
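# Illustrative usage sketch: the queue name and directory are hypothetical
# examples of the enqueue/dequeue/finish cycle described in the module
# docstring.
def _example_switchboard_cycle(msg):
    switchboard = Switchboard('in', '/tmp/mailman-queue-example')
    filebase = switchboard.enqueue(msg, listname='example@example.com')
    for entry in switchboard.files:
        queued_msg, metadata = switchboard.dequeue(entry)
        # ... process queued_msg / metadata, then discard the .bak file ...
        switchboard.finish(entry)
    return filebase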
| gpl-3.0 | -6,313,387,617,212,159,000 | 40.043796 | 78 | 0.599057 | false |
evanepio/dotmanca | dotmanca/users/tests/test_views.py | 1 | 2027 | from django.test import RequestFactory
from django.test import TestCase
from ..views import (
UserRedirectView,
UserUpdateView
)
from ..models import User
class BaseUserTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='testuser',
email=None,
password='notalamodespassword')
self.factory = RequestFactory()
class TestUserRedirectView(BaseUserTestCase):
def test_get_redirect_url(self):
# Instantiate the view directly. Never do this outside a test!
view = UserRedirectView()
# Generate a fake request
request = self.factory.get('/fake-url')
# Attach the user to the request
request.user = self.user
# Attach the request to the view
view.request = request
# Expect: '/users/testuser/', as that is the default username for
# self.make_user()
self.assertEqual(
view.get_redirect_url(),
'/users/testuser/'
)
class TestUserUpdateView(BaseUserTestCase):
def setUp(self):
# call BaseUserTestCase.setUp()
super(TestUserUpdateView, self).setUp()
# Instantiate the view directly. Never do this outside a test!
self.view = UserUpdateView()
# Generate a fake request
request = self.factory.get('/fake-url')
# Attach the user to the request
request.user = self.user
# Attach the request to the view
self.view.request = request
def test_get_success_url(self):
# Expect: '/users/testuser/', as that is the default username for
# self.make_user()
self.assertEqual(
self.view.get_success_url(),
'/users/testuser/'
)
def test_get_object(self):
# Expect: self.user, as that is the request's user object
self.assertEqual(
self.view.get_object(),
self.user
)
| mit | -7,667,624,099,219,692,000 | 28.808824 | 76 | 0.593981 | false |
all-of-us/raw-data-repository | rdr_service/services/google_sheets_client.py | 1 | 13026 | import backoff
from googleapiclient import discovery
from googleapiclient.errors import HttpError
from oauth2client.service_account import ServiceAccountCredentials
import socket
from rdr_service.services.gcp_utils import gcp_get_iam_service_key_info
class GoogleSheetsClient:
"""
Allows for interacting with a spreadsheet in google drive. This class is designed to be used as a context manager
and requires that:
- A service account (with a json keyfile) is authenticated
- The service account has the correct permissions to edit the google spreadsheet
Please carefully verify that this works for your purpose if you re-use this. There are some things that don't
currently work (such as formula manipulation and making new tabs).
"""
def __init__(self, spreadsheet_id, service_key_id, tab_offsets=None):
"""
:param spreadsheet_id: Google Drive id of the spreadsheet.
:param service_key_id: Key id for the service account used.
:type tab_offsets: Dictionary specifying tab names and offsets for them (defined in Google Sheet cell
notation such as B4). Giving a cell value will specify that any changes for that tab use that cell
as the origin. So with an origin of B4 an update to C5 would be given as row 1 and column 1.
Used to prevent updating headers in the target spreadsheet.
WARNING: Does not support columns past Z
"""
# Load credentials from service key file
self.service_key_id = service_key_id
self._spreadsheet_id = spreadsheet_id
self._default_tab_id = None
self._tabs = None
self._empty_cell_value = ''
self._tab_offsets = {tab_name: {
'row': int(offset[1:]) - 1, # convert row number specified in a system of counting from 1
'col': ord(offset[:1].upper()) - ord('A'), # Get column number (A = 0, B = 1, ...)
'offset_str': offset
} for tab_name, offset in tab_offsets.items()} if tab_offsets else {}
def _build_service(self):
service_key_info = gcp_get_iam_service_key_info(self.service_key_id)
api_credentials = ServiceAccountCredentials.from_json_keyfile_name(service_key_info['key_path'])
# The Google API client uses sockets, and the requests can take longer than the default timeout.
# The proposed solution is to increase the default timeout manually
# https://github.com/googleapis/google-api-python-client/issues/632
# The socket seems to be created when calling discover.build, so this temporarily increases the timeout for
# new sockets when the Google service creates its socket.
default_socket_timeout = socket.getdefaulttimeout()
num_seconds_in_five_minutes = 300
socket.setdefaulttimeout(num_seconds_in_five_minutes)
# Set up for being able to interact with the sheet in Drive
sheets_api_service = discovery.build('sheets', 'v4', credentials=api_credentials)
# Set the timeout back for anything else in the code that would use sockets
socket.setdefaulttimeout(default_socket_timeout)
return sheets_api_service
def __enter__(self):
self.download_values()
return self
def __exit__(self, *_):
self.upload_values()
@classmethod
def _initialize_empty_tab(cls):
return []
def _get_offset_row_col(self, tab_id):
tab_offset_data = self._tab_offsets.get(tab_id, {
'row': 0,
'col': 0
})
return tab_offset_data['row'], tab_offset_data['col']
def _get_offset_string(self, tab_id):
tab_offset_data = self._tab_offsets.get(tab_id, {
'offset_str': 'A1'
})
return tab_offset_data['offset_str']
@backoff.on_exception(backoff.constant, HttpError, max_tries=4, jitter=None, interval=30)
def download_values(self):
"""
Retrieve the values as they currently are in google drive.
Note: this will overwrite any changes that have been made this instance of the document using `update_cell`.
:return: None
"""
self._tabs = {}
# API call documented at https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get
request = self._build_service().spreadsheets().get(spreadsheetId=self._spreadsheet_id, includeGridData=True)
response = request.execute()
# Parse the retrieved spreadsheet
tab_data = response['sheets']
for tab in tab_data:
tab_id = tab['properties'].get('title')
# Set the default tab to the first tab
if self._default_tab_id is None:
self._default_tab_id = tab_id
# Initialize the internal tab structure and parse the values from the response
self._tabs[tab_id] = self._initialize_empty_tab()
tab_grid_data = tab['data'][0].get('rowData', [])
for row_number, row_data in enumerate(tab_grid_data):
row_values = row_data.get('values')
if row_values:
for col_number, cell_data in enumerate(row_values):
row_offset, col_offset = self._get_offset_row_col(tab_id)
if row_number >= row_offset and col_number >= col_offset:
cell_value = cell_data.get('formattedValue', self._empty_cell_value)
self.update_cell(row_number - row_offset, col_number - col_offset, cell_value, tab_id)
def set_current_tab(self, tab_id):
"""
Change the default tab. Used to make updating multiple fields on one tab cleaner
(so the tab id doesn't need to be given with the location for each cell value).
:param tab_id: Name of the tab to use as the default.
:return: None
"""
self._default_tab_id = tab_id
def update_cell(self, row: int, col: int, value: str, tab_id=None):
"""
Change the value of a cell.
Any changes made will be stored locally until the next call to `upload_values`
(or when the context ends).
:param row: row number of the cell, starting from 0 at the top of the spreadsheet
:param col: column number of the cell, starting from 0 at the left of the spreadsheet
:param value: value to store
:param tab_id: Name of the tab to modify. The default tab is used if this parameter isn't provided.
:return: None
"""
if not isinstance(col, int):
col = int(col)
values_grid = self._tabs.get(tab_id or self._default_tab_id)
# Increase the number of rows we have if the caller is setting a cell on a
# row farther out than what is initialized
while row >= len(values_grid):
values_grid.append([self._empty_cell_value])
row_for_update = values_grid[row]
# Increase the number of columns we have in the row if the caller is setting a
# cell on a cell father out than what is initialized in the row
while col >= len(row_for_update):
row_for_update.append(self._empty_cell_value)
row_for_update[col] = value
def truncate_tab_at_row(self, row, tab_id=None):
"""
Clears all values from the sheet at and below the given row (setting their cells equal to an empty string).
:param row: Row to start clearing, starting from 0 at the top of the document
:param tab_id: Tab to clear values from, defaults to the current tab if not provided
"""
values_grid = self._tabs.get(tab_id or self._default_tab_id)
current_row = row
while current_row < len(values_grid): # Iterate through the rows
# Replace everything in the row with empty strings
values_grid[current_row] = [self._empty_cell_value] * len(values_grid[current_row])
current_row += 1
def insert_new_row_at(self, row_index, tab_id=None):
"""
Creates a new, empty row at the given row index. The current row at the given index will be moved down.
:param row_index: Index, counting from 0, for the new row
:param tab_id: Tab to add the new row to, defaults to the current tab if not provided
"""
values_grid = self._tabs.get(tab_id or self._default_tab_id)
values_grid.insert(row_index, [self._empty_cell_value])
# All the following rows will be moved down.
# Any row in front of a row that moves down will be uploaded to the document in the same position as the one
# that moved down. Any row in front of one that moves down needs to have as many cells as the one it's
# replacing, so that it will overwrite all the values left over from the row that it pushed down.
while row_index < len(values_grid) - 1: # The last row isn't replacing anything, so doesn't need to be checked
row_to_expand = values_grid[row_index]
number_of_cells_to_replace = len(values_grid[row_index + 1])
while number_of_cells_to_replace > len(row_to_expand):
row_to_expand.append(self._empty_cell_value)
row_index += 1
def remove_row_at(self, row_index, tab_id=None):
"""
Removes a row from the sheet.
:param row_index: Index, counting from 0, for the row to remove
:param tab_id: Tab to remove the row from, defaults to the current tab if not provided
"""
values_grid = self._tabs.get(tab_id or self._default_tab_id)
number_of_cells_replaced = len(values_grid[row_index])
del values_grid[row_index]
# Removing a row in the document means every row moves up, including the last one.
# So we need to insert a row at the end to overwrite the values left from when the original last row moves up.
# (The number of cells is expanded later in this method).
values_grid.append([self._empty_cell_value])
# All following rows will be moved up.
# Any rows after a row that moves up will be uploaded to the document in the same position as the one before it.
# If the following row doesn't have as many cells as the row it's replacing, then it wouldn't
# overwrite all the cells and some trailing values could be left over. All rows might need to have
# extra cells added so they will overwrite all the cells left from the row they're replacing.
while row_index < len(values_grid):
next_row = values_grid[row_index]
while number_of_cells_replaced > len(next_row):
next_row.append(self._empty_cell_value)
# Get the number of cells in this row, the row after it will be taking it's place in the document
number_of_cells_replaced = len(next_row)
row_index += 1
def get_row_at(self, row_index, tab_id=None):
"""
Retrieves the list of values at the given row. If the indexed row doesn't already exist, this method will
expand the grid until it does.
:param row_index: Index, counting from 0, for the row to retrieve
:param tab_id: Tab to read the row from, defaults to the current tab if not provided
:return: List of values that make up the given row
"""
values_grid = self._tabs.get(tab_id or self._default_tab_id)
while row_index >= len(values_grid):
values_grid.append([self._empty_cell_value])
return list(values_grid[row_index])
@backoff.on_exception(backoff.constant, HttpError, max_tries=4, jitter=None, interval=30)
def upload_values(self):
"""
Upload the local data to the google drive spreadsheet.
Note: any changes made to the target spreadsheet since the last call to `download_values` will be overwritten.
"""
request = self._build_service().spreadsheets().values().batchUpdate(
spreadsheetId=self._spreadsheet_id,
body={
'valueInputOption': 'RAW',
'data': [{
'range': f"'{tab_id}'!{self._get_offset_string(tab_id)}",
'values': tab_data
} for tab_id, tab_data in self._tabs.items()]
}
)
request.execute()
def get_tab_values(self, tab_id=None):
"""
Returns the values of the specified tab (or the current tab if no tab was specified).
Empty cells are represented by empty strings.
:param tab_id: Identifier of the tab to retrieve values from.
:return: A two dimensional list of strings that represent the cell values, organized by
rows (from the top down) and then columns (from left to right).
"""
if tab_id is None:
tab_id = self._default_tab_id
value_grid = self._tabs.get(tab_id)
return [[value for value in row] for row in value_grid]
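# Illustrative usage sketch: the spreadsheet id, service key id and tab name
# are hypothetical placeholders. Entering the "with" block downloads the
# current values; leaving it uploads any local changes.
def _example_sheet_update():
    with GoogleSheetsClient('example-spreadsheet-id', 'example-service-key-id',
                            tab_offsets={'Summary': 'B4'}) as sheet:
        sheet.set_current_tab('Summary')
        # with the B4 offset, row 0 / column 0 corresponds to cell B4
        sheet.update_cell(0, 0, 'participant count')
        sheet.update_cell(0, 1, '42')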
| bsd-3-clause | -8,798,723,942,164,269,000 | 45.355872 | 120 | 0.63189 | false |
w0rp/w0rpzone | blog/urls.py | 1 | 2639 | from django.conf.urls import re_path
from django.contrib.auth.decorators import login_required
from w0rplib.url import redir
from .feed import LatestArticleFeed
from .views import (
ArticleBanCommenterView,
ArticleDeleteCommentView,
ArticleDetailView,
ArticleEditPageView,
ArticleMonthArchiveView,
ArticlePageView,
ArticleUnbanCommenterView,
DeleteArticleView,
EditArticleView,
NewArticleView,
article_bounce_view,
upload_file_view,
)
urlpatterns = [
# Loading the main site gets you page 1.
re_path(
r"^$",
ArticlePageView.as_view(),
{"page": "1"},
name="blog-home",
),
# Redirect the first page back to the blog main page, for SEO.
redir(r"^page/0*1/$", "/blog"),
# Redirect appending "login" to the blog URL to the right login URL,
# which will redirect back to the blog.
redir(r"^login/$", "/login/?next=/blog"),
re_path(
r"^page/(?P<page>[\d]+)/$",
ArticlePageView.as_view(),
name="article-page"
),
re_path(
r"^delete/(?P<slug>[\w-]+)/$",
login_required(DeleteArticleView.as_view()),
name="delete-article"
),
re_path(
r"^edit-page/(?P<page>[\d]+)/$",
login_required(ArticleEditPageView.as_view()),
name="article-edit-list"
),
re_path(
r"^post/(?P<slug>[\w-]+)/$",
ArticleDetailView.as_view(),
name="article-detail"
),
re_path(
r"^post/(?P<slug>[\w-]+)/comment-bounce/$",
article_bounce_view,
name="article-comment-bounce"
),
re_path(
r"^post/(?P<slug>[\w-]+)/delete-comment/(?P<pk>\d+)/$",
ArticleDeleteCommentView.as_view(),
name="delete-comment"
),
re_path(
r"^post/(?P<slug>[\w-]+)/ban-comment/(?P<pk>\d+)/$",
ArticleBanCommenterView.as_view(),
name="ban-commenter"
),
re_path(
r"^post/(?P<slug>[\w-]+)/unban-comment/(?P<pk>\d+)/$",
ArticleUnbanCommenterView.as_view(),
name="unban-commenter"
),
re_path(
r"^date/(?P<year>\d{4})/(?P<month>1[0-2]|0[1-9])/$",
ArticleMonthArchiveView.as_view(month_format="%m"),
name="article-archive"
),
re_path(
r"^latest/feed/$",
LatestArticleFeed(),
name="article-feed"
),
re_path(
r"^new/$",
NewArticleView.as_view(),
name="new-article",
),
re_path(
r"^edit/(?P<slug>[\w-]+)/$",
EditArticleView.as_view(),
name="edit-article"
),
re_path(r"^upload/$", upload_file_view, name="upload-file"),
]
| bsd-2-clause | -3,337,851,615,868,272,000 | 26.489583 | 72 | 0.558924 | false |
hbenarab/mt-iebkg | run_on_ollie_dataset.py | 1 | 8580 | __author__ = 'heni'
import datetime
import math
import numpy
import pickle
from utils.tools import get_accuracy
from ollie_comparison.utils.training_tools import create_word2ind,create_network,get_labeled_data
from utils.tools import shuffle
def run_on_ollie_dataset(iob_ollie_dataset_path,use_cross_validation):
settings = {'partial_training': 0.8,
'partial_testing': 0.2,
                'fold': 10,  # number of cross-validation folds
'lr': 0.05,
'verbose': 1,
'decay': False, # decay on the learning rate if improvement stops
'win': 7, # number of words in the context window
'bs': 9, # number of backprop through time steps
'nhidden': 100, # number of hidden units
'seed': 345,
'emb_dimension': 100, # dimension of word embedding
'nepochs': 50}
# iob_ollie_dataset_file=open(iob_ollie_dataset_path,'r')
indices=create_word2ind(iob_ollie_dataset_path)
words_index=indices['wordIndex']
labels_index=indices['labelIndex']
word2index = words_index.getCurrentIndex()
index2word = words_index.getIndex2Word()
label2index = labels_index.getCurrentIndex()
index2label = labels_index.getIndex2Word()
vocsize=len(word2index)
nclasses=len(label2index)
new_network_folder = datetime.datetime.now().strftime('%Y-%m-%d_%Hh%M')
rnn,model_folder=create_network(settings,nclasses,vocsize,new_network_folder)
print('RNN model created and saved under %s' % model_folder)
[labeled_data,labeled_data_size]=get_labeled_data(iob_ollie_dataset_path)
print('Labeled data size for articles: ',labeled_data_size)
sentences_list, labels_list = labeled_data.getData()
while [] in sentences_list:
print('Empty sentences were found. They will be removed')
empty=sentences_list.index([])
sentences_list.pop(empty)
labels_list.pop(empty)
assert len(sentences_list)==len(labels_list)
number_labeled_sentences = len(sentences_list)
print('The training phase of the RNN model on the Ollie dataset will begin now')
rnn=rnn.load(model_folder)
#########################################################
# training with consideration to parameters in settings #
#########################################################
if not use_cross_validation:
print('No cross-validation techniques will be used in this training process')
shuffle([sentences_list, labels_list], settings['seed'])
training_size = int(math.floor(settings['partial_training'] * number_labeled_sentences))
testing_size = int(math.floor(settings['partial_testing'] * number_labeled_sentences))
print('Training size: [0:{0}] = {0}'.format(training_size))
train_sentences = sentences_list[0:training_size]
train_labels = labels_list[0:training_size]
print('Testing size: [{0}:{1}] = {2}'.format(training_size, training_size + testing_size, testing_size))
test_sentences = sentences_list[training_size:training_size + testing_size]
test_labels = labels_list[training_size:training_size + testing_size]
else:
print('Cross validation will be used')
####################
# training process #
####################
# number_train_sentences = len(train_sentences)
# number_train_labels_toGuess = sum([len(x) for x in test_labels])
# print('Starting training with {0} labeled sentences in total for {1} epochs.'.
# format(number_train_sentences, settings['nepochs']))
best_accuracy = -numpy.inf
current_learning_rate = settings['lr']
best_epoch = 0
f1_of_best_acc=0
conf_mat_of_best_acc=None
for e in range(0, settings['nepochs']):
print('Epoch {0}'.format(e))
print('----------------------------------------------')
if use_cross_validation:
####################
# validation phase #
####################
print('Validation phase in process')
shuffle([sentences_list, labels_list], settings['seed'])
divide_in_folds=lambda lst,sz:[lst[i:i+sz] for i in range(0,len(lst),sz)]
if len(sentences_list)%settings['fold']==0:
size_of_fold=math.floor(len(sentences_list)/settings['fold'])
else:
size_of_fold=(math.floor(len(sentences_list)/settings['fold']))+1
sentences_in_folds=divide_in_folds(sentences_list,size_of_fold)
labels_in_folds=divide_in_folds(labels_list,size_of_fold)
assert len(sentences_in_folds)==settings['fold']
assert len(sentences_in_folds)==len(labels_in_folds)
all_validation_accuracies=[]
for j in range(0,len(sentences_in_folds)):
ex_tr_sent=sentences_in_folds[:]
ex_tr_labels=labels_in_folds[:]
# val_sent=sentences_in_folds[j]
# val_labels=labels_in_folds[j]
# assert len(val_sent)==len(val_labels)
val_sent=ex_tr_sent.pop(j)
val_labels=ex_tr_labels.pop(j)
assert len(val_sent)==len(val_labels)
assert len(ex_tr_sent)==len(ex_tr_labels)
tr_sent=[]
tr_labels=[]
for c in range(0,len(ex_tr_sent)):
tr_sent.extend(ex_tr_sent[c])
tr_labels.extend(ex_tr_labels[c])
assert len(tr_sent)==len(tr_labels)
train_dict={'sentences':tr_sent,'labels':tr_labels}
validation_dict={'sentences':val_sent,'labels':val_labels}
print('Training the fold number %i will begin now' % (j+1))
[current_validation_accuracy,f1,conf_mat]=get_accuracy(rnn,train_dict,validation_dict,word2index,label2index,settings,
current_learning_rate,e,index2word,is_validation=True)
all_validation_accuracies.append(current_validation_accuracy)
assert len(all_validation_accuracies)==settings['fold']
mean_validation=sum(all_validation_accuracies)/len(all_validation_accuracies)
if mean_validation>best_accuracy:
best_accuracy=mean_validation
f1_of_best_acc=f1
conf_mat_of_best_acc=conf_mat
print('New best validation accuracy: %2.2f%%' % best_accuracy)
# rnn.save(model_folder)
print('A new RNN has been saved.')
else:
print('Validation phase did not come up with a better accuracy (only %2.2f%%).'
                      ' A new epoch will begin.' % mean_validation)
# rnn=rnn.load(model_folder)
#continue
##################
# Training phase #
##################
else:
shuffle([train_sentences, train_labels], settings['seed'])
print('Training in progress')
# rnn=rnn.load(model_folder)
# print('RNN saved during the validation phase has been loaded')
training_dict={'sentences':train_sentences,'labels':train_labels}
testing_dict={'sentences':test_sentences,'labels':test_labels}
[testing_accuracy,f1,conf_mat]=get_accuracy(rnn,training_dict,testing_dict,word2index,label2index,settings,
current_learning_rate,e,index2word,is_validation=False)
print('Accuracy during the testing phase (number of correct guessed labels) at %2.2f%%.' % testing_accuracy)
            # check if current epoch is the best; this uses testing_accuracy, f1 and
            # conf_mat from the plain train/test branch above, so it must stay inside it
            if testing_accuracy > best_accuracy:
                best_accuracy = testing_accuracy
                best_epoch = e
                f1_of_best_acc=f1
                conf_mat_of_best_acc=conf_mat
                rnn.save(model_folder)
                print('Better testing accuracy !!')
            else:
                rnn=rnn.load(model_folder)
            if abs(best_epoch-e)>=5:
                current_learning_rate*=0.5
            if current_learning_rate<1e-5: break
print('BEST RESULT: epoch ', best_epoch, 'with best accuracy: ', best_accuracy, '.',)
# iob_ollie_dataset_file.close()
pickle.dump([best_accuracy,f1_of_best_acc,conf_mat_of_best_acc],open('perf.pck','wb'))
# import sys
# sys.path.append('/home/heni/git/masterThesisKG/mt-iebkg')
run_on_ollie_dataset('data/ollie-scored.iob.txt',use_cross_validation=False) | mit | -4,446,617,290,510,862,000 | 44.163158 | 134 | 0.583217 | false |
righetz/pyurl | pyurl.py | 1 | 4354 | #!/usr/bin/python3
"""Simple CUrl porting for Python3
"""
import urllib.request, re
import sys
import argparse
from urllib.parse import urlencode
import gettext
import locale
def main():
""""main method"""
language_set()
parser = argparse.ArgumentParser() #setting possible arguments
parser.add_argument('-o', metavar='output_file', help=_('Write output to file'))
parser.add_argument('-i', action='store_true', help=_('Include request headers'))
parser.add_argument('url', help=_('Define target URL'))
parser.add_argument('-d', metavar='DATA', help=_('Http POST data between quotation marks'))
parser.add_argument('-c', action='store_true', help=_('Show Http code'))
parser.add_argument('-a', metavar='user_agent', help=_('Set custom user agent'))
parser.add_argument('-k', action='store_true', help=_('headers only'))
check_args_and_exec(parser.parse_args())
def language_set():
"""read from UNIX or windows locale informations and set language"""
ita = gettext.translation('pyurl', localedir='locale', languages=['it'])
eng = gettext.translation('pyurl', localedir='locale', languages=['en'])
if locale.getlocale()[0] == 'it_IT' or locale.getlocale()[0] == 'ita':
ita.install()
else:
eng.install()
def check_args_and_exec(args):
"""arguments control and functions invoke"""
headers = ""
post_data = None
url = str(args.url)
if args.d is not None:
post_data = data_post_format(args.d)
    # only prepend a scheme when one is missing, so https:// URLs are not mangled
    if not re.match(r"https?://", url):
        url = "http://" + url
text = get_source(url, post_data, args.c, args.a)
if args.i or args.k:
if args.i and not args.k:
args.c = None
headers = get_headers(url, args.a, args.c)
if args.k is True:
text = ""
if args.o is not None:
save_to_file(text, args.o, headers)
else:
if headers:
print(headers)
print(text)
def connect(url, post_data, user_agent):
"""connection method"""
try:
        if user_agent is None:
user_agent = "PyUrl V1.0"
req = urllib.request.Request(
url,
headers={"User-Agent" : user_agent
}
)
        if post_data is not None:
req.data = post_data.encode('utf-8')
src = urllib.request.urlopen(req)
except urllib.error.HTTPError as err:
sys.exit(err)
except urllib.error.URLError:
sys.exit(_("Could not resolve host %s\nCheck your connection") % url)
return src
def data_post_format(data_string):
"""format input data to be handled by urllib.request"""
data_list = data_string.split("&")
data_map = {}
for dato in data_list:
temp = dato.split("=")
try:
data_map[temp[0]] = temp[1] #check if user input is correct
except IndexError:
sys.exit(_("Specify every POST input as \"key=value\" "))
return urlencode(data_map)
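# Illustrative note: a call such as data_post_format("user=bob&lang=en")
# returns an url-encoded body equivalent to "user=bob&lang=en"; the keys and
# values here are hypothetical examples.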
def get_source(url, post_data, http_code, user_agent):
"""set connection to url and extract source"""
src = connect(url, post_data, user_agent)
charset = src.headers.get_param('charset')
if not charset:
charset = 'utf-8' # workaround for missing charset header data
content = []
if http_code:
content.append(_("Http code: %d\n\n ")% src.getcode())
while True:
line = src.readline()
if line:
content.append(line.decode(charset))
else:
src.close()
break
return "".join(content)
def get_headers(url, user_agent, http_code):
"""return URL headers"""
src = connect(url, None, user_agent)
if http_code:
return (_("Http code: %d\n\n ") % src.getcode()) + str(src.headers)
else:
return str(src.headers)
def save_to_file(text, outfile, headers):
"""write to file"""
try:
file_writer = open(outfile, 'w')
except FileNotFoundError:
sys.exit(_("Specified directory does not exists"))
except IsADirectoryError:
sys.exit(_("Target path is a directory, include file name"))
except IOError:
sys.exit(_("Input/Output error\nMaybe you don't have enough privileges?"))
if headers:
file_writer.write(headers)
file_writer.write(text)
file_writer.close()
if __name__ == "__main__":
main()
| gpl-2.0 | -7,379,587,351,273,032,000 | 32.492308 | 95 | 0.602205 | false |
mstone/vscan | cpplint.py | 1 | 123523 | #!/usr/bin/python2.4
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Here are some issues that I've had people identify in my code during reviews,
# that I think are possible to flag automatically in a lint tool. If these were
# caught by lint, it would save time both for myself and that of my reviewers.
# Most likely, some of these are beyond the scope of the current lint framework,
# but I think it is valuable to retain these wish-list items even if they cannot
# be immediately implemented.
#
# Suggestions
# -----------
# - Check for no 'explicit' for multi-arg ctor
# - Check for boolean assign RHS in parens
# - Check for ctor initializer-list colon position and spacing
# - Check that if there's a ctor, there should be a dtor
# - Check accessors that return non-pointer member variables are
# declared const
# - Check accessors that return non-const pointer member vars are
# *not* declared const
# - Check for using public includes for testing
# - Check for spaces between brackets in one-line inline method
# - Check for no assert()
# - Check for spaces surrounding operators
# - Check for 0 in pointer context (should be NULL)
# - Check for 0 in char context (should be '\0')
# - Check for camel-case method name conventions for methods
# that are not simple inline getters and setters
# - Check that base classes have virtual destructors
# put " // namespace" after } that closes a namespace, with
# namespace's name after 'namespace' if it is named.
# - Do not indent namespace contents
# - Avoid inlining non-trivial constructors in header files
# include base/basictypes.h if DISALLOW_EVIL_CONSTRUCTORS is used
# - Check for old-school (void) cast for call-sites of functions
# ignored return value
# - Check gUnit usage of anonymous namespace
# - Check for class declaration order (typedefs, consts, enums,
# ctor(s?), dtor, friend declarations, methods, member vars)
#
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Linted extensions are .cc, .cpp, and .h. Other file types will be ignored.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
# \ used for clearer layout -- pylint: disable-msg=C6013
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/nolint',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/rtti',
'runtime/sizeof',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/virtual',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/indent',
'whitespace/labels',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = [ '-build/include_alpha' ]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# Headers that we consider STL headers.
_STL_HEADERS = frozenset([
'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'new',
'pair.h', 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
'utility', 'vector', 'vector.h',
])
# Non-STL C++ system headers.
_CPP_HEADERS = frozenset([
'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h',
'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h',
'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h',
'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
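# For illustration, a sketch of a few entries the loops above produce:
#   _CHECK_REPLACEMENT['CHECK']['==']        -> 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_TRUE']['<']   -> 'EXPECT_LT'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] -> 'EXPECT_NE'  (inverted sense)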
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
_regexp_compile_cache = {}
# Finds occurrences of NOLINT or NOLINT(...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
# FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
m = _RE_SUPPRESSION.search(raw_line)
if m:
category = m.group(1)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(linenum)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(linenum)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
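# For illustration, a sketch of the comment forms this parser recognizes:
#   int i = 0;  // NOLINT               -- suppress every category on the line
#   long i;     // NOLINT(runtime/int)  -- suppress only runtime/int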
def ResetNolintSuppressions():
"Resets the set of NOLINT suppressions to empty."
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparisson.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
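  # A sketch with a made-up path: 'base/String-util-inl.h' canonicalizes to
  # 'base/string_util.h', so it sorts together with its main header.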
def IsInAlphabeticalOrder(self, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
header_path: Header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
canonical_header = self.CanonicalizeAlphabeticalOrder(header_path)
if self._last_header > canonical_header:
return False
self._last_header = canonical_header
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
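# Roughly, the order this state machine accepts is (a sketch of the intent,
# derived from the section constants above): the header this file implements,
# then C system headers, then C++ system headers, then other headers.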
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
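  # Worked example (a sketch): at --v=0 a non-test function of 1000 lines has
  # trigger = 250 and error_level = int(log2(1000 / 250)) = 2.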
def End(self):
"""Stop analizing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN? Try to find a git or hg top level directory by searching up
# from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""Returns true iff confidence >= verbose, category passes
filter and is not NOLINT-suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to be careful about how surrounding whitespace is removed so we can
# handle comments inside statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side;
# if this doesn't work we try the left side, but only if there's a
# non-word character on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos]
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
for linenum in range(len(lines)):
self.lines.append(CleanseComments(lines[linenum]))
elided = self._CollapseStrings(lines[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
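  # A sketch of what _CollapseStrings does to a made-up line:
  #   'printf("%d:%s", i, s)'  ->  'printf("", i, s)'
  # so later checks are not fooled by punctuation inside string literals.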
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [, finds the position that closes it.
  If lines[linenum][pos] points to a '(' or '{' or '[', finds the
linenum/pos that correspond to the closing of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
startchar = line[pos]
if startchar not in '({[':
return (line, clean_lines.NumLines(), -1)
if startchar == '(': endchar = ')'
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
num_open = line.count(startchar) - line.count(endchar)
while linenum < clean_lines.NumLines() and num_open > 0:
linenum += 1
line = clean_lines.elided[linenum]
num_open += line.count(startchar) - line.count(endchar)
# OK, now find the endchar that actually got us back to even
endpos = len(line)
while num_open >= 0:
    endpos = line.rfind(endchar, 0, endpos)
    num_open -= 1 # chopped off another closing character
return (line, linenum, endpos + 1)
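# A sketch with a made-up line: calling CloseExpression on
#   'if (foo(bar(1)), baz) {'
# with pos at the first '(' returns a position just past the ')' before '{'.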
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
fileinfo = FileInfo(filename)
return re.sub(r'[-./\s]', '_', fileinfo.RepositoryName()).upper() + '_'
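# A sketch with a made-up path: if RepositoryName() were 'chrome/browser/browser.h',
# the expected guard variable would be 'CHROME_BROWSER_BROWSER_H_'.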
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
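# A header that satisfies this check looks roughly like (a sketch):
#   #ifndef CHROME_BROWSER_BROWSER_H_
#   #define CHROME_BROWSER_BROWSER_H_
#   ...
#   #endif // CHROME_BROWSER_BROWSER_H_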
def CheckForUnicodeReplacementCharacters(filename, lines, error):
"""Logs an error for each line containing Unicode replacement characters.
These indicate that either the file contained invalid UTF-8 (likely)
or Unicode replacement characters (which it shouldn't). Note that
it's possible for this to throw off line numbering if the invalid
UTF-8 occurred adjacent to a newline.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. They\'re '
          'ugly and unnecessary, and you should use concatenation instead.')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('rand(', 'rand_r('),
('readdir(', 'readdir_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code has been originally written without consideration of
multi-threading. Also, engineers are relying on their old experience;
they have learned posix before threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_function, multithread_safe_function in threading_list:
ix = line.find(single_thread_function)
# Comparisons made explicit for clarity -- pylint: disable-msg=C6403
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
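# A sketch with made-up lines: 'tm* t = localtime(&now);' triggers the
# suggestion to use localtime_r(...), while 'my_localtime(&now)' does not,
# because the character just before the match is part of an identifier.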
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
  For example, the following function:
void increment_counter(int* count) {
*count++;
}
  is invalid, because it effectively does count++, moving the pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _ClassInfo(object):
"""Stores information about a class."""
def __init__(self, name, linenum):
self.name = name
self.linenum = linenum
self.seen_open_brace = False
self.is_derived = False
self.virtual_method_linenumber = None
self.has_virtual_destructor = False
self.brace_depth = 0
class _ClassState(object):
"""Holds the current state of the parse relating to class declarations.
It maintains a stack of _ClassInfos representing the parser's guess
as to the current nesting of class declarations. The innermost class
is at the top (back) of the stack. Typically, the stack will either
be empty or have exactly one entry.
"""
def __init__(self):
self.classinfo_stack = []
def CheckFinished(self, filename, error):
"""Checks that all classes have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
if self.classinfo_stack:
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
error(filename, self.classinfo_stack[0].linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
self.classinfo_stack[0].name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
class_state, error):
"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
- classes with virtual methods need virtual destructors (compiler warning
available, but not turned on yet.)
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(auto|register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Track class entry and exit, and attempt to find cases within the
# class declaration that don't meet the C++ style
# guidelines. Tracking is very dependent on the code matching Google
# style guidelines, but it seems to perform well enough in testing
# to be a worthwhile addition to the checks.
classinfo_stack = class_state.classinfo_stack
# Look for a class declaration
class_decl_match = Match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line)
if class_decl_match:
classinfo_stack.append(_ClassInfo(class_decl_match.group(3), linenum))
# Everything else in this function uses the top of the stack if it's
# not empty.
if not classinfo_stack:
return
classinfo = classinfo_stack[-1]
# If the opening brace hasn't been seen look for it and also
# parent class declarations.
if not classinfo.seen_open_brace:
# If the line has a ';' in it, assume it's a forward declaration or
# a single-line class declaration, which we won't process.
if line.find(';') != -1:
classinfo_stack.pop()
return
classinfo.seen_open_brace = (line.find('{') != -1)
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', line):
classinfo.is_derived = True
if not classinfo.seen_open_brace:
return # Everything else in this function is for after open brace
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = Match(r'(?<!explicit)\s+%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Match(r'(const\s+)?%s\s*&' % re.escape(base_classname),
args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
# Look for methods declared virtual.
if Search(r'\bvirtual\b', line):
classinfo.virtual_method_linenumber = linenum
# Only look for a destructor declaration on the same line. It would
# be extremely unlikely for the destructor declaration to occupy
# more than one line.
if Search(r'~%s\s*\(' % base_classname, line):
classinfo.has_virtual_destructor = True
# Look for class end.
brace_depth = classinfo.brace_depth
brace_depth = brace_depth + line.count('{') - line.count('}')
if brace_depth <= 0:
classinfo = classinfo_stack.pop()
# Try to detect missing virtual destructor declarations.
# For now, only warn if a non-derived class with virtual methods lacks
# a virtual destructor. This is to make it less likely that people will
# declare derived virtual destructors without declaring the base
# destructor virtual.
if ((classinfo.virtual_method_linenumber is not None) and
(not classinfo.has_virtual_destructor) and
(not classinfo.is_derived)): # Only warn for base classes
error(filename, classinfo.linenum, 'runtime/virtual', 4,
'The class %s probably needs a virtual destructor due to '
'having virtual method(s), one declared at line %d.'
% (classinfo.name, classinfo.virtual_method_linenumber))
else:
classinfo.brace_depth = brace_depth
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(comment, filename, linenum, error):
"""Checks for common mistakes in TODO comments.
Args:
comment: The text of the comment from the line in question.
filename: The name of the current file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable-msg=C6403
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
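# For illustration, the shape this check expects is a sketch like:
#   // TODO(my_username): Stuff.
# one space before TODO, a username in parentheses, and a space after ':'.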
def CheckSpacing(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't have too many
blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.raw_lines
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}'
if IsBlankLine(line):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if (prevbrace != -1 and prev_line[prevbrace:].find('}') == -1
and prev_line[:prevbrace].find('namespace') == -1):
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80 column line when placed on
      # the same line as the function name). We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Blank line at the start of a code block. Is this needed?')
# This doesn't ignore whitespace at the end of a namespace block
# because that is too hard without pairing open/close braces;
# however, a special exception is made for namespace closing
# brackets which have a comment containing "namespace".
#
# Also, ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('namespace') == -1
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Blank line at the end of a code block. Is this needed?')
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable-msg=C6403
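    # An even number of unescaped double quotes before the '//' means the
    # '//' sits outside any string literal.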
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or are an empty C++ style Doxygen comment, like:
# ///
# or they begin with multiple slashes followed by a space:
# //////// Header comment
match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
Search(r'^/$', line[commentend:]) or
Search(r'^/+ ', line[commentend:]))
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
# Alas, we can't test < or > because they're legitimately used sans spaces
# (a->b, vector<int> a). The only time we can tell is a < with no >, and
# only if it's not template params list spilling into the next line.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if not match:
# Note that while it seems that the '<[^<]*' term in the following
# regexp could be simplified to '<.*', which would indeed match
# the same class of strings, the [^<] means that searching for the
# regexp takes linear rather than quadratic time.
if not Search(r'<[^<]*,\s*$', line): # template params spill
match = Search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << and >> when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
match = Search(r'[^0-9\s](<<|>>)[^0-9\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
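  # Capture groups in the pattern below: 1 = keyword, 2 = spaces after '(',
  # 3 = first non-blank character inside the parens, 4 = spaces before ')'.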
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
    if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
# You should always have a space after a comma (either as fn arg or operator)
if Search(r',[^\s]', line):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, you should have spaces before your braces.
# And since you should never have braces at the beginning of a line, this is
# an easy test.
if Search(r'[^ (]{', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use { } instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use { } instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use { } instead.')
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone
# is using braces in a block to explicitly create a new scope,
# which is commonly used to control the lifetime of
# stack-allocated variables. We don't detect this perfectly: we
# just don't complain if the last non-whitespace character on the
# previous non-blank line is ';', ':', '{', or '}'.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if not Search(r'[;:}{]\s*$', prevline):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Braces shouldn't be followed by a ; unless they're defining a struct
# or initializing an array.
# We can't tell in general, but we can for some common cases.
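  # Walk backwards, prepending previous lines, so that a declaration split
  # across lines (e.g. "int a[] =" followed by "    { 1, 2, 3 };") is
  # examined as a single statement.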
prevlinenum = linenum
while True:
(prevline, prevlinenum) = GetPreviousNonBlankLine(clean_lines, prevlinenum)
if Match(r'\s+{.*}\s*;', line) and not prevline.count(';'):
line = prevline + line
else:
break
if (Search(r'{.*}\s*;', line) and
line.count('{') == line.count('}') and
not Search(r'struct|class|enum|\s*=\s*{', line)):
error(filename, linenum, 'readability/braces', 4,
"You don't need a ; after a }")
def ReplaceableCheck(operator, macro, line):
"""Determine whether a basic CHECK can be replaced with a more specific one.
For example suggest using CHECK_EQ instead of CHECK(a == b) and
similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
Args:
operator: The C++ operator used in the CHECK.
macro: The CHECK or EXPECT macro being called.
line: The current source line.
Returns:
True if the CHECK can be replaced with a more specific one.
"""
# This matches decimal and hex integers, strings, and chars (in that order).
match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
# Expression to match two sides of the operator with something that
# looks like a literal, since CHECK(x == iterator) won't compile.
# This means we can't catch all the cases where a more specific
# CHECK is possible, but it's less annoying than dealing with
# extraneous warnings.
match_this = (r'\s*' + macro + r'\((\s*' +
match_constant + r'\s*' + operator + r'[^<>].*|'
r'.*[^<>]' + operator + r'\s*' + match_constant +
r'\s*\))')
# Don't complain about CHECK(x == NULL) or similar because
# CHECK_EQ(x, NULL) won't compile (requires a cast).
# Also, don't complain about more complex boolean expressions
# involving && or || such as CHECK(a == b || c == d).
return Match(match_this, line) and not Search(r'NULL|&&|\|\|', line)
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
raw_lines = clean_lines.raw_lines
current_macro = ''
for macro in _CHECK_MACROS:
if raw_lines[linenum].find(macro) >= 0:
current_macro = macro
break
if not current_macro:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
line = clean_lines.elided[linenum] # get rid of comments and strings
# Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
for operator in ['==', '!=', '>=', '>', '<=', '<']:
if ReplaceableCheck(operator, current_macro, line):
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[current_macro][operator],
current_macro, operator))
break
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for c in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(c) in ('W', 'F'):
width += 2
elif not unicodedata.combining(c):
width += 1
return width
else:
return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
error: The function to call with any errors found.
"""
raw_lines = clean_lines.raw_lines
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace() and line[-1] != '':
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
  # There are certain situations in which we allow one space, notably for labels
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Labels should always be indented at least one space.
elif not initial_spaces and line[:2] != '//' and Search(r'[^:]:\s*$',
line):
error(filename, linenum, 'whitespace/labels', 4,
'Labels should always be indented at least one space. '
'If this is a member-initializer list in a constructor or '
'the base class list in a class definition, the colon should '
'be on the following line.')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line)):
line_width = GetLineWidth(line)
if line_width > 100:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than 100 characters')
elif line_width > 80:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= 80 characters long')
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 4,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckSpacing(filename, clean_lines, linenum, error)
CheckCheck(filename, clean_lines, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_stl_h = include in _STL_HEADERS
is_cpp_h = is_stl_h or include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
if not include_state.IsInAlphabeticalOrder(include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state,
error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
# Check for non-const references in functions. This is tricky because &
# is also used to take the address of something. We allow <> for templates,
# (ignoring whatever is between the braces) and : for classes.
# These are complicated re's. They try to capture the following:
# paren (for fn-prototype start), typename, &, varname. For the const
# version, we're willing for const to be before typename or after
  # Don't check the implementation on the same line.
fnline = line.split('{', 1)[0]
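  # Heuristic: count '&' reference parameters and count the const-qualified
  # ones; if plain references outnumber const references, a non-const
  # reference parameter is likely present.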
if (len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) >
len(re.findall(r'\([^()]*\bconst\s+(?:typename\s+)?(?:struct\s+)?'
r'(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) +
len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+\s+const(\s?&|&\s?)[\w]+',
fnline))):
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>".
if not Search(
r'(swap|Swap|operator[<>][<>])\s*\(\s*(?:[\w:]|<.*>)+\s*&',
fnline):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer.')
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
r'(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
if match:
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts.
if (match.group(1) is None and # If new operator, then this isn't a cast
not Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
match.group(2))
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)',
error)
# This doesn't catch all cases. Consider (const char * const)"hello".
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
if Search(
r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
if match and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
match.group(3)):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
# Check that we're not using RTTI outside of testing code.
if Search(r'\bdynamic_cast<', line) and not _IsTestFilename(filename):
error(filename, linenum, 'runtime/rtti', 5,
'Do not use dynamic_cast<>. If you need to cast within a class '
"hierarchy, use static_cast<> to upcast. Google doesn't support "
'RTTI.')
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
if Search(r'\bsscanf\b', line):
error(filename, linenum, 'runtime/printf', 1,
'sscanf can be ok, but is slow and can overflow buffers.')
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
match = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (match.group(1), match.group(2)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
if not Search(r'^\s*};', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
This also handles sizeof(type) warnings, due to similarity of content.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast or static_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
"""
match = Search(pattern, line)
if not match:
return
# e.g., sizeof(int)
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
error(filename, linenum, 'runtime/sizeof', 1,
'Using sizeof(type). Use sizeof(varname) instead if possible')
return
remainder = line[match.end(0):]
# The close paren is for function pointers as arguments to a function.
# eg, void foo(void (*bar)(int));
# The semicolon check is a more basic function check; also possibly a
# function pointer typedef.
# eg, void foo(int); or void foo(int) const;
# The equals check is for function pointer assignment.
# eg, void *(*foo)(int) = ...
#
# Right now, this will only catch cases where there's a single argument, and
# it's unnamed. It should probably be expanded to check for multiple
# arguments with some unnamed.
function_match = Match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)))', remainder)
if function_match:
if (not function_match.group(3) or
function_match.group(3) == ';' or
raw_line.find('/*') < 0):
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_HEADERS_ACCEPTED_BUT_NOT_PROMOTED = {
# We can trust with reasonable confidence that map gives us pair<>, too.
'pair<>': ('map', 'multimap', 'hash_map', 'hash_multimap')
}
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
    True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one of them (whichever appears later in the
  file) will be reported as a reason to include <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
m = _RE_PATTERN_STRING.search(line)
if m:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:m.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
# The following function is just a speed up, no semantics are changed.
    if '<' not in line:  # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
  # Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = os.path.abspath(filename)
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
for header in include_state.keys(): #NOLINT
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_state, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if template in _HEADERS_ACCEPTED_BUT_NOT_PROMOTED:
headers = _HEADERS_ACCEPTED_BUT_NOT_PROMOTED[template]
if [True for header in headers if header in include_state]:
continue
if required_header_unstripped.strip('<>"') not in include_state:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
def ProcessLine(filename, file_extension,
clean_lines, line, include_state, function_state,
class_state, error):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
error)
CheckForNonStandardConstructs(filename, clean_lines, line,
class_state, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
class_state = _ClassState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, class_state, error)
class_state.CheckFinished(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForUnicodeReplacementCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
"""
_SetVerboseLevel(vlevel)
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep !=
# '\r\n' as in Windows), a warning is issued below if this file
# is processed.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
carriage_return_found = False
# Remove trailing '\r'.
for linenum in range(len(lines)):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
carriage_return_found = True
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if (filename != '-' and file_extension != 'cc' and file_extension != 'h'
and file_extension != 'cpp'):
sys.stderr.write('Ignoring %s; not a .cc or .h file\n' % filename)
else:
ProcessFileData(filename, file_extension, lines, Error)
if carriage_return_found and os.linesep != '\r\n':
      # Use 0 for linenum since outputting only one error for potentially
# several lines.
Error(filename, 0, 'whitespace/newline', 1,
            'One or more unexpected \\r (^M) found; '
            'better to use only a \\n')
sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
    args: The command line arguments.
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if not val in ('emacs', 'vs7'):
PrintUsage('The only allowed output formats are emacs and vs7.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| bsd-3-clause | -166,617,796,872,889,120 | 38.502079 | 86 | 0.64915 | false |
idaholab/raven | plugins/ExamplePlugin/src/SumOfExponential.py | 1 | 5349 | """
Author: A. Alfonsi
Date : 11/17/2017
"""
import numpy as np
import math
from PluginBaseClasses.ExternalModelPluginBase import ExternalModelPluginBase
class SumOfExponential(ExternalModelPluginBase):
# Example External Model plugin class
#################################
#### RAVEN API methods BEGIN ####
#################################
def _readMoreXML(self, container, xmlNode):
"""
Method to read the portion of the XML that belongs to this plugin
@ In, container, object, self-like object where all the variables can be stored
@ In, xmlNode, xml.etree.ElementTree.Element, XML node that needs to be read
@ Out, None
"""
container.coefficients = {}
container.startValue = None
container.endValue = None
container.numberPoints = 10
outputVarNode = xmlNode.find("outputVariable")
if outputVarNode is None:
raise IOError("ExamplePlugin: <outputVariable> XML block must be inputted!")
container.outputVariable = outputVarNode.text.strip()
monotonicVarNode = xmlNode.find("monotonicVariable")
if monotonicVarNode is None:
raise IOError("ExamplePlugin: <monotonicVariable> XML block must be inputted!")
container.monotonicVariableName = monotonicVarNode.text.strip()
for child in xmlNode:
if child.tag.strip() == "variables":
# get verbosity if it exists
container.variables = [var.strip() for var in child.text.split(",")]
if container.outputVariable not in container.variables:
raise IOError("ExamplePlugin: "+container.outputVariable+" variable MUST be present in the <variables> definition!")
if container.monotonicVariableName not in container.variables:
raise IOError("ExamplePlugin: "+container.monotonicVariableName+" variable MUST be present in the <variables> definition!")
if len(container.variables) < 2:
raise IOError("ExamplePlugin: at least 1 input and 1 output variable ("+container.outputVariable+") must be listed in the <variables> definition!!")
if child.tag.strip() == "coefficient":
if "varName" not in child.attrib:
raise IOError("ExamplePlugin: attribute varName must be present in <coefficient> XML node!")
container.coefficients[child.attrib['varName']] = float(child.text)
if child.tag.strip() == "startMonotonicVariableValue":
container.startValue = float(child.text)
if child.tag.strip() == "endMonotonicVariableValue":
container.endValue = float(child.text)
if child.tag.strip() == "numberCalculationPoints":
container.numberPoints = int(child.text)
if container.startValue is None:
raise IOError("ExamplePlugin: <startMonotonicVariableValue> XML has not been inputted!")
if container.endValue is None:
raise IOError("ExamplePlugin: <endMonotonicVariableValue> XML has not been inputted!")
    container.variables.pop(container.variables.index(container.outputVariable))
    container.variables.pop(container.variables.index(container.monotonicVariableName))
def initialize(self, container,runInfoDict,inputFiles):
"""
Method to initialize this plugin
@ In, container, object, self-like object where all the variables can be stored
@ In, runInfoDict, dict, dictionary containing all the RunInfo parameters (XML node <RunInfo>)
@ In, inputFiles, list, list of input files (if any)
@ Out, None
"""
for var in container.variables:
if var not in container.coefficients:
container.coefficients[var] = 1.0
print("ExamplePlugin: not found coefficient for variable "+var+". Default value is 1.0!")
container.stepSize = (container.endValue - container.startValue)/float(container.numberPoints)
def run(self, container, Inputs):
"""
This is a simple example of the run method in a plugin.
      This method takes the input variables and computes
oneOutputOfThisPlugin(t) = var1Coefficient*exp(var1*t)+var2Coefficient*exp(var2*t) ...
@ In, container, object, self-like object where all the variables can be stored
      @ In, Inputs, dict, dictionary of inputs from RAVEN
      @ Out, None
"""
Xi = np.zeros(container.numberPoints+1)
monotonicVariable = np.zeros(container.numberPoints+1)
monoVarVal = container.startValue
monotonicVariable[0] = container.startValue
varCoeff = np.asarray([container.coefficients[var] for var in container.variables])
varExponents = np.asarray([Inputs[var]*monoVarVal for var in container.variables])
Xi[0] = np.sum(varCoeff*np.exp(varExponents))
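    # March along the monotonic variable in uniform steps; each new point adds
    # the increment sum_i coefficient_i*exp(variable_i*stepSize) to the
    # previous value (a cumulative sum).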
for step in range(container.numberPoints):
monoVarVal+=container.stepSize
monotonicVariable[step+1] = monoVarVal
varExponents = np.asarray([Inputs[var]*(monoVarVal-monotonicVariable[step]) for var in container.variables])
if np.max(varExponents) >= np.finfo(varExponents.dtype).maxexp:
print("ExamplePlugin: the exponents of the exponential cause overflow. Increase the number of <numberCalculationPoints>!")
Xi[step+1] = np.sum(varCoeff*np.exp(varExponents))
Xi[step+1]+=Xi[step]
container.__dict__[container.outputVariable] = Xi
container.__dict__[container.monotonicVariableName] = monotonicVariable
###############################
#### RAVEN API methods END ####
###############################
| apache-2.0 | -1,037,211,517,340,434,800 | 49.462264 | 158 | 0.689849 | false |
jtrain/django-cloud-media | cloud_media/forms.py | 1 | 7970 | """
A collection of forms for adding a new resource in the admin.
"""
from django import forms
from django.conf import settings
from django.contrib.admin.helpers import AdminForm
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django.utils.importlib import import_module
from django.utils.encoding import force_unicode
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from cloud_media.models import Resource
from cloud_media.wizard import FormWizard
import cloud_media.settings as backup_settings
BACKENDS = getattr(
settings,
'CLOUD_MEDIA_HOSTING_BACKENDS',
backup_settings.CLOUD_MEDIA_HOSTING_BACKENDS)
HOSTING_PROVIDERS = getattr(
settings,
'CLOUD_MEDIA_HOSTING_PROVIDERS',
backup_settings.CLOUD_MEDIA_HOSTING_PROVIDERS)
#----------------------------------------------------------------------
# Mixins.
class AdminFormMixin(forms.Form):
"""
Provides some admin-form-like features to ease the pain of having non
modeladmin forms in the admin.
Idea inspired by the formadmin project.
"""
fieldsets = ()
prepopulated_fields = {}
readonly_fields = None
model_admin = None
def adminform(self):
if not self.fieldsets:
self.fieldsets = [
(None,
{'fields':
self.fields.keys()})
]
adminform = AdminForm(self, self.fieldsets, self.prepopulated_fields,
self.readonly_fields, self.model_admin)
return adminform
#--------------------------------------------------------------------------
# wizard.
class RemoteMediaWizard(FormWizard):
"""
User fills in generic title + description on page 1.
Page 2 is dynamic. The form shown depends on the remote host chosen
for the file. It could be a BlipForm or a YoutubeForm etc..
"""
_mixins = (AdminFormMixin,)
@property
def mixins(self):
return self._mixins
@property
def __name__(self):
return self.__class__.__name__
def get_template(self, step):
return 'cloud_media/forms/wizard.html'
def done(self, request, form_list):
"""
The first form should specify the title, description and resource_type.
The final form should provide the resource_id.
"""
data = {}
resource_id = None
for form in form_list:
try:
resource_id = form.get_resource_id(request, self.backend)
except AttributeError:
pass
data.update(form.cleaned_data)
if not resource_id:
raise forms.ValidationError("Backend failed to provide resource id")
data['resource_id'] = resource_id
# remove data that is extra to that required by Resource model.
required_fields = set(f.name for f in Resource._meta.fields)
provided_fields = set(data)
data_to_remove = provided_fields - required_fields
map(data.pop, data_to_remove)
resource = Resource.objects.create(**data)
# redirect or remove popup window.
return self._model_admin.response_add(request, resource)
def process_step(self, request, form, step):
"""
Dynamically set the final form_list depending on the selected resource_type.
"""
super(RemoteMediaWizard, self).process_step(request, form, step)
resource_type = form.cleaned_data.get('resource_type')
if not resource_type:
return
# user can override default backend form in settings.
try:
NextForm = settings.CLOUD_MEDIA_HOSTING_UPLOAD_FORM[resource_type]
except (AttributeError, KeyError):
# not overridden select form based on backend.
backendname = BACKENDS.get(resource_type, BACKENDS.get('default'))
self.backend = _load_backend(backendname)()
NextForm = self.backend.get_form()
self.form_list[1] = NextForm
def add_mixins(self, form, mixins):
"""
Add a new set of base classes to the form's class for dynamic
inheritance of Mixins.
"""
form.__class__.__bases__ = mixins
def render_template(self, request, form, previous_fields, step,
context=None):
"""
Renders the template for the given step, returning an HttpResponse
object.
Override this method if you want to add a custom context, return a
different MIME type, etc. If you only need to override the template
name, use get_template() instead.
The template will be rendered with the following context:
step_field -- The name of the hidden field containing the step.
step0 -- The current step (zero-based).
step -- The current step (one-based).
step_count -- The total number of steps.
form -- The Form instance for the current step (either empty
or with errors).
previous_fields -- A string representing every previous data field,
plus hashes for completed forms, all in the form of
hidden fields. Note that you'll need to run this
through the "safe" template filter, to prevent
auto-escaping, because it's raw HTML.
"""
context = context or {}
context.update(self.extra_context)
# allow dynamic mixins to be added to the form.
self.add_mixins(form, self.mixins)
return render_to_response(self.get_template(step), dict(context,
step_field=self.step_field_name,
step0=step,
step=step + 1,
step_count=self.num_steps(),
form=form,
is_popup='_popup' in request.REQUEST,
previous_fields=previous_fields
), context_instance=RequestContext(request))
def parse_params(self, request, admin=None, *args, **kwargs):
self._model_admin = admin
opts = admin.model._meta
self.extra_context.update({
'title': u'Add %s' % force_unicode(opts.verbose_name),
'current_app': admin.admin_site.name,
'has_change_permission': admin.has_change_permission(request),
'add': True,
'opts': opts,
'root_path': admin.admin_site.root_path,
'app_label': opts.app_label,
})
#--------------------------------------------------------------------------
# Forms.
class RemoteMediaBasicForm(forms.Form):
"""
A basic form to capture title, description and resource_type.
"""
title = forms.CharField(max_length=255)
description = forms.CharField(widget=forms.Textarea)
resource_type = forms.ChoiceField(
choices=HOSTING_PROVIDERS,
help_text=_("Where would you like to upload to?")
)
remote_media_wizard = RemoteMediaWizard([RemoteMediaBasicForm, 0])
#----------------------------------------------------------------------------
# Helpers.
_backends_cache = {}
def _load_backend(backend):
if not backend:
raise ImproperlyConfigured(
"The requested resource type isn't in your CLOUD_MEDIA_HOSTING_BACKENDS "
"and neither is 'default'")
if backend not in _backends_cache:
module_name, func_name = backend.rsplit('.', 1)
_backends_cache[backend] = getattr(import_module(module_name),
func_name)
return _backends_cache[backend]
| bsd-3-clause | 5,641,450,394,097,979,000 | 33.353448 | 80 | 0.572146 | false |
alfredodeza/execnet | testing/test_serializer.py | 1 | 7330 | # -*- coding: utf-8 -*-
import subprocess
import sys
import tempfile
import execnet
import py
import pytest
MINOR_VERSIONS = {"3": "543210", "2": "76"}
def _find_version(suffix=""):
name = "python" + suffix
executable = py.path.local.sysfind(name)
if executable is None:
if sys.platform == "win32" and suffix == "3":
for name in ("python31", "python30"):
executable = py.path.local(r"c:\\{}\python.exe".format(name))
if executable.check():
return executable
for tail in MINOR_VERSIONS.get(suffix, ""):
path = py.path.local.sysfind("{}.{}".format(name, tail))
if path:
return path
else:
py.test.skip("can't find a {!r} executable".format(name))
return executable
TEMPDIR = _py2_wrapper = _py3_wrapper = None
def setup_module(mod):
mod.TEMPDIR = py.path.local(tempfile.mkdtemp())
if sys.version_info > (3, 0):
mod._py3_wrapper = PythonWrapper(py.path.local(sys.executable))
mod._py2_wrapper = PythonWrapper(_find_version("2"))
else:
mod._py3_wrapper = PythonWrapper(_find_version("3"))
mod._py2_wrapper = PythonWrapper(py.path.local(sys.executable))
def teardown_module(mod):
TEMPDIR.remove(True)
# we use the execnet folder in order to avoid triggering a missing apipkg
pyimportdir = str(py.path.local(execnet.__file__).dirpath())
class PythonWrapper(object):
def __init__(self, executable):
self.executable = executable
def dump(self, obj_rep):
script_file = TEMPDIR.join("dump.py")
script_file.write(
"""
import sys
sys.path.insert(0, %r)
import gateway_base as serializer
if sys.version_info > (3, 0): # Need binary output
sys.stdout = sys.stdout.detach()
sys.stdout.write(serializer.dumps_internal(%s))
"""
% (pyimportdir, obj_rep)
)
popen = subprocess.Popen(
[str(self.executable), str(script_file)],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
stdout, stderr = popen.communicate()
ret = popen.returncode
if ret:
raise py.process.cmdexec.Error(
ret, ret, str(self.executable), stdout, stderr
)
return stdout
def load(self, data, option_args="__class__"):
script_file = TEMPDIR.join("load.py")
script_file.write(
r"""
import sys
sys.path.insert(0, %r)
import gateway_base as serializer
if sys.version_info > (3, 0):
sys.stdin = sys.stdin.detach()
loader = serializer.Unserializer(sys.stdin)
loader.%s
obj = loader.load()
sys.stdout.write(type(obj).__name__ + "\n")
sys.stdout.write(repr(obj))"""
% (pyimportdir, option_args)
)
popen = subprocess.Popen(
[str(self.executable), str(script_file)],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
stdout, stderr = popen.communicate(data)
ret = popen.returncode
if ret:
raise py.process.cmdexec.Error(
ret, ret, str(self.executable), stdout, stderr
)
return [s.decode("ascii") for s in stdout.splitlines()]
def __repr__(self):
return "<PythonWrapper for {}>".format(self.executable)
@pytest.fixture
def py2(request):
return _py2_wrapper
@pytest.fixture
def py3(request):
return _py3_wrapper
@pytest.fixture(params=["py2", "py3"])
def dump(request):
return request.getfixturevalue(request.param).dump
@pytest.fixture(params=["py2", "py3"])
def load(request):
return request.getfixturevalue(request.param).load
simple_tests = [
# type: expected before/after repr
("int", "4"),
("float", "3.25"),
("complex", "(1.78+3.25j)"),
("list", "[1, 2, 3]"),
("tuple", "(1, 2, 3)"),
("dict", "{(1, 2, 3): 32}"),
]
@py.test.mark.parametrize(["tp_name", "repr"], simple_tests)
def test_simple(tp_name, repr, dump, load):
p = dump(repr)
tp, v = load(p)
assert tp == tp_name
assert v == repr
def test_set(py2, py3, dump):
p = dump("set((1, 2, 3))")
tp, v = py2.load(p)
assert tp == "set"
# assert v == "set([1, 2, 3])" # ordering prevents this assertion
assert v.startswith("set([") and v.endswith("])")
assert "1" in v and "2" in v and "3" in v
tp, v = py3.load(p)
assert tp == "set"
# assert v == "{1, 2, 3}" # ordering prevents this assertion
assert v.startswith("{") and v.endswith("}")
assert "1" in v and "2" in v and "3" in v
p = dump("set()")
tp, v = py2.load(p)
assert tp == "set"
assert v == "set([])"
tp, v = py3.load(p)
assert tp == "set"
assert v == "set()"
def test_frozenset(py2, py3, dump):
p = dump("frozenset((1, 2, 3))")
tp, v = py2.load(p)
assert tp == "frozenset"
assert v == "frozenset([1, 2, 3])"
tp, v = py3.load(p)
assert tp == "frozenset"
assert v == "frozenset({1, 2, 3})"
p = dump("frozenset()")
tp, v = py2.load(p)
assert tp == "frozenset"
assert v == "frozenset([])"
tp, v = py3.load(p)
assert tp == "frozenset"
assert v == "frozenset()"
def test_long(py2, py3):
really_big = "9223372036854775807324234"
p = py2.dump(really_big)
tp, v = py2.load(p)
assert tp == "long"
assert v == really_big + "L"
tp, v = py3.load(p)
assert tp == "int"
assert v == really_big
p = py3.dump(really_big)
tp, v = py3.load(p)
assert tp == "int"
assert v == really_big
tp, v = py2.load(p)
assert tp == "long"
assert v == really_big + "L"
def test_small_long(py2, py3):
p = py2.dump("123L")
tp, s = py2.load(p)
assert s == "123L"
tp, s = py3.load(p)
assert s == "123"
def test_bytes(py2, py3):
p = py3.dump("b'hi'")
tp, v = py2.load(p)
assert tp == "str"
assert v == "'hi'"
tp, v = py3.load(p)
assert tp == "bytes"
assert v == "b'hi'"
def test_str(py2, py3):
p = py2.dump("'xyz'")
tp, s = py2.load(p)
assert tp == "str"
assert s == "'xyz'"
tp, s = py3.load(p, "py2str_as_py3str=True")
assert tp == "str"
assert s == "'xyz'"
tp, s = py3.load(p, "py2str_as_py3str=False")
assert s == "b'xyz'"
assert tp == "bytes"
def test_unicode(py2, py3):
p = py2.dump("u'hi'")
tp, s = py2.load(p)
assert tp == "unicode"
assert s == "u'hi'"
tp, s = py3.load(p)
assert tp == "str"
assert s == "'hi'"
p = py3.dump("'hi'")
tp, s = py3.load(p)
assert tp == "str"
assert s == "'hi'"
tp, s = py2.load(p)
# depends on unserialization defaults
assert tp == "unicode"
assert s == "u'hi'"
def test_bool(py2, py3):
p = py2.dump("True")
tp, s = py2.load(p)
assert tp == "bool"
assert s == "True"
tp, s = py3.load(p)
assert s == "True"
assert tp == "bool"
p = py2.dump("False")
tp, s = py2.load(p)
assert s == "False"
def test_none(dump, load):
p = dump("None")
tp, s = load(p)
assert s == "None"
def test_tuple_nested_with_empty_in_between(py2):
p = py2.dump("(1, (), 3)")
tp, s = py2.load(p)
assert tp == "tuple"
assert s == "(1, (), 3)"
| mit | 9,194,328,453,528,448,000 | 24.629371 | 77 | 0.557981 | false |
twitter/pants | src/python/pants/task/task.py | 1 | 28963 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from abc import abstractmethod
from builtins import filter, map, object, set, str, zip
from contextlib import contextmanager
from hashlib import sha1
from itertools import repeat
from future.utils import PY3
from pants.base.exceptions import TaskError
from pants.base.worker_pool import Work
from pants.build_graph.target_filter_subsystem import TargetFilter
from pants.cache.artifact_cache import UnreadableArtifact, call_insert, call_use_cached_files
from pants.cache.cache_setup import CacheSetup
from pants.invalidation.build_invalidator import (BuildInvalidator, CacheKeyGenerator,
UncacheableCacheKeyGenerator)
from pants.invalidation.cache_manager import InvalidationCacheManager, InvalidationCheck
from pants.option.optionable import Optionable
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.option.scope import ScopeInfo
from pants.reporting.reporting_utils import items_to_report_element
from pants.source.source_root import SourceRootConfig
from pants.subsystem.subsystem_client_mixin import SubsystemClientMixin
from pants.util.dirutil import safe_mkdir, safe_rm_oldest_items_in_dir
from pants.util.memo import memoized_method, memoized_property
from pants.util.meta import AbstractClass, classproperty
class TaskBase(SubsystemClientMixin, Optionable, AbstractClass):
"""Defines a lifecycle that prepares a task for execution and provides the base machinery
needed to execute it.
Provides the base lifecycle methods that allow a task to interact with the command line, other
tasks and the user. The lifecycle is linear and run via the following sequence:
1. register_options - declare options configurable via cmd-line flag or config file.
2. product_types - declare the product types your task is capable of producing.
3. alternate_target_roots - propose a different set of target roots to use than those specified
via the CLI for the active pants run.
4. prepare - request any products needed from other tasks.
5. __init__ - distill configuration into the information needed to execute.
Provides access to the current run context for scoping work.
Also provides the basic facilities for doing work efficiently including providing a work directory
for scratch space on disk, an invalidator for checking which targets need work done on, and an
artifact cache for re-using previously cached work.
#TODO(John Sirois): Lifecycle is currently split between TaskBase and Task and lifecycle
(interface) and helpers (utility) are currently conflated. Tease these apart and narrow the scope
of the helpers. Ideally console tasks don't inherit a workdir, invalidator or build cache for
example.
"""
options_scope_category = ScopeInfo.TASK
# We set this explicitly on the synthetic subclass, so that it shares a stable name with
# its superclass, which is not necessary for regular use, but can be convenient in tests.
_stable_name = None
@classmethod
def implementation_version(cls):
"""
:API: public
"""
return [('TaskBase', 2)]
@classmethod
@memoized_method
def implementation_version_str(cls):
return '.'.join(['_'.join(map(str, x)) for x in cls.implementation_version()])
@classmethod
@memoized_method
def implementation_version_slug(cls):
return sha1(cls.implementation_version_str().encode('utf-8')).hexdigest()[:12]
@classmethod
def stable_name(cls):
"""The stable name of this task type.
We synthesize subclasses of the task types at runtime, and these synthesized subclasses
may have random names (e.g., in tests), so this gives us a stable name to use across runs,
e.g., in artifact cache references.
"""
return cls._stable_name or cls._compute_stable_name()
@classmethod
def _compute_stable_name(cls):
return '{}_{}'.format(cls.__module__, cls.__name__).replace('.', '_')
@classmethod
def subsystem_dependencies(cls):
return (super(TaskBase, cls).subsystem_dependencies() +
(CacheSetup.scoped(cls), BuildInvalidator.Factory, SourceRootConfig) +
((TargetFilter.scoped(cls),) if cls.target_filtering_enabled else tuple()))
@classmethod
def product_types(cls):
"""The list of products this Task produces. Set the product type(s) for this
task i.e. the product type(s) this task creates e.g ['classes'].
By default, each task is considered as creating a unique product type(s).
Subclasses that create products, should override this to specify their unique product type(s).
:API: public
"""
return []
@classmethod
def supports_passthru_args(cls):
"""Subclasses may override to indicate that they can use passthru args.
:API: public
"""
return False
@classmethod
def _scoped_options(cls, options):
return options[cls.options_scope]
@classmethod
def get_alternate_target_roots(cls, options, address_mapper, build_graph):
# Subclasses should not generally need to override this method.
return cls.alternate_target_roots(cls._scoped_options(options), address_mapper, build_graph)
@classmethod
def alternate_target_roots(cls, options, address_mapper, build_graph):
"""Allows a Task to propose alternate target roots from those specified on the CLI.
At most 1 unique proposal is allowed amongst all tasks involved in the run. If more than 1
unique list of target roots is proposed an error is raised during task scheduling.
:API: public
:returns list: The new target roots to use or None to accept the CLI specified target roots.
"""
@classmethod
def invoke_prepare(cls, options, round_manager):
# Subclasses should not generally need to override this method.
return cls.prepare(cls._scoped_options(options), round_manager)
@classmethod
def prepare(cls, options, round_manager):
"""Prepares a task for execution.
Called before execution and prior to any tasks that may be (indirectly) depended upon.
Typically a task that requires products from other goals would register interest in those
products here and then retrieve the requested product mappings when executed.
:API: public
"""
def __init__(self, context, workdir):
"""Subclass __init__ methods, if defined, *must* follow this idiom:
class MyTask(Task):
def __init__(self, *args, **kwargs):
super(MyTask, self).__init__(*args, **kwargs)
...
This allows us to change Task.__init__()'s arguments without
changing every subclass. If the subclass does not need its own
initialization, this method can (and should) be omitted entirely.
:API: public
"""
super(TaskBase, self).__init__()
self.context = context
self._workdir = workdir
self._task_name = type(self).__name__
self._cache_key_errors = set()
self._cache_factory = CacheSetup.create_cache_factory_for_task(self)
self._force_invalidated = False
@memoized_property
def _build_invalidator(self):
return BuildInvalidator.Factory.create(build_task=self.fingerprint)
def get_options(self):
"""Returns the option values for this task's scope.
:API: public
"""
return self.context.options.for_scope(self.options_scope)
def get_passthru_args(self):
"""Returns the passthru args for this task, if it supports them.
:API: public
"""
if not self.supports_passthru_args():
raise TaskError('{0} does not support passthru args.'.format(self.stable_name()))
else:
return self.context.options.passthru_args_for_scope(self.options_scope)
@property
def skip_execution(self):
"""Whether this task should be skipped.
Tasks can override to specify skipping behavior (e.g., based on an option).
:API: public
"""
return False
@property
def act_transitively(self):
"""Whether this task should act on the transitive closure of the target roots.
Tasks can override to specify transitivity behavior (e.g., based on an option).
Note that this property is consulted by get_targets(), but tasks that bypass that
method must make their own decision on whether to act transitively or not.
:API: public
"""
return True
@classproperty
def target_filtering_enabled(cls):
"""Whether this task should apply configured filters against targets.
Tasks can override to enable target filtering (e.g. based on tags) and must
access targets via get_targets()
:API: public
"""
return False
def get_targets(self, predicate=None):
"""Returns the candidate targets this task should act on.
This method is a convenience for processing optional transitivity. Tasks may bypass it
and make their own decisions on which targets to act on.
NOTE: This method was introduced in 2018, so at the time of writing few tasks consult it.
Instead, they query self.context.targets directly.
TODO: Fix up existing targets to consult this method, for uniformity.
Note that returned targets have not been checked for invalidation. The caller should do
so as needed, typically by calling self.invalidated().
:API: public
"""
initial_targets = (self.context.targets(predicate) if self.act_transitively
else list(filter(predicate, self.context.target_roots)))
if not self.target_filtering_enabled:
return initial_targets
else:
return self._filter_targets(initial_targets)
def _filter_targets(self, targets):
included_targets = TargetFilter.scoped_instance(self).apply(targets)
excluded_targets = set(targets).difference(included_targets)
if excluded_targets:
self.context.log.info("{} target(s) excluded".format(len(excluded_targets)))
for target in excluded_targets:
self.context.log.debug("{} excluded".format(target.address.spec))
return included_targets
@memoized_property
def workdir(self):
"""A scratch-space for this task that will be deleted by `clean-all`.
It's guaranteed that no other task has been given this workdir path to use and that the workdir
exists.
:API: public
"""
safe_mkdir(self._workdir)
return self._workdir
@memoized_property
def versioned_workdir(self):
"""The Task.workdir suffixed with a fingerprint of the Task implementation version.
When choosing whether to store values directly in `self.workdir` or below it in
the directory returned by this property, you should generally prefer this value.
:API: public
"""
versioned_workdir = os.path.join(self.workdir, self.implementation_version_slug())
safe_mkdir(versioned_workdir)
return versioned_workdir
def _options_fingerprint(self, scope):
options_hasher = sha1()
options_hasher.update(scope.encode('utf-8'))
options_fp = OptionsFingerprinter.combined_options_fingerprint_for_scope(
scope,
self.context.options,
build_graph=self.context.build_graph,
include_passthru=self.supports_passthru_args(),
)
options_hasher.update(options_fp.encode('utf-8'))
return options_hasher.hexdigest() if PY3 else options_hasher.hexdigest().decode('utf-8')
@memoized_property
def fingerprint(self):
"""Returns a fingerprint for the identity of the task.
A task fingerprint is composed of the options the task is currently running under.
Useful for invalidating unchanging targets being executed beneath changing task
options that affect outputted artifacts.
A task's fingerprint is only valid after the task has been fully initialized.
"""
hasher = sha1()
hasher.update(self.stable_name().encode('utf-8'))
hasher.update(self._options_fingerprint(self.options_scope).encode('utf-8'))
hasher.update(self.implementation_version_str().encode('utf-8'))
for dep in self.subsystem_closure_iter():
hasher.update(self._options_fingerprint(dep.options_scope).encode('utf-8'))
return hasher.hexdigest() if PY3 else hasher.hexdigest().decode('utf-8')
def artifact_cache_reads_enabled(self):
return self._cache_factory.read_cache_available()
def artifact_cache_writes_enabled(self):
return self._cache_factory.write_cache_available()
def invalidate(self):
"""Invalidates all targets for this task."""
self._build_invalidator.force_invalidate_all()
@property
def create_target_dirs(self):
"""Whether to create a results_dir per VersionedTarget in the workdir of the Task.
This defaults to the value of `self.cache_target_dirs` (as caching them requires
creating them), but may be overridden independently to create the dirs without caching
them.
:API: public
"""
return self.cache_target_dirs
@property
def cache_target_dirs(self):
"""Whether to cache files in VersionedTarget's results_dir after exiting an invalidated block.
Subclasses may override this method to return True if they wish to use this style
of "automated" caching, where each VersionedTarget is given an associated results directory,
which will automatically be uploaded to the cache. Tasks should place the output files
for each VersionedTarget in said results directory. It is highly suggested to follow this
schema for caching, rather than manually making updates to the artifact cache.
:API: public
"""
return False
@property
def incremental(self):
"""Whether this Task implements incremental building of individual targets.
Incremental tasks with `cache_target_dirs` set will have the results_dir of the previous build
for a target cloned into the results_dir for the current build (where possible). This
copy-on-write behaviour allows for immutability of the results_dir once a target has been
marked valid.
:API: public
"""
return False
@property
def cache_incremental(self):
"""For incremental tasks, indicates whether the results of incremental builds should be cached.
Deterministic per-target incremental compilation is a relatively difficult thing to implement,
so this property provides an escape hatch to avoid caching things in that riskier case.
:API: public
"""
return False
@contextmanager
def invalidated(self,
targets,
invalidate_dependents=False,
silent=False,
fingerprint_strategy=None,
topological_order=False):
"""Checks targets for invalidation, first checking the artifact cache.
Subclasses call this to figure out what to work on.
:API: public
:param targets: The targets to check for changes.
:param invalidate_dependents: If True then any targets depending on changed targets are
invalidated.
:param silent: If true, suppress logging information about target invalidation.
:param fingerprint_strategy: A FingerprintStrategy instance, which can do per task,
finer grained fingerprinting of a given Target.
:param topological_order: Whether to invalidate in dependency order.
If no exceptions are thrown by work in the block, the build cache is updated for the targets.
Note: the artifact cache is not updated. That must be done manually.
:returns: Yields an InvalidationCheck object reflecting the targets.
:rtype: InvalidationCheck
"""
invalidation_check = self._do_invalidation_check(fingerprint_strategy,
invalidate_dependents,
targets,
topological_order)
self._maybe_create_results_dirs(invalidation_check.all_vts)
if invalidation_check.invalid_vts and self.artifact_cache_reads_enabled():
with self.context.new_workunit('cache'):
cached_vts, uncached_vts, uncached_causes = \
self.check_artifact_cache(self.check_artifact_cache_for(invalidation_check))
if cached_vts:
cached_targets = [vt.target for vt in cached_vts]
self.context.run_tracker.artifact_cache_stats.add_hits(self._task_name, cached_targets)
if not silent:
self._report_targets('Using cached artifacts for ', cached_targets, '.')
if uncached_vts:
uncached_targets = [vt.target for vt in uncached_vts]
self.context.run_tracker.artifact_cache_stats.add_misses(self._task_name,
uncached_targets,
uncached_causes)
if not silent:
self._report_targets('No cached artifacts for ', uncached_targets, '.')
# Now that we've checked the cache, re-partition whatever is still invalid.
invalidation_check = InvalidationCheck(invalidation_check.all_vts, uncached_vts)
if not silent:
targets = []
for vt in invalidation_check.invalid_vts:
targets.extend(vt.targets)
if len(targets):
target_address_references = [t.address.reference() for t in targets]
msg_elements = [
'Invalidated ',
items_to_report_element(target_address_references, 'target'),
'.',
]
self.context.log.info(*msg_elements)
self._update_invalidation_report(invalidation_check, 'pre-check')
# Cache has been checked to create the full list of invalid VTs.
# Only copy previous_results for this subset of VTs.
if self.incremental:
for vts in invalidation_check.invalid_vts:
vts.copy_previous_results()
# This may seem odd: why would we need to invalidate a VersionedTargetSet that is already
# invalid? But the name force_invalidate() is slightly misleading in this context - what it
# actually does is delete the key file created at the end of the last successful task run.
# This is necessary to avoid the following scenario:
#
# 1) In state A: Task succeeds and writes some output. Key is recorded by the invalidator.
# 2) In state B: Task fails, but writes some output. Key is not recorded.
# 3) After reverting back to state A: The current key is the same as the one recorded at the
# end of step 1), so it looks like no work needs to be done, but actually the task
# must re-run, to overwrite the output written in step 2.
#
# Deleting the file ensures that if a task fails, there is no key for which we might think
# we're in a valid state.
for vts in invalidation_check.invalid_vts:
vts.force_invalidate()
# Yield the result, and then mark the targets as up to date.
yield invalidation_check
self._update_invalidation_report(invalidation_check, 'post-check')
for vt in invalidation_check.invalid_vts:
vt.update()
# Background work to clean up previous builds.
if self.context.options.for_global_scope().workdir_max_build_entries is not None:
self._launch_background_workdir_cleanup(invalidation_check.all_vts)
def _update_invalidation_report(self, invalidation_check, phase):
invalidation_report = self.context.invalidation_report
if invalidation_report:
for vts in invalidation_check.all_vts:
invalidation_report.add_vts(self._task_name, vts.targets, vts.cache_key, vts.valid,
phase=phase)
def _do_invalidation_check(self,
fingerprint_strategy,
invalidate_dependents,
targets,
topological_order):
if self._cache_factory.ignore:
cache_key_generator = UncacheableCacheKeyGenerator()
else:
cache_key_generator = CacheKeyGenerator(
self.context.options.for_global_scope().cache_key_gen_version,
self.fingerprint)
cache_manager = InvalidationCacheManager(self.workdir,
cache_key_generator,
self._build_invalidator,
invalidate_dependents,
fingerprint_strategy=fingerprint_strategy,
invalidation_report=self.context.invalidation_report,
task_name=self._task_name,
task_version_slug=self.implementation_version_slug(),
artifact_write_callback=self.maybe_write_artifact)
# If this Task's execution has been forced, invalidate all our target fingerprints.
if self._cache_factory.ignore and not self._force_invalidated:
self.invalidate()
self._force_invalidated = True
return cache_manager.check(targets, topological_order=topological_order)
def maybe_write_artifact(self, vt):
if self._should_cache_target_dir(vt):
self.update_artifact_cache([(vt, [vt.current_results_dir])])
def _launch_background_workdir_cleanup(self, vts):
workdir_build_cleanup_job = Work(self._cleanup_workdir_stale_builds,
[(vts,)],
'workdir_build_cleanup')
self.context.submit_background_work_chain([workdir_build_cleanup_job])
def _cleanup_workdir_stale_builds(self, vts):
# workdir_max_build_entries has been assured of not None before invoking this method.
workdir_max_build_entries = self.context.options.for_global_scope().workdir_max_build_entries
max_entries_per_target = max(2, workdir_max_build_entries)
for vt in vts:
live_dirs = list(vt.live_dirs())
if not live_dirs:
continue
root_dir = os.path.dirname(vt.results_dir)
safe_rm_oldest_items_in_dir(root_dir, max_entries_per_target, excludes=live_dirs)
def _should_cache_target_dir(self, vt):
"""Return true if the given vt should be written to a cache (if configured)."""
return (
self.cache_target_dirs and
vt.cacheable and
(not vt.is_incremental or self.cache_incremental) and
self.artifact_cache_writes_enabled()
)
def _maybe_create_results_dirs(self, vts):
"""If `cache_target_dirs`, create results_dirs for the given versioned targets."""
if self.create_target_dirs:
for vt in vts:
vt.create_results_dir()
def check_artifact_cache_for(self, invalidation_check):
"""Decides which VTS to check the artifact cache for.
By default we check for each invalid target. Can be overridden, e.g., to
instead check only for a single artifact for the entire target set.
"""
return invalidation_check.invalid_vts
def check_artifact_cache(self, vts):
"""Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were
satisfied/unsatisfied from the cache. Uncached VTS are also attached with their
causes for the miss: `False` indicates a legit miss while `UnreadableArtifact`
is due to either local or remote cache failures.
"""
return self.do_check_artifact_cache(vts)
def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
"""Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were
satisfied/unsatisfied from the cache.
"""
if not vts:
return [], [], []
read_cache = self._cache_factory.get_read_cache()
items = [(read_cache, vt.cache_key, vt.current_results_dir if self.cache_target_dirs else None)
for vt in vts]
res = self.context.subproc_map(call_use_cached_files, items)
cached_vts = []
uncached_vts = []
uncached_causes = []
# Note that while the input vts may represent multiple targets (for tasks that override
# check_artifact_cache_for), the ones we return must represent single targets.
# Once flattened, cached/uncached vts are in separate lists. Each uncached vts is paired
# with the reason it was missed, for stat reporting purposes.
for vt, was_in_cache in zip(vts, res):
if was_in_cache:
cached_vts.extend(vt.versioned_targets)
else:
uncached_vts.extend(vt.versioned_targets)
uncached_causes.extend(repeat(was_in_cache, len(vt.versioned_targets)))
if isinstance(was_in_cache, UnreadableArtifact):
self._cache_key_errors.update(was_in_cache.key)
if post_process_cached_vts:
post_process_cached_vts(cached_vts)
for vt in cached_vts:
vt.update()
return cached_vts, uncached_vts, uncached_causes
def update_artifact_cache(self, vts_artifactfiles_pairs):
"""Write to the artifact cache, if we're configured to.
vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
- vts is single VersionedTargetSet.
- artifactfiles is a list of absolute paths to artifacts for the VersionedTargetSet.
"""
update_artifact_cache_work = self._get_update_artifact_cache_work(vts_artifactfiles_pairs)
if update_artifact_cache_work:
self.context.submit_background_work_chain([update_artifact_cache_work],
parent_workunit_name='cache')
def _get_update_artifact_cache_work(self, vts_artifactfiles_pairs):
"""Create a Work instance to update an artifact cache, if we're configured to.
vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
- vts is single VersionedTargetSet.
- artifactfiles is a list of paths to artifacts for the VersionedTargetSet.
"""
cache = self._cache_factory.get_write_cache()
if cache:
if len(vts_artifactfiles_pairs) == 0:
return None
# Do some reporting.
targets = set()
for vts, _ in vts_artifactfiles_pairs:
targets.update(vts.targets)
self._report_targets(
'Caching artifacts for ',
list(targets),
'.',
logger=self.context.log.debug,
)
always_overwrite = self._cache_factory.overwrite()
# Cache the artifacts.
args_tuples = []
for vts, artifactfiles in vts_artifactfiles_pairs:
overwrite = always_overwrite or vts.cache_key in self._cache_key_errors
args_tuples.append((cache, vts.cache_key, artifactfiles, overwrite))
return Work(lambda x: self.context.subproc_map(call_insert, x), [(args_tuples,)], 'insert')
else:
return None
def _report_targets(self, prefix, targets, suffix, logger=None):
target_address_references = [t.address.reference() for t in targets]
msg_elements = [
prefix,
items_to_report_element(target_address_references, 'target'),
suffix,
]
logger = logger or self.context.log.info
logger(*msg_elements)
def require_single_root_target(self):
"""If a single target was specified on the cmd line, returns that target.
Otherwise throws TaskError.
:API: public
"""
target_roots = self.context.target_roots
if len(target_roots) == 0:
raise TaskError('No target specified.')
elif len(target_roots) > 1:
raise TaskError('Multiple targets specified: {}'
.format(', '.join([repr(t) for t in target_roots])))
return target_roots[0]
def determine_target_roots(self, goal_name):
"""Helper for tasks that scan for default target roots.
:param string goal_name: The goal name to use for any warning emissions.
"""
if not self.context.target_roots:
print('WARNING: No targets were matched in goal `{}`.'.format(goal_name), file=sys.stderr)
# For the v2 path, e.g. `./pants list` is a functional no-op. This matches the v2 mode behavior
# of e.g. `./pants --changed-parent=HEAD list` (w/ no changes) returning an empty result.
return self.context.target_roots
class Task(TaskBase):
"""An executable task.
Tasks form the atoms of work done by pants and when executed generally produce artifacts as a
side effect whether these be files on disk (for example compilation outputs) or characters output
to the terminal (for example dependency graph metadata).
:API: public
"""
def __init__(self, context, workdir):
"""
Add pass-thru Task Constructor for public API visibility.
:API: public
"""
super(Task, self).__init__(context, workdir)
@abstractmethod
def execute(self):
"""Executes this task.
:API: public
"""
class QuietTaskMixin(object):
"""A mixin to signal that pants shouldn't print verbose progress information for this task."""
pass
| apache-2.0 | 8,560,133,421,249,656,000 | 38.621067 | 100 | 0.686497 | false |
coderbone/SickRage-alt | setup.py | 1 | 2381 | # -*- coding: utf-8 -*
"""
Use setuptools to install sickchill
"""
import os
from setuptools import find_packages, setup
from requirements.sort import file_to_dict
try:
from babel.messages import frontend as babel
except ImportError:
babel = None
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))
with open(os.path.join(ROOT, 'readme.md'), 'r') as r:
long_description = r.read()
def get_requirements(rel_file_path):
file_path = os.path.join(ROOT, rel_file_path)
data = file_to_dict(file_path)
if data is False:
print('get_requirements failed')
return []
return [pkg['install'] for pkg in data
if pkg['active'] and pkg['install']]
requirements = get_requirements('requirements/requirements.txt')
commands = {}
if babel:
commands.update({
'compile_catalog': babel.compile_catalog,
'extract_messages': babel.extract_messages,
'init_catalog': babel.init_catalog,
'update_catalog': babel.update_catalog
})
setup(
name="sickchill",
version="0.0.1",
description="Automatic Video Library Manager for TV Shows",
long_description=long_description,
url='https://sickchill.github.io',
download_url='https://github.com/SickChill/SickChill.git',
author='miigotu',
author_email='[email protected]',
license='GPLv2',
packages=find_packages(),
# install_requires=requirements, # Commented-out for now
install_requires=[
'pytz',
'requests',
'mako',
'configobj'
],
test_suite="tests",
tests_require=[
'coveralls',
'nose',
'rednose',
'mock',
'vcrpy-unittest',
'babel',
'flake8-coding',
'isort'
],
python_requires='>=2.7, <3',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Multimedia :: Video',
],
cmdclass=commands,
message_extractors={
'gui': [
('**/views/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('**/js/*.min.js', 'ignore', None),
('**/js/*.js', 'javascript', {'input_encoding': 'utf-8'})
],
'sickchill': [('**.py', 'python', None)],
'sickbeard': [('**.py', 'python', None)],
},
)
| gpl-3.0 | 890,928,035,765,774,600 | 24.329787 | 70 | 0.582948 | false |
alphagov/notifications-api | tests/app/v2/notifications/test_post_notifications.py | 1 | 48523 | import uuid
from unittest import mock
from unittest.mock import call
import pytest
from boto.exception import SQSError
from flask import current_app, json
from app.dao import templates_dao
from app.dao.service_sms_sender_dao import dao_update_service_sms_sender
from app.models import (
EMAIL_TYPE,
INTERNATIONAL_SMS_TYPE,
NOTIFICATION_CREATED,
SMS_TYPE,
Notification,
)
from app.schema_validation import validate
from app.v2.errors import RateLimitError
from app.v2.notifications.notification_schemas import (
post_email_response,
post_sms_response,
)
from tests import create_authorization_header
from tests.app.db import (
create_api_key,
create_reply_to_email,
create_service,
create_service_sms_sender,
create_service_with_inbound_number,
create_template,
)
from tests.conftest import set_config_values
@pytest.mark.parametrize("reference", [None, "reference_from_client"])
def test_post_sms_notification_returns_201(client, sample_template_with_placeholders, mocker, reference):
mocked = mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '+447700900855',
'template_id': str(sample_template_with_placeholders.id),
'personalisation': {' Name': 'Jo'}
}
if reference:
data.update({"reference": reference})
auth_header = create_authorization_header(service_id=sample_template_with_placeholders.service_id)
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
resp_json = json.loads(response.get_data(as_text=True))
assert validate(resp_json, post_sms_response) == resp_json
notifications = Notification.query.all()
assert len(notifications) == 1
assert notifications[0].status == NOTIFICATION_CREATED
notification_id = notifications[0].id
assert notifications[0].postage is None
assert notifications[0].document_download_count is None
assert resp_json['id'] == str(notification_id)
assert resp_json['reference'] == reference
assert resp_json['content']['body'] == sample_template_with_placeholders.content.replace("(( Name))", "Jo")
assert resp_json['content']['from_number'] == current_app.config['FROM_NUMBER']
assert 'v2/notifications/{}'.format(notification_id) in resp_json['uri']
assert resp_json['template']['id'] == str(sample_template_with_placeholders.id)
assert resp_json['template']['version'] == sample_template_with_placeholders.version
assert 'services/{}/templates/{}'.format(sample_template_with_placeholders.service_id,
sample_template_with_placeholders.id) \
in resp_json['template']['uri']
assert not resp_json["scheduled_for"]
assert mocked.called
def test_post_sms_notification_uses_inbound_number_as_sender(client, notify_db_session, mocker):
service = create_service_with_inbound_number(inbound_number='1')
template = create_template(service=service, content="Hello (( Name))\nYour thing is due soon")
mocked = mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '+447700900855',
'template_id': str(template.id),
'personalisation': {' Name': 'Jo'}
}
auth_header = create_authorization_header(service_id=service.id)
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
resp_json = json.loads(response.get_data(as_text=True))
assert validate(resp_json, post_sms_response) == resp_json
notifications = Notification.query.all()
assert len(notifications) == 1
notification_id = notifications[0].id
assert resp_json['id'] == str(notification_id)
assert resp_json['content']['from_number'] == '1'
assert notifications[0].reply_to_text == '1'
mocked.assert_called_once_with([str(notification_id)], queue='send-sms-tasks')
def test_post_sms_notification_uses_inbound_number_reply_to_as_sender(client, notify_db_session, mocker):
service = create_service_with_inbound_number(inbound_number='07123123123')
template = create_template(service=service, content="Hello (( Name))\nYour thing is due soon")
mocked = mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '+447700900855',
'template_id': str(template.id),
'personalisation': {' Name': 'Jo'}
}
auth_header = create_authorization_header(service_id=service.id)
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
resp_json = json.loads(response.get_data(as_text=True))
assert validate(resp_json, post_sms_response) == resp_json
notifications = Notification.query.all()
assert len(notifications) == 1
notification_id = notifications[0].id
assert resp_json['id'] == str(notification_id)
assert resp_json['content']['from_number'] == '447123123123'
assert notifications[0].reply_to_text == '447123123123'
mocked.assert_called_once_with([str(notification_id)], queue='send-sms-tasks')
def test_post_sms_notification_returns_201_with_sms_sender_id(
client, sample_template_with_placeholders, mocker
):
sms_sender = create_service_sms_sender(service=sample_template_with_placeholders.service, sms_sender='123456')
mocked = mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '+447700900855',
'template_id': str(sample_template_with_placeholders.id),
'personalisation': {' Name': 'Jo'},
'sms_sender_id': str(sms_sender.id)
}
auth_header = create_authorization_header(service_id=sample_template_with_placeholders.service_id)
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
resp_json = json.loads(response.get_data(as_text=True))
assert validate(resp_json, post_sms_response) == resp_json
assert resp_json['content']['from_number'] == sms_sender.sms_sender
notifications = Notification.query.all()
assert len(notifications) == 1
assert notifications[0].reply_to_text == sms_sender.sms_sender
mocked.assert_called_once_with([resp_json['id']], queue='send-sms-tasks')
def test_post_sms_notification_uses_sms_sender_id_reply_to(
client, sample_template_with_placeholders, mocker
):
sms_sender = create_service_sms_sender(service=sample_template_with_placeholders.service, sms_sender='07123123123')
mocked = mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '+447700900855',
'template_id': str(sample_template_with_placeholders.id),
'personalisation': {' Name': 'Jo'},
'sms_sender_id': str(sms_sender.id)
}
auth_header = create_authorization_header(service_id=sample_template_with_placeholders.service_id)
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
resp_json = json.loads(response.get_data(as_text=True))
assert validate(resp_json, post_sms_response) == resp_json
assert resp_json['content']['from_number'] == '447123123123'
notifications = Notification.query.all()
assert len(notifications) == 1
assert notifications[0].reply_to_text == '447123123123'
mocked.assert_called_once_with([resp_json['id']], queue='send-sms-tasks')
def test_notification_reply_to_text_is_original_value_if_sender_is_changed_after_post_notification(
client, sample_template, mocker
):
sms_sender = create_service_sms_sender(service=sample_template.service, sms_sender='123456', is_default=False)
mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '+447700900855',
'template_id': str(sample_template.id),
'sms_sender_id': str(sms_sender.id)
}
auth_header = create_authorization_header(service_id=sample_template.service_id)
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
dao_update_service_sms_sender(service_id=sample_template.service_id,
service_sms_sender_id=sms_sender.id,
is_default=sms_sender.is_default,
sms_sender='updated')
assert response.status_code == 201
notifications = Notification.query.all()
assert len(notifications) == 1
assert notifications[0].reply_to_text == '123456'
def test_should_cache_template_lookups_in_memory(mocker, client, sample_template):
mock_get_template = mocker.patch(
'app.dao.templates_dao.dao_get_template_by_id_and_service_id',
wraps=templates_dao.dao_get_template_by_id_and_service_id,
)
mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '+447700900855',
'template_id': str(sample_template.id),
}
for _ in range(5):
auth_header = create_authorization_header(service_id=sample_template.service_id)
client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header]
)
assert mock_get_template.call_count == 1
assert mock_get_template.call_args_list == [
call(service_id=str(sample_template.service_id), template_id=str(sample_template.id), version=None)
]
assert Notification.query.count() == 5
def test_should_cache_template_and_service_in_redis(mocker, client, sample_template):
from app.schemas import service_schema, template_schema
mock_redis_get = mocker.patch(
'app.redis_store.get',
return_value=None,
)
mock_redis_set = mocker.patch(
'app.redis_store.set',
)
mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '+447700900855',
'template_id': str(sample_template.id),
}
auth_header = create_authorization_header(service_id=sample_template.service_id)
client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header]
)
expected_service_key = f'service-{sample_template.service_id}'
expected_templates_key = f'service-{sample_template.service_id}-template-{sample_template.id}-version-None'
assert mock_redis_get.call_args_list == [
call(expected_service_key),
call(expected_templates_key),
]
service_dict = service_schema.dump(sample_template.service).data
template_dict = template_schema.dump(sample_template).data
assert len(mock_redis_set.call_args_list) == 2
service_call, templates_call = mock_redis_set.call_args_list
assert service_call[0][0] == expected_service_key
assert json.loads(service_call[0][1]) == {'data': service_dict}
assert service_call[1]['ex'] == 604_800
assert templates_call[0][0] == expected_templates_key
assert json.loads(templates_call[0][1]) == {'data': template_dict}
assert templates_call[1]['ex'] == 604_800
def test_should_return_template_if_found_in_redis(mocker, client, sample_template):
from app.schemas import service_schema, template_schema
service_dict = service_schema.dump(sample_template.service).data
template_dict = template_schema.dump(sample_template).data
mocker.patch(
'app.redis_store.get',
side_effect=[
json.dumps({'data': service_dict}).encode('utf-8'),
json.dumps({'data': template_dict}).encode('utf-8'),
],
)
mock_get_template = mocker.patch(
'app.dao.templates_dao.dao_get_template_by_id_and_service_id'
)
mock_get_service = mocker.patch(
'app.dao.services_dao.dao_fetch_service_by_id'
)
mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '+447700900855',
'template_id': str(sample_template.id),
}
auth_header = create_authorization_header(service_id=sample_template.service_id)
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header]
)
assert response.status_code == 201
assert mock_get_template.called is False
assert mock_get_service.called is False
@pytest.mark.parametrize("notification_type, key_send_to, send_to",
[("sms", "phone_number", "+447700900855"),
("email", "email_address", "[email protected]")])
def test_post_notification_returns_400_and_missing_template(client, sample_service,
notification_type, key_send_to, send_to):
data = {
key_send_to: send_to,
'template_id': str(uuid.uuid4())
}
auth_header = create_authorization_header(service_id=sample_service.id)
response = client.post(
path='/v2/notifications/{}'.format(notification_type),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400
assert response.headers['Content-type'] == 'application/json'
error_json = json.loads(response.get_data(as_text=True))
assert error_json['status_code'] == 400
assert error_json['errors'] == [{"error": "BadRequestError",
"message": 'Template not found'}]
@pytest.mark.parametrize("notification_type, key_send_to, send_to", [
("sms", "phone_number", "+447700900855"),
("email", "email_address", "[email protected]"),
("letter", "personalisation", {"address_line_1": "The queen", "postcode": "SW1 1AA"})
])
def test_post_notification_returns_401_and_well_formed_auth_error(client, sample_template,
notification_type, key_send_to, send_to):
data = {
key_send_to: send_to,
'template_id': str(sample_template.id)
}
response = client.post(
path='/v2/notifications/{}'.format(notification_type),
data=json.dumps(data),
headers=[('Content-Type', 'application/json')])
assert response.status_code == 401
assert response.headers['Content-type'] == 'application/json'
error_resp = json.loads(response.get_data(as_text=True))
assert error_resp['status_code'] == 401
assert error_resp['errors'] == [{'error': "AuthError",
'message': 'Unauthorized: authentication token must be provided'}]
@pytest.mark.parametrize("notification_type, key_send_to, send_to",
[("sms", "phone_number", "+447700900855"),
("email", "email_address", "[email protected]")])
def test_notification_returns_400_and_for_schema_problems(client, sample_template, notification_type, key_send_to,
send_to):
data = {
key_send_to: send_to,
'template': str(sample_template.id)
}
auth_header = create_authorization_header(service_id=sample_template.service_id)
response = client.post(
path='/v2/notifications/{}'.format(notification_type),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400
assert response.headers['Content-type'] == 'application/json'
error_resp = json.loads(response.get_data(as_text=True))
assert error_resp['status_code'] == 400
assert {'error': 'ValidationError',
'message': "template_id is a required property"
} in error_resp['errors']
assert {'error': 'ValidationError',
'message':
'Additional properties are not allowed (template was unexpected)'
} in error_resp['errors']
@pytest.mark.parametrize("reference", [None, "reference_from_client"])
def test_post_email_notification_returns_201(client, sample_email_template_with_placeholders, mocker, reference):
mocked = mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
data = {
"email_address": sample_email_template_with_placeholders.service.users[0].email_address,
"template_id": sample_email_template_with_placeholders.id,
"personalisation": {"name": "Bob"}
}
if reference:
data.update({"reference": reference})
auth_header = create_authorization_header(service_id=sample_email_template_with_placeholders.service_id)
response = client.post(
path="v2/notifications/email",
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
resp_json = json.loads(response.get_data(as_text=True))
assert validate(resp_json, post_email_response) == resp_json
notification = Notification.query.one()
assert notification.status == NOTIFICATION_CREATED
assert notification.postage is None
assert resp_json['id'] == str(notification.id)
assert resp_json['reference'] == reference
assert notification.reference is None
assert notification.reply_to_text is None
assert notification.document_download_count is None
assert resp_json['content']['body'] == sample_email_template_with_placeholders.content \
.replace('((name))', 'Bob')
assert resp_json['content']['subject'] == sample_email_template_with_placeholders.subject \
.replace('((name))', 'Bob')
assert resp_json['content']['from_email'] == "{}@{}".format(
sample_email_template_with_placeholders.service.email_from, current_app.config['NOTIFY_EMAIL_DOMAIN'])
assert 'v2/notifications/{}'.format(notification.id) in resp_json['uri']
assert resp_json['template']['id'] == str(sample_email_template_with_placeholders.id)
assert resp_json['template']['version'] == sample_email_template_with_placeholders.version
assert 'services/{}/templates/{}'.format(str(sample_email_template_with_placeholders.service_id),
str(sample_email_template_with_placeholders.id)) \
in resp_json['template']['uri']
assert not resp_json["scheduled_for"]
assert mocked.called
@pytest.mark.parametrize('recipient, notification_type', [
('[email protected]', EMAIL_TYPE),
('[email protected]', EMAIL_TYPE),
('[email protected]', EMAIL_TYPE),
('07700 900000', 'sms'),
('07700 900111', 'sms'),
('07700 900222', 'sms')
])
def test_should_not_persist_or_send_notification_if_simulated_recipient(
client,
recipient,
notification_type,
sample_email_template,
sample_template,
mocker):
apply_async = mocker.patch('app.celery.provider_tasks.deliver_{}.apply_async'.format(notification_type))
if notification_type == 'sms':
data = {
'phone_number': recipient,
'template_id': str(sample_template.id)
}
else:
data = {
'email_address': recipient,
'template_id': str(sample_email_template.id)
}
auth_header = create_authorization_header(service_id=sample_email_template.service_id)
response = client.post(
path='/v2/notifications/{}'.format(notification_type),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
apply_async.assert_not_called()
assert json.loads(response.get_data(as_text=True))["id"]
assert Notification.query.count() == 0
@pytest.mark.parametrize("notification_type, key_send_to, send_to",
[("sms", "phone_number", "07700 900 855"),
("email", "email_address", "[email protected]")])
def test_send_notification_uses_priority_queue_when_template_is_marked_as_priority(
client,
sample_service,
mocker,
notification_type,
key_send_to,
send_to
):
mocker.patch('app.celery.provider_tasks.deliver_{}.apply_async'.format(notification_type))
sample = create_template(
service=sample_service,
template_type=notification_type,
process_type='priority'
)
mocked = mocker.patch('app.celery.provider_tasks.deliver_{}.apply_async'.format(notification_type))
data = {
key_send_to: send_to,
'template_id': str(sample.id)
}
auth_header = create_authorization_header(service_id=sample.service_id)
response = client.post(
path='/v2/notifications/{}'.format(notification_type),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
notification_id = json.loads(response.data)['id']
assert response.status_code == 201
mocked.assert_called_once_with([notification_id], queue='priority-tasks')
@pytest.mark.parametrize(
"notification_type, key_send_to, send_to",
[("sms", "phone_number", "07700 900 855"), ("email", "email_address", "[email protected]")]
)
def test_returns_a_429_limit_exceeded_if_rate_limit_exceeded(
client,
sample_service,
mocker,
notification_type,
key_send_to,
send_to
):
sample = create_template(service=sample_service, template_type=notification_type)
persist_mock = mocker.patch('app.v2.notifications.post_notifications.persist_notification')
deliver_mock = mocker.patch('app.v2.notifications.post_notifications.send_notification_to_queue_detached')
mocker.patch(
'app.v2.notifications.post_notifications.check_rate_limiting',
side_effect=RateLimitError("LIMIT", "INTERVAL", "TYPE"))
data = {
key_send_to: send_to,
'template_id': str(sample.id)
}
auth_header = create_authorization_header(service_id=sample.service_id)
response = client.post(
path='/v2/notifications/{}'.format(notification_type),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
error = json.loads(response.data)['errors'][0]['error']
message = json.loads(response.data)['errors'][0]['message']
status_code = json.loads(response.data)['status_code']
assert response.status_code == 429
assert error == 'RateLimitError'
assert message == 'Exceeded rate limit for key type TYPE of LIMIT requests per INTERVAL seconds'
assert status_code == 429
assert not persist_mock.called
assert not deliver_mock.called
def test_post_sms_notification_returns_400_if_not_allowed_to_send_int_sms(
client,
notify_db_session,
):
service = create_service(service_permissions=[SMS_TYPE])
template = create_template(service=service)
data = {
'phone_number': '20-12-1234-1234',
'template_id': template.id
}
auth_header = create_authorization_header(service_id=service.id)
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header]
)
assert response.status_code == 400
assert response.headers['Content-type'] == 'application/json'
error_json = json.loads(response.get_data(as_text=True))
assert error_json['status_code'] == 400
assert error_json['errors'] == [
{"error": "BadRequestError", "message": 'Cannot send to international mobile numbers'}
]
def test_post_sms_notification_with_archived_reply_to_id_returns_400(client, sample_template, mocker):
archived_sender = create_service_sms_sender(
sample_template.service,
'12345',
is_default=False,
archived=True)
mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
data = {
"phone_number": '+447700900855',
"template_id": sample_template.id,
'sms_sender_id': archived_sender.id
}
auth_header = create_authorization_header(service_id=sample_template.service_id)
response = client.post(
path="v2/notifications/sms",
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400
resp_json = json.loads(response.get_data(as_text=True))
assert 'sms_sender_id {} does not exist in database for service id {}'. \
format(archived_sender.id, sample_template.service_id) in resp_json['errors'][0]['message']
assert 'BadRequestError' in resp_json['errors'][0]['error']
@pytest.mark.parametrize('recipient,label,permission_type, notification_type,expected_error', [
('07700 900000', 'phone_number', 'email', 'sms', 'text messages'),
('[email protected]', 'email_address', 'sms', 'email', 'emails')])
def test_post_sms_notification_returns_400_if_not_allowed_to_send_notification(
notify_db_session, client, recipient, label, permission_type, notification_type, expected_error
):
service = create_service(service_permissions=[permission_type])
sample_template_without_permission = create_template(service=service, template_type=notification_type)
data = {
label: recipient,
'template_id': sample_template_without_permission.id
}
auth_header = create_authorization_header(service_id=sample_template_without_permission.service.id)
response = client.post(
path='/v2/notifications/{}'.format(sample_template_without_permission.template_type),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400
assert response.headers['Content-type'] == 'application/json'
error_json = json.loads(response.get_data(as_text=True))
assert error_json['status_code'] == 400
assert error_json['errors'] == [
{"error": "BadRequestError", "message": "Service is not allowed to send {}".format(expected_error)}
]
@pytest.mark.parametrize('restricted', [True, False])
def test_post_sms_notification_returns_400_if_number_not_in_guest_list(
notify_db_session, client, restricted
):
service = create_service(restricted=restricted, service_permissions=[SMS_TYPE, INTERNATIONAL_SMS_TYPE])
template = create_template(service=service)
create_api_key(service=service, key_type='team')
data = {
"phone_number": '+327700900855',
"template_id": template.id,
}
auth_header = create_authorization_header(service_id=service.id, key_type='team')
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400
error_json = json.loads(response.get_data(as_text=True))
assert error_json['status_code'] == 400
assert error_json['errors'] == [
{"error": "BadRequestError", "message": 'Can’t send to this recipient using a team-only API key'}
]
def test_post_sms_notification_returns_201_if_allowed_to_send_int_sms(
sample_service,
sample_template,
client,
mocker,
):
mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '20-12-1234-1234',
'template_id': sample_template.id
}
auth_header = create_authorization_header(service_id=sample_service.id)
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
assert response.headers['Content-type'] == 'application/json'
def test_post_sms_should_persist_supplied_sms_number(client, sample_template_with_placeholders, mocker):
mocked = mocker.patch('app.celery.provider_tasks.deliver_sms.apply_async')
data = {
'phone_number': '+(44) 77009-00855',
'template_id': str(sample_template_with_placeholders.id),
'personalisation': {' Name': 'Jo'}
}
auth_header = create_authorization_header(service_id=sample_template_with_placeholders.service_id)
response = client.post(
path='/v2/notifications/sms',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
resp_json = json.loads(response.get_data(as_text=True))
notifications = Notification.query.all()
assert len(notifications) == 1
notification_id = notifications[0].id
assert '+(44) 77009-00855' == notifications[0].to
assert resp_json['id'] == str(notification_id)
assert mocked.called
def test_post_notification_raises_bad_request_if_not_valid_notification_type(client, sample_service):
auth_header = create_authorization_header(service_id=sample_service.id)
response = client.post(
'/v2/notifications/foo',
data='{}',
headers=[('Content-Type', 'application/json'), auth_header]
)
assert response.status_code == 404
error_json = json.loads(response.get_data(as_text=True))
assert 'The requested URL was not found on the server.' in error_json['message']
@pytest.mark.parametrize("notification_type",
['sms', 'email'])
def test_post_notification_with_wrong_type_of_sender(
client,
sample_template,
sample_email_template,
notification_type,
fake_uuid):
if notification_type == EMAIL_TYPE:
template = sample_email_template
form_label = 'sms_sender_id'
data = {
'email_address': '[email protected]',
'template_id': str(sample_email_template.id),
form_label: fake_uuid
}
elif notification_type == SMS_TYPE:
template = sample_template
form_label = 'email_reply_to_id'
data = {
'phone_number': '+447700900855',
'template_id': str(template.id),
form_label: fake_uuid
}
auth_header = create_authorization_header(service_id=template.service_id)
response = client.post(
path='/v2/notifications/{}'.format(notification_type),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400
resp_json = json.loads(response.get_data(as_text=True))
assert 'Additional properties are not allowed ({} was unexpected)'.format(form_label) \
in resp_json['errors'][0]['message']
assert 'ValidationError' in resp_json['errors'][0]['error']
def test_post_email_notification_with_valid_reply_to_id_returns_201(client, sample_email_template, mocker):
reply_to_email = create_reply_to_email(sample_email_template.service, '[email protected]')
mocked = mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
data = {
"email_address": sample_email_template.service.users[0].email_address,
"template_id": sample_email_template.id,
'email_reply_to_id': reply_to_email.id
}
auth_header = create_authorization_header(service_id=sample_email_template.service_id)
response = client.post(
path="v2/notifications/email",
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
resp_json = json.loads(response.get_data(as_text=True))
assert validate(resp_json, post_email_response) == resp_json
notification = Notification.query.first()
assert notification.reply_to_text == '[email protected]'
assert resp_json['id'] == str(notification.id)
assert mocked.called
assert notification.reply_to_text == reply_to_email.email_address
def test_post_email_notification_with_invalid_reply_to_id_returns_400(client, sample_email_template, mocker, fake_uuid):
mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
data = {
"email_address": sample_email_template.service.users[0].email_address,
"template_id": sample_email_template.id,
'email_reply_to_id': fake_uuid
}
auth_header = create_authorization_header(service_id=sample_email_template.service_id)
response = client.post(
path="v2/notifications/email",
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400
resp_json = json.loads(response.get_data(as_text=True))
assert 'email_reply_to_id {} does not exist in database for service id {}'. \
format(fake_uuid, sample_email_template.service_id) in resp_json['errors'][0]['message']
assert 'BadRequestError' in resp_json['errors'][0]['error']
def test_post_email_notification_with_archived_reply_to_id_returns_400(client, sample_email_template, mocker):
archived_reply_to = create_reply_to_email(
sample_email_template.service,
'[email protected]',
is_default=False,
archived=True)
mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
data = {
"email_address": '[email protected]',
"template_id": sample_email_template.id,
'email_reply_to_id': archived_reply_to.id
}
auth_header = create_authorization_header(service_id=sample_email_template.service_id)
response = client.post(
path="v2/notifications/email",
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400
resp_json = json.loads(response.get_data(as_text=True))
assert 'email_reply_to_id {} does not exist in database for service id {}'. \
format(archived_reply_to.id, sample_email_template.service_id) in resp_json['errors'][0]['message']
assert 'BadRequestError' in resp_json['errors'][0]['error']
@pytest.mark.parametrize(
'csv_param',
(
{'is_csv': None},
{'is_csv': False},
{'is_csv': True},
{},
)
)
def test_post_notification_with_document_upload(client, notify_db_session, mocker, csv_param):
service = create_service(service_permissions=[EMAIL_TYPE])
service.contact_link = '[email protected]'
template = create_template(
service=service,
template_type='email',
content="Document 1: ((first_link)). Document 2: ((second_link))"
)
mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
document_download_mock = mocker.patch('app.v2.notifications.post_notifications.document_download_client')
document_download_mock.upload_document.side_effect = lambda service_id, content, is_csv: f'{content}-link'
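    # the stubbed upload_document echoes back "<content>-link", so the personalisation assertions below can verify it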
data = {
"email_address": service.users[0].email_address,
"template_id": template.id,
"personalisation": {
"first_link": {"file": "abababab", **csv_param},
"second_link": {"file": "cdcdcdcd", **csv_param}
}
}
auth_header = create_authorization_header(service_id=service.id)
response = client.post(
path="v2/notifications/email",
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201, response.get_data(as_text=True)
resp_json = json.loads(response.get_data(as_text=True))
assert validate(resp_json, post_email_response) == resp_json
assert document_download_mock.upload_document.call_args_list == [
call(str(service.id), 'abababab', csv_param.get('is_csv')),
call(str(service.id), 'cdcdcdcd', csv_param.get('is_csv'))
]
notification = Notification.query.one()
assert notification.status == NOTIFICATION_CREATED
assert notification.personalisation == {
'first_link': 'abababab-link',
'second_link': 'cdcdcdcd-link'
}
assert notification.document_download_count == 2
assert resp_json['content']['body'] == 'Document 1: abababab-link. Document 2: cdcdcdcd-link'
def test_post_notification_with_document_upload_simulated(client, notify_db_session, mocker):
service = create_service(service_permissions=[EMAIL_TYPE])
service.contact_link = '[email protected]'
template = create_template(
service=service,
template_type='email',
content="Document: ((document))"
)
mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
document_download_mock = mocker.patch('app.v2.notifications.post_notifications.document_download_client')
document_download_mock.get_upload_url.return_value = 'https://document-url'
data = {
"email_address": '[email protected]',
"template_id": template.id,
"personalisation": {"document": {"file": "abababab"}}
}
auth_header = create_authorization_header(service_id=service.id)
response = client.post(
path="v2/notifications/email",
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201, response.get_data(as_text=True)
resp_json = json.loads(response.get_data(as_text=True))
assert validate(resp_json, post_email_response) == resp_json
assert resp_json['content']['body'] == 'Document: https://document-url/test-document'
def test_post_notification_without_document_upload_permission(client, notify_db_session, mocker):
service = create_service(service_permissions=[EMAIL_TYPE])
template = create_template(
service=service,
template_type='email',
content="Document: ((document))"
)
mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
document_download_mock = mocker.patch('app.v2.notifications.post_notifications.document_download_client')
document_download_mock.upload_document.return_value = 'https://document-url/'
data = {
"email_address": service.users[0].email_address,
"template_id": template.id,
"personalisation": {"document": {"file": "abababab"}}
}
auth_header = create_authorization_header(service_id=service.id)
response = client.post(
path="v2/notifications/email",
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400, response.get_data(as_text=True)
def test_post_notification_returns_400_when_get_json_throws_exception(client, sample_email_template):
auth_header = create_authorization_header(service_id=sample_email_template.service_id)
response = client.post(
path="v2/notifications/email",
data="[",
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400
@pytest.mark.parametrize('notification_type, content_type',
[('email', 'application/json'),
('email', 'application/text'),
('sms', 'application/json'),
('sms', 'application/text')]
)
def test_post_notification_when_payload_is_invalid_json_returns_400(
client, sample_service, notification_type, content_type):
auth_header = create_authorization_header(service_id=sample_service.id)
payload_not_json = {
"template_id": "dont-convert-to-json",
}
response = client.post(
path='/v2/notifications/{}'.format(notification_type),
data=payload_not_json,
headers=[('Content-Type', content_type), auth_header],
)
assert response.status_code == 400
error_msg = json.loads(response.get_data(as_text=True))["errors"][0]["message"]
assert error_msg == 'Invalid JSON supplied in POST data'
@pytest.mark.parametrize('notification_type', ['email', 'sms'])
def test_post_notification_returns_201_when_content_type_is_missing_but_payload_is_valid_json(
client, sample_service, notification_type, mocker):
template = create_template(service=sample_service, template_type=notification_type)
mocker.patch('app.celery.provider_tasks.deliver_{}.apply_async'.format(notification_type))
auth_header = create_authorization_header(service_id=sample_service.id)
valid_json = {
"template_id": str(template.id),
}
if notification_type == 'email':
valid_json.update({"email_address": sample_service.users[0].email_address})
else:
valid_json.update({"phone_number": "+447700900855"})
response = client.post(
path='/v2/notifications/{}'.format(notification_type),
data=json.dumps(valid_json),
headers=[auth_header],
)
assert response.status_code == 201
@pytest.mark.parametrize('notification_type', ['email', 'sms'])
def test_post_email_notification_when_data_is_empty_returns_400(client, sample_service, notification_type):
auth_header = create_authorization_header(service_id=sample_service.id)
data = None
response = client.post(
path='/v2/notifications/{}'.format(notification_type),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header],
)
error_msg = json.loads(response.get_data(as_text=True))["errors"][0]["message"]
assert response.status_code == 400
if notification_type == 'sms':
assert error_msg == 'phone_number is a required property'
else:
assert error_msg == 'email_address is a required property'
@pytest.mark.parametrize("notification_type", ("email", "sms"))
def test_post_notifications_saves_email_or_sms_to_queue(client, notify_db_session, mocker, notification_type):
save_task = mocker.patch(f"app.celery.tasks.save_api_{notification_type}.apply_async")
mock_send_task = mocker.patch(f'app.celery.provider_tasks.deliver_{notification_type}.apply_async')
service = create_service(
service_name='high volume service',
)
with set_config_values(current_app, {
'HIGH_VOLUME_SERVICE': [str(service.id)],
}):
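        # services listed in HIGH_VOLUME_SERVICE get their notifications queued via the save-api-* tasks instead of being delivered directly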
template = create_template(service=service, content='((message))', template_type=notification_type)
data = {
"template_id": template.id,
"personalisation": {"message": "Dear citizen, have a nice day"}
}
data.update({"email_address": "[email protected]"}) if notification_type == EMAIL_TYPE \
else data.update({"phone_number": "+447700900855"})
response = client.post(
path=f'/v2/notifications/{notification_type}',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header(service_id=service.id)]
)
json_resp = response.get_json()
assert response.status_code == 201
assert json_resp['id']
assert json_resp['content']['body'] == "Dear citizen, have a nice day"
assert json_resp['template']['id'] == str(template.id)
save_task.assert_called_once_with([mock.ANY], queue=f'save-api-{notification_type}-tasks')
assert not mock_send_task.called
assert len(Notification.query.all()) == 0
@pytest.mark.parametrize("notification_type", ("email", "sms"))
def test_post_notifications_saves_email_or_sms_normally_if_saving_to_queue_fails(
client, notify_db_session, mocker, notification_type
):
save_task = mocker.patch(
f"app.celery.tasks.save_api_{notification_type}.apply_async",
side_effect=SQSError({'some': 'json'}, 'some opname')
)
mock_send_task = mocker.patch(f'app.celery.provider_tasks.deliver_{notification_type}.apply_async')
service = create_service(
service_name='high volume service',
)
with set_config_values(current_app, {
'HIGH_VOLUME_SERVICE': [str(service.id)],
}):
template = create_template(service=service, content='((message))', template_type=notification_type)
data = {
"template_id": template.id,
"personalisation": {"message": "Dear citizen, have a nice day"}
}
data.update({"email_address": "[email protected]"}) if notification_type == EMAIL_TYPE \
else data.update({"phone_number": "+447700900855"})
response = client.post(
path=f'/v2/notifications/{notification_type}',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), create_authorization_header(service_id=service.id)]
)
json_resp = response.get_json()
assert response.status_code == 201
assert json_resp['id']
assert json_resp['content']['body'] == "Dear citizen, have a nice day"
assert json_resp['template']['id'] == str(template.id)
save_task.assert_called_once_with([mock.ANY], queue=f'save-api-{notification_type}-tasks')
mock_send_task.assert_called_once_with([json_resp['id']], queue=f'send-{notification_type}-tasks')
assert Notification.query.count() == 1
@pytest.mark.parametrize("notification_type", ("email", "sms"))
def test_post_notifications_doesnt_use_save_queue_for_test_notifications(
client, notify_db_session, mocker, notification_type
):
save_task = mocker.patch(f"app.celery.tasks.save_api_{notification_type}.apply_async")
mock_send_task = mocker.patch(f'app.celery.provider_tasks.deliver_{notification_type}.apply_async')
service = create_service(
service_name='high volume service',
)
with set_config_values(current_app, {
'HIGH_VOLUME_SERVICE': [str(service.id)],
}):
template = create_template(service=service, content='((message))', template_type=notification_type)
data = {
"template_id": template.id,
"personalisation": {"message": "Dear citizen, have a nice day"}
}
data.update({"email_address": "[email protected]"}) if notification_type == EMAIL_TYPE \
else data.update({"phone_number": "+447700900855"})
response = client.post(
path=f'/v2/notifications/{notification_type}',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'),
create_authorization_header(service_id=service.id, key_type='test')]
)
json_resp = response.get_json()
assert response.status_code == 201
assert json_resp['id']
assert json_resp['content']['body'] == "Dear citizen, have a nice day"
assert json_resp['template']['id'] == str(template.id)
assert mock_send_task.called
assert not save_task.called
assert len(Notification.query.all()) == 1
def test_post_notification_does_not_use_save_queue_for_letters(client, sample_letter_template, mocker):
mock_save = mocker.patch("app.v2.notifications.post_notifications.save_email_or_sms_to_queue")
mock_create_pdf_task = mocker.patch('app.celery.tasks.letters_pdf_tasks.get_pdf_for_templated_letter.apply_async')
with set_config_values(current_app, {
'HIGH_VOLUME_SERVICE': [str(sample_letter_template.service_id)],
}):
data = {
'template_id': str(sample_letter_template.id),
'personalisation': {
'address_line_1': 'Her Royal Highness Queen Elizabeth II',
'address_line_2': 'Buckingham Palace',
'address_line_3': 'London',
'postcode': 'SW1 1AA',
}
}
response = client.post(
path='/v2/notifications/letter',
data=json.dumps(data),
headers=[('Content-Type', 'application/json'),
create_authorization_header(service_id=sample_letter_template.service_id)]
)
assert response.status_code == 201
json_resp = response.get_json()
assert not mock_save.called
mock_create_pdf_task.assert_called_once_with([str(json_resp['id'])], queue='create-letters-pdf-tasks')
| mit | 2,010,469,411,737,156,000 | 40.224299 | 120 | 0.655139 | false |
CnPaMeng/WeiboMsgBackupGUI | sina/loginsinacom.py | 1 | 22729 | #!/usr/bin/python
#-*- encoding: utf-8 -*-
# This file is part of the Pameng,
# Pameng website: http://www.cnpameng.com/,
# Sina weibo: http://weibo.com/cnpameng.
# This file is part of WeiboMsgBackup.
# Copyright (C) 2013 Pameng.
# Pameng <[email protected]>, 2013.
# WeiboMsgBackup is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# WeiboMsgBackup is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with WeiboMsgBackup; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
import urllib2
import cookielib
import time
import datetime
import json
import re
import random
import urllib
import base64
import StringIO
import gzip
from model.log4py import Log4py
import sys
from model import syscontext
import os
import wx
import rsa
from rsa import transform
logger = Log4py().getLogger("run")
class LoginSinaCom():
def __init__(self, **kwargs):
#INIT cookie load object
self.cj = cookielib.LWPCookieJar()
self.cookie_support = urllib2.HTTPCookieProcessor(self.cj)
self.opener = urllib2.build_opener(self.cookie_support, urllib2.HTTPHandler)
urllib2.install_opener(self.opener)
self.soft_path = kwargs.get("soft_path", "")
self.cookiefile = os.path.join(self.soft_path, "cookie.dat")
self.proxyip = kwargs.get("proxyip", "")
self.pcid = ""
self.servertime = ""
self.nonce = ""
self.pubkey = ''
self.rsakv = ''
def __get_millitime(self):
""" get mill times """
pre = str(int(time.time()))
pos = str(datetime.datetime.now().microsecond)[:3]
p = pre + pos
return p
def get_servertime(self, login_un):
""" get sine server time """
url = 'http://login.sina.com.cn/sso/prelogin.php?entry=account&callback=sinaSSOController.preloginCallBack&su=&rsakt=mod&client=ssologin.js(v1.4.2)&_=%s' % self.__get_millitime()
result = {}
servertime = None
nonce = None
headers = self.__get_headers()
headers['Host'] = 'login.sina.com.cn'
headers['Accept'] = '*/*'
headers['Referer'] = 'http://weibo.com/'
del headers['Accept-encoding']
for i in range(3): #@UnusedVariable
req = self.pack_request(url, headers)
data = urllib2.urlopen(req).read()
p = re.compile('\((.*)\)')
try:
json_data = p.search(data).group(1)
data = json.loads(json_data)
servertime = str(data['servertime'])
nonce = data['nonce']
result["servertime"] = servertime
result["nonce"] = nonce
result["rsakv"] = str(data['rsakv'])
result["pubkey"] = str(data['pubkey'])
self.pcid = str(data['pcid'])
break
except:
                msg = u'Get servertime error!'
logger.error(msg)
continue
return result
def get_global_id(self):
""" get sina session id """
time = self.__get_millitime()
url = "http://beacon.sina.com.cn/a.gif"
headers = self.__get_headers()
headers['Host'] = 'beacon.sina.com.cn'
headers['Accept'] = 'image/png,image/*;q=0.8,*/*;q=0.5'
headers['Referer'] = 'http://weibo.com/'
req = self.pack_request(url, headers)
urllib2.urlopen(req)
def get_random_nonce(self, range_num=6):
""" get random nonce key """
nonce = ""
for i in range(range_num): #@UnusedVariable
nonce += random.choice('QWERTYUIOPASDFGHJKLZXCVBNM1234567890')
return nonce
def dec2hex(self, string_num):
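        # convert a decimal integer (int or numeric string) to an uppercase hexadecimal string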
base = [str(x) for x in range(10)] + [chr(x) for x in range(ord('A'), ord('A')+6)]
num = int(string_num)
mid = []
while True:
if num == 0: break
num, rem = divmod(num, 16)
mid.append(base[rem])
return ''.join([str(x) for x in mid[::-1]])
def get_pwd(self, pwd, servertime, nonce):
#pwd1 = hashlib.sha1(pwd).hexdigest()
#pwd2 = hashlib.sha1(pwd1).hexdigest()
#pwd3_ = pwd2 + servertime + nonce
#pwd3 = hashlib.sha1(pwd3_).hexdigest()
#return pwd3
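        # rsa2 scheme: RSA-encrypt "servertime\tnonce\npassword" with Sina's public key, then hex-encode the result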
p = int(self.pubkey, 16)
pub_key = rsa.PublicKey(p, int('10001', 16))
pwd = '%s\t%s\n%s' % (servertime, nonce, pwd)
pwd = (self.dec2hex(transform.bytes2int(rsa.encrypt(pwd.encode('utf-8'), pub_key))))
return pwd
def get_user(self, username):
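        # build the "su" login field: URL-quote the account name, then base64-encode it (dropping the trailing newline)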
username_ = urllib.quote(username)
username = base64.encodestring(username_)[:-1]
return username
def save_verifycode(self, url):
try:
cookiestr = ""
for cookie in self.cj.as_lwp_str(True, True).split("\n"):
cookie = cookie.split(";")[0]
cookie = cookie.replace("\"", "").replace("Set-Cookie3: ", " ").strip() + ";"
cookiestr += cookie
headers = {'Host': 'login.sina.com.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:13.0) Gecko/20100101 Firefox/13.0.1',
'Accept': 'image/png,image/*;q=0.8,*/*;q=0.5',
#'Accept-encoding': 'gzip, deflate',
'Accept-Language': 'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3',
'Connection': 'keep-alive',
'Referer' : 'http://weibo.com/',
'Cookie' : cookiestr,
}
req = self.pack_request(url, headers)
response = urllib2.urlopen(req, timeout=10)
content = response.read()
f = open(os.path.join(self.soft_path, "pin.png"), "wb")
f.write(content)
f.flush()
f.close()
except:
logger.error(u"save verify code error.")
def login(self, login_un, login_pw):
loginFalg = False
try:
try:
stObj = self.get_servertime(login_un)
self.servertime = stObj.get("servertime")
self.nonce = stObj.get("nonce")
self.pubkey = stObj.get("pubkey")
self.rsakv = stObj.get("rsakv")
except:
return False
            # get the session ID
self.get_global_id()
loginHtml = self.do_login(login_un, login_pw)
loginHtml = loginHtml.replace('"', "'")
#print loginHtml
#p = re.compile('location\.replace\(\'(.*?)\'\)')
try:
p = re.compile('location\.replace\(\'(.*?)\'\)')
login_url = p.search(loginHtml).group(1)
#print login_url
if "retcode=0" in loginHtml:
return self.redo_login(login_url)
                # does the verify code need to be typed in manually?
if syscontext.VERIFY_INPUT_FLAG:
logger.info(u"Allow user type verify code.")
pass
else:
logger.error(u"Enable input verify code,return failure.")
return False
                # a verify code is required
if "retcode=5" in loginHtml:
logger.error(u"password or account error.")
return False
if "retcode=4040" in loginHtml:
logger.error(u"do login too much times.")
return False
                # this time a verify code really is required: retcode 4049
if "retcode=4049" in login_url:
for i in range(3):
logger.info(u"need verify code.")
verifycode_url = 'http://login.sina.com.cn/cgi/pin.php?r=%s&s=0&p=%s' % (random.randint(20000000,99999999), self.pcid)
self.save_verifycode(verifycode_url)
syscontext.VERIFY_CODE = ""
codeimg = os.path.join(os.path.join(syscontext.userentity.get("path", ""), syscontext.FILE_PATH_DEFAULT), "pin.png")
logger.info(u"verify code img path:%s." % codeimg)
try:
window = syscontext.MAIN_WINDOW
genthread = syscontext.MAIN_GENTHREAD
wx.CallAfter(window.EnableMainWin, False)
wx.CallAfter(window.ShowVerifyCode, codeimg)
#print "before self.acquire"
genthread.lock.acquire()
genthread.lockcondition.wait()
genthread.lock.release()
#print "after self.release"
#veroifyFrame = VerifyCodeFrame(window, filename=codeimg)
#veroifyFrame.Center()
#veroifyFrame.Show(True)
#app.MainLoop()
except:
s = sys.exc_info()
msg = (u"app error %s happened on line %d" % (s[1], s[2].tb_lineno))
logger.error(msg)
door = syscontext.VERIFY_CODE
logger.error(u"get input verify code:%s" % door)
                        # log in again with the verify code attached
self.nonce = self.get_random_nonce()
loginHtml = self.do_login(login_un, login_pw, door=door)
loginHtml = loginHtml.replace('"', "'")
p = re.compile('location\.replace\(\'(.*?)\'\)')
if p.search(loginHtml):
login_url = p.search(loginHtml).group(1)
return self.redo_login(login_url)
else:
if "retcode=2070" in loginHtml:
                                # the verify code was rejected
logger.error(u"verify code:%s error." % door)
continue
else:
break
except:
s = sys.exc_info()
msg = (u"do login %s happened on line %d" % (s[1], s[2].tb_lineno))
logger.error(msg)
loginFalg = False
except Exception:
s = sys.exc_info()
msg = (u"login: %s happened on line %d" % (s[1], s[2].tb_lineno))
logger.error(msg)
return loginFalg
def redo_login(self, login_url):
try:
headers = self.__get_headers()
headers['Referer'] = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.2)'
req = self.pack_request(login_url, headers)
urllib2.urlopen(req)
#self.cj.clear(name="Apache", domain=".sina.com.cn", path="/")
#self.cj.clear(name="SINAGLOBAL", domain=".sina.com.cn", path="/")
self.cj.save(self.cookiefile, True, True)
msg = u'login success'
logger.info(msg)
loginFalg = True
except:
s = sys.exc_info()
msg = (u"redo_login %s happened on line %d" % (s[1], s[2].tb_lineno))
logger.error(msg)
loginFalg = False
return loginFalg
def do_login(self, login_un, login_pw, door=""):
try:
            loginFalg = False # login status
            username = login_un # weibo account
            pwd = login_pw # weibo password
url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.2)'
#POST DATA for login
postdata = {
# 'entry': 'weibo',
# 'gateway': '1',
# 'from': '',
# 'savestate': '7',
# 'userticket': '1',
# 'ssosimplelogin': '1',
# 'vsnf': '1',
# 'vsnval': '',
# 'su': '',
# 'service': 'miniblog',
# 'servertime': '',
# 'nonce': '',
# 'pwencode': 'wsse',
# 'sp': '',
# 'encoding': 'UTF-8',
# 'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
# 'returntype': 'META'
'entry': 'weibo',
'gateway': '1',
'from': '',
'savestate': '7',
'userticket': '1',
'pagerefer' : '',
'ssosimplelogin': '1',
'vsnf': '1',
'vsnval': '',
'service': 'miniblog',
'pwencode': 'rsa2',
'rsakv' : self.rsakv,
'encoding': 'UTF-8',
'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
'returntype': 'META',
'prelt' : '26',
}
postdata['servertime'] = self.servertime
postdata['nonce'] = self.nonce
postdata['su'] = self.get_user(username)
postdata['sp'] = self.get_pwd(pwd, self.servertime, self.nonce).lower()
            # when a verify code is required for login
if door:
postdata['pcid'] = self.pcid
postdata['door'] = door.lower()
#headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; rv:13.0) Gecko/20100101 Firefox/13.0.1'}
headers = {'Host': 'login.sina.com.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:17.0) Gecko/20100101 Firefox/17.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-encoding': 'gzip, deflate',
'Accept-Language': 'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3',
#'Accept-Charset': 'GB2312,utf-8;q=0.7,*;q=0.7',
'Connection': 'keep-alive',
'Referer' : 'http://weibo.com/',
'Content-Type': 'application/x-www-form-urlencoded',
}
req = self.pack_request(url, headers, postdata)
result = urllib2.urlopen(req)
#cj.save(cookiefile, True, True)
if result.info().get("Content-Encoding") == 'gzip':
text = self.gzip_data(result.read())
else:
text = result.read()
return text
except:
s = sys.exc_info()
msg = (u"do_login: %s happened on line %d" % (s[1], s[2].tb_lineno))
logger.error(msg)
return loginFalg
def check_cookie(self, un, pw, softPath):
loginFalg = True
self.cookiefile = os.path.join(softPath, "cookie.dat")
if os.path.exists(self.cookiefile):
msg = u"cookie dat exist."
logger.info(msg)
if "Set-Cookie" not in open(self.cookiefile,'r').read():
msg = u"but does not contain a valid cookie."
logger.info(msg)
loginFalg = self.login(un, pw)
else:
loginFalg = self.login(un, pw)
if loginFalg:
return self.valid_cookie()
else:
return False
    '''
    # When the html argument is empty, the page is fetched first;
    # must be called after login().
    # Returns a cookie string or False.
    '''
def valid_cookie(self, html=""):
#http://weibo.com/signup/signup
html = str(html)
if not html:
headers = self.__get_headers()
html = self.get_content_head(url="http://weibo.com/kaifulee", headers=headers)
if not html:
msg = u"need relogin."
logger.error(msg)
self.clear_cookiedat(self.cookiefile) #clear cookie file
return False
html = str(html)
html = html.replace('"', "'")
if "sinaSSOController" in html:
p = re.compile('location\.replace\(\'(.*?)\'\)')
#p = re.compile('location\.replace\("(.*?)"\)')
try:
login_url = p.search(html).group(1)
headers = self.__get_headers()
headers['Host'] = 'account.weibo.com'
req = self.pack_request(url=login_url, headers=headers)
result = urllib2.urlopen(req)
#self.cj.clear(name="Apache", domain=".sina.com.cn", path="/")
#self.cj.clear(name="SINAGLOBAL", domain=".sina.com.cn", path="/")
self.cj.save(self.cookiefile, True, True)
if result.info().get("Content-Encoding") == 'gzip':
                    html = self.gzip_data(result.read())
else:
html = result.read()
except:
msg = u"relogin failure."
logger.error(msg)
self.clear_cookiedat(self.cookiefile)
return False
if "违反了新浪微博的安全检测规则" in html:
msg = u"cookie failure."
logger.error(msg)
self.clear_cookiedat(self.cookiefile) #clear cookie file
return False
elif "您的帐号存在异常" in html and "解除限制" in html:
msg = u"账号被限制."
logger.error(msg)
self.clear_cookiedat(self.cookiefile)#clear cookie file
return False
elif "$CONFIG['islogin'] = '0'" in html:
msg = u"登录失败."
logger.error(msg)
self.clear_cookiedat(self.cookiefile)#clear cookie file
return False
elif "$CONFIG['islogin']='1'" in html:
#print "cookie success."
msg = u"cookie success."
logger.info(msg)
#print cj.as_lwp_str(True, True).replace("\n", ";").replace("Set-Cookie3: ", " ").strip()
#cokiestr = ""
#for cookie in self.cj.as_lwp_str(True, True).split("\n"):
# if "Apache" in cookie or "SINAGLOBAL" in cookie:
# continue
# cookie = cookie.split(";")[0]
# cookie = cookie.replace("\"", "").replace("Set-Cookie3: ", " ").strip() + ";"
# cokiestr += cookie
self.cj.save(self.cookiefile, True, True)
return True
else:
msg = u"登录失败."
self.clear_cookiedat(self.cookiefile) #clear cookie file
logger.error(msg)
return False
def get_content_head(self, url, headers={}, data=None):
content = ""
try:
if os.path.exists(self.cookiefile):
self.cj.revert(self.cookiefile, True, True)
self.cookie_support = urllib2.HTTPCookieProcessor(self.cj)
self.opener = urllib2.build_opener(self.cookie_support, urllib2.HTTPHandler)
urllib2.install_opener(self.opener)
else:
return ""
req = self.pack_request(url=url, headers=headers, data=data)
#response = urllib2.urlopen(req, timeout=15)
response = self.opener.open(req, timeout=10)
if response.info().get("Content-Encoding") == 'gzip':
content = self.gzip_data(response.read())
else:
content = response.read()
#time.sleep(0.1*random.randint(10, 20))
except urllib2.HTTPError, e:
return e.code
except:
s=sys.exc_info()
msg = u"get_content Error %s happened on line %d" % (s[1], s[2].tb_lineno)
logger.error(msg)
content = ""
return content
def get_content_cookie(self, url, headers={}, data=None):
content = ""
try:
req = self.pack_request(url=url, headers=headers, data=data)
opener = urllib2.build_opener(self.cookie_support)
response = opener.open(req, timeout=10)
if response.info().get("Content-Encoding") == 'gzip':
content = self.gzip_data(response.read())
else:
content = response.read()
#time.sleep(0.1*random.randint(10, 20))
except:
s=sys.exc_info()
msg = u"get_content Error %s happened on line %d" % (s[1], s[2].tb_lineno)
logger.error(msg)
content = ""
return content
def clear_cookiedat(self, datpath):
try:
os.remove(datpath)
#f = file(datpath, 'w')
#f.truncate()
#f.close()
except:
pass
def pack_request(self, url="", headers={}, data=None):
if data:
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
data = urllib.urlencode(data)
req = urllib2.Request(
url=url,
data=data,
headers=headers
)
proxyip = self.proxyip
if proxyip and "127.0.0.1" not in proxyip:
if proxyip.startswith("http"):
proxyip = proxyip.replace("http://", "")
req.set_proxy(proxyip, "http")
return req
def gzip_data(self, spider_data):
""" get data from gzip """
if 0 == len(spider_data):
return spider_data
spiderDataStream = StringIO.StringIO(spider_data)
spider_data = gzip.GzipFile(fileobj=spiderDataStream).read()
return spider_data
def __get_headers(self):
headers = {'Host': 'weibo.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:13.0) Gecko/20100101 Firefox/13.0.1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-encoding': 'gzip, deflate',
'Accept-Language': 'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3',
'Connection': 'keep-alive',
}
return headers | gpl-2.0 | -5,311,268,990,533,459,000 | 39.214669 | 186 | 0.492904 | false |
ikvk/imap_tools | tests/messages_data/error_emails/missing_body.py | 1 | 1292 | import datetime
DATA = dict(
subject='REDACTED',
from_='[email protected]',
to=('', '@mailman.enron.com'),
cc=(),
bcc=(),
reply_to=(),
date=datetime.datetime(2001, 11, 27, 15, 2, 35, tzinfo=datetime.timezone(datetime.timedelta(-1, 57600))),
date_str='Tue, 27 Nov 2001 15:02:35 -0800',
text='',
html='',
headers={'message-id': ('<001301c17797$9cd0ef30$a3ab620c@vaio>',), 'from': ('"SCS_2" <[email protected]>',), 'to': ('<Undisclosed-Recipient:@mailman.enron.com;>',), 'subject': ('REDACTED',), 'date': ('Tue, 27 Nov 2001 15:02:35 -0800',), 'mime-version': ('1.0',), 'content-type': ('multipart/mixed;\r\n\tboundary="----=_NextPart_000_000F_01C17754.8C3CAF30"',), 'x-priority': ('3',), 'x-msmail-priority': ('Normal',), 'x-mailer': ('Microsoft Outlook Express 5.00.2919.6700',), 'x-mimeole': ('Produced By Microsoft MimeOLE V5.00.2919.6700',), 'return-path': ('[email protected]',)},
attachments=[],
from_values={'email': '[email protected]', 'name': 'SCS_2', 'full': 'SCS_2 <[email protected]>'},
to_values=({'email': '', 'name': '', 'full': 'Undisclosed-Recipient'}, {'email': '@mailman.enron.com', 'name': '', 'full': '@mailman.enron.com'}),
cc_values=(),
bcc_values=(),
reply_to_values=(),
) | apache-2.0 | 5,465,854,428,567,728,000 | 60.571429 | 593 | 0.606037 | false |
FrodeSolheim/fs-uae-launcher | system/tools/filescanner.py | 1 | 9278 | from typing import Optional
import fsui
from fsgamesys.amiga.amiga import Amiga
from fsgamesys.context import fsgs
from fsgamesys.product import Product
from fswidgets.panel import Panel
from fswidgets.widget import Widget
from launcher.i18n import gettext
from launcher.launcher_signal import LauncherSignal
from launcher.scanner import Scanner
from launcher.settings.scan_paths_group import ScanPathsGroup
from system.classes.window import Window
TIMER_INTERVAL = 100
# FIXME: TODO: When clicking the Stop button, old (existing) data may be purged
class FileScannerWindow(Window):
@classmethod
def refresh_game_database(cls, window: Widget):
return cls(
window, minimal=True, interactive=False, scan_for_files=False
)
def __init__(
self,
parent: Optional[Widget] = None,
minimal: bool = False,
interactive: bool = True,
scan_for_files: bool = True,
):
title = gettext("File scanner")
super().__init__(parent, title=title, maximizable=False)
buttons, layout = fsui.DialogButtons.create_with_layout(
self, create_parent_layout=False
)
buttons.create_close_button()
self.layout.add_spacer(640, 0)
self.interactive = interactive
self.scan_for_files = scan_for_files
self.update_game_database = False
if not minimal:
if Product.includes_amiga():
self.scan_kickstart_group = ScanKickstartGroup(self)
layout.add(self.scan_kickstart_group, fill=True)
layout.add_spacer(20)
heading = gettext(
"Scan for Kickstarts, Files and Configurations"
)
else:
heading = gettext("Scan for ROMs, media and config files")
label = fsui.HeadingLabel(self, heading)
layout.add(label, margin_bottom=10)
self.scan_paths_group = ScanPathsGroup(self)
layout.add(self.scan_paths_group, fill=True, margin=0)
layout.add_spacer(20)
self.scan_progress_group = ScanProgressGroup(self)
layout.add(self.scan_progress_group, fill=True)
if interactive:
self.scan_button = buttons.add_button(
fsui.Button(buttons, gettext("Scan"))
)
self.scan_button.activated.connect(self.on_scan_button)
else:
self.scan_button = None
self.stop_button = buttons.add_button(
fsui.Button(buttons, gettext("Stop"))
)
self.stop_button.activated.connect(self.on_stop_button)
self.old_title = ""
self.old_status = ""
self.has_started_scan = False
self.start_timer(TIMER_INTERVAL)
if not self.interactive:
self.start_scan()
self.destroyed.connect(Scanner.stop)
def set_scan_title(self, text: str):
if not text:
return
if text == self.old_title:
return
self.old_title = text
self.scan_progress_group.title_label.set_text(text)
def set_scan_status(self, text: str):
if not text:
return
if text == self.old_status:
return
self.old_status = text
self.scan_progress_group.status_label.set_text(text)
def on_timer(self):
if not Scanner.running:
if self.has_started_scan:
if Scanner.error:
self.set_scan_title(gettext("Scan error"))
self.set_scan_status(Scanner.error)
else:
if not self.interactive:
self.end_modal(True)
return
self.set_scan_title(gettext("Scan complete"))
self.set_scan_status(
gettext("Click 'Scan' button if you want to re-scan")
)
else:
self.set_scan_title(gettext("No scan in progress"))
self.set_scan_status(
gettext("Click 'Scan' button to start scan")
)
if self.scan_button is not None:
self.scan_button.set_enabled()
self.stop_button.set_enabled(False)
return
status = Scanner.status
self.set_scan_title(status[0])
self.set_scan_status(status[1])
def on_scan_button(self):
self.start_scan()
def start_scan(self):
if self.scan_button is not None:
self.scan_button.set_enabled(False)
self.has_started_scan = True
self.set_scan_title(gettext("Starting scan"))
self.set_scan_status(gettext("Please wait..."))
paths = ScanPathsGroup.get_search_path()
self.stop_button.set_enabled()
Scanner.start(
paths,
scan_for_files=self.scan_for_files,
update_game_database=self.update_game_database,
purge_other_dirs=True,
)
# noinspection PyMethodMayBeStatic
def on_stop_button(self):
Scanner.stop_flag = True
class KickstartStatusGroup(fsui.Panel):
def __init__(self, parent: Widget, title: str, model):
self.model = model
super().__init__(parent)
self.layout = fsui.HorizontalLayout()
self.ok_image = fsui.Image("launcher:/data/ok_emblem.png")
self.na_image = fsui.Image("launcher:/data/na_emblem.png")
self.icon = fsui.ImageView(self, self.na_image)
self.layout.add(self.icon)
self.layout.add_spacer(10)
self.label = fsui.Label(self, title)
self.layout.add(self.label)
self.update()
LauncherSignal.add_listener("scan_done", self)
def onDestroy(self):
LauncherSignal.remove_listener("scan_done", self)
super().onDestroy()
def on_scan_done_signal(self):
self.update()
def update(self):
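        # show the OK emblem if any known kickstart image for this Amiga model is present in the file database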
amiga = Amiga.get_model_config(self.model)
for sha1 in amiga["kickstarts"]:
if fsgs.file.find_by_sha1(sha1):
self.icon.set_image(self.ok_image)
return
self.icon.set_image(self.na_image)
class ScanKickstartGroup(Panel):
def __init__(self, parent: Widget):
super().__init__(parent)
self.layout = fsui.VerticalLayout()
label = fsui.HeadingLabel(
self, gettext("Available Kickstart Versions")
)
self.layout.add(label, margin_bottom=10)
icon_layout = fsui.HorizontalLayout()
self.layout.add(icon_layout, fill=True)
icon_layout.add_spacer(20)
image = fsui.Image("launcher:/data/kickstart.png")
self.image_view = fsui.ImageView(self, image)
icon_layout.add(self.image_view, valign=0.0, margin_right=10)
vert_layout = fsui.VerticalLayout()
icon_layout.add(vert_layout, fill=True, expand=True)
vert_layout.add_spacer(0)
label = fsui.Label(
self,
gettext(
"You should have kickstart files for "
"each Amiga model you want to use:"
),
)
vert_layout.add(label, margin_bottom=0)
hori_layout = fsui.HorizontalLayout()
vert_layout.add(hori_layout, fill=True)
self.kickstart_groups = []
column_layout = fsui.VerticalLayout()
hori_layout.add(column_layout, expand=True, fill=True, margin=10)
self.add_kickstart_group(column_layout, "Amiga 1000", "A1000")
column_layout.add_spacer(10)
self.add_kickstart_group(column_layout, "Amiga 500", "A500")
column_layout.add_spacer(10)
self.add_kickstart_group(column_layout, "Amiga 500+", "A500+")
column_layout = fsui.VerticalLayout()
hori_layout.add(column_layout, expand=True, fill=True, margin=10)
self.add_kickstart_group(column_layout, "Amiga 600", "A600")
column_layout.add_spacer(10)
self.add_kickstart_group(column_layout, "Amiga 1200", "A1200")
column_layout.add_spacer(10)
self.add_kickstart_group(column_layout, "Amiga 3000", "A3000")
column_layout = fsui.VerticalLayout()
hori_layout.add(column_layout, expand=True, fill=True, margin=10)
self.add_kickstart_group(column_layout, "Amiga 4000", "A4000/040")
column_layout.add_spacer(10)
self.add_kickstart_group(column_layout, "Amiga CD32", "CD32")
column_layout.add_spacer(10)
self.add_kickstart_group(column_layout, "Commodore CDTV", "CDTV")
def add_kickstart_group(self, layout, title, model):
group = KickstartStatusGroup(self, title, model)
self.kickstart_groups.append(group)
layout.add(group, fill=True)
class ScanProgressGroup(Panel):
def __init__(self, parent: Widget):
super().__init__(parent)
self.layout = fsui.HorizontalLayout()
self.layout2 = fsui.VerticalLayout()
self.layout.add(self.layout2, fill=True, expand=True)
self.title_label = fsui.HeadingLabel(self, "")
self.layout2.add(self.title_label, fill=True)
self.layout2.add_spacer(10)
self.status_label = fsui.Label(self, "")
self.layout2.add(self.status_label, fill=True)
| gpl-2.0 | -2,840,898,757,939,423,000 | 32.135714 | 78 | 0.598513 | false |
freenas/py-bsd | bsd/pty.py | 1 | 5200 | """Pseudo terminal utilities. (Slightly modified to work on FreeBSD)"""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
# See: W. Richard Stevens. 1992. Advanced Programming in the
# UNIX Environment. Chapter 19.
# Author: Steen Lumholt -- with additions by Guido.
from select import select
import os
import traceback
import tty
__all__ = ["openpty","fork","spawn"]
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
CHILD = 0
def openpty():
"""openpty() -> (master_fd, slave_fd)
Open a pty master/slave pair, using os.openpty() if possible."""
try:
return os.openpty()
except (AttributeError, OSError):
pass
master_fd, slave_name = _open_terminal()
slave_fd = slave_open(slave_name)
return master_fd, slave_fd
def master_open():
"""master_open() -> (master_fd, slave_name)
Open a pty master and return the fd, and the filename of the slave end.
Deprecated, use openpty() instead."""
try:
master_fd, slave_fd = os.openpty()
except (AttributeError, OSError):
pass
else:
slave_name = os.ttyname(slave_fd)
os.close(slave_fd)
return master_fd, slave_name
return _open_terminal()
def _open_terminal():
"""Open pty master and return (master_fd, tty_name)."""
for x in 'pqrstuvwxyzPQRST':
for y in '0123456789abcdef':
pty_name = '/dev/pty' + x + y
try:
fd = os.open(pty_name, os.O_RDWR)
except OSError:
continue
return (fd, '/dev/tty' + x + y)
raise OSError('out of pty devices')
def slave_open(tty_name):
"""slave_open(tty_name) -> slave_fd
Open the pty slave and acquire the controlling terminal, returning
opened filedescriptor.
Deprecated, use openpty() instead."""
result = os.open(tty_name, os.O_RDWR)
try:
from fcntl import ioctl, I_PUSH
except ImportError:
return result
try:
ioctl(result, I_PUSH, "ptem")
ioctl(result, I_PUSH, "ldterm")
except OSError:
pass
return result
def fork():
"""fork() -> (pid, master_fd)
Fork and make the child a session leader with a controlling terminal."""
try:
pid, fd = os.forkpty()
except (AttributeError, OSError):
pass
else:
if pid == CHILD:
try:
os.setsid()
except OSError:
# os.forkpty() already set us session leader
pass
return pid, fd
master_fd, slave_fd = openpty()
pid = os.fork()
if pid == CHILD:
# Establish a new session.
os.setsid()
os.close(master_fd)
# Slave becomes stdin/stdout/stderr of child.
os.dup2(slave_fd, STDIN_FILENO)
os.dup2(slave_fd, STDOUT_FILENO)
os.dup2(slave_fd, STDERR_FILENO)
if (slave_fd > STDERR_FILENO):
os.close (slave_fd)
# Explicitly open the tty to make it become a controlling tty.
tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
os.close(tmp_fd)
else:
os.close(slave_fd)
# Parent and child process.
return pid, master_fd
def _writen(fd, data):
"""Write all the data to a descriptor."""
while data:
n = os.write(fd, data)
data = data[n:]
def _read(fd):
"""Default read function."""
return os.read(fd, 1024)
def _copy(master_fd, master_read=_read, stdin_read=_read):
"""Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read)"""
fds = [master_fd, STDIN_FILENO]
while True:
rfds, wfds, xfds = select(fds, [], [])
if master_fd in rfds:
data = master_read(master_fd)
if not data: # Reached EOF.
return
else:
os.write(STDOUT_FILENO, data)
if STDIN_FILENO in rfds:
data = stdin_read(STDIN_FILENO)
if not data:
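                # stdin reached EOF: stop forwarding it, but keep draining the pty master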
fds.remove(STDIN_FILENO)
else:
_writen(master_fd, data)
def spawn(argv, master_read=_read, stdin_read=_read):
"""Create a spawned process."""
if type(argv) == type(''):
argv = (argv,)
pid, master_fd = fork()
if pid == CHILD:
try:
os.execlp(argv[0], *argv)
except:
# If we wanted to be really clever, we would use
# the same method as subprocess() to pass the error
# back to the parent. For now just dump stack trace.
traceback.print_exc()
finally:
os._exit(1)
try:
mode = tty.tcgetattr(STDIN_FILENO)
tty.setraw(STDIN_FILENO)
restore = 1
except tty.error: # This is the same as termios.error
restore = 0
try:
_copy(master_fd, master_read, stdin_read)
except OSError:
# Some OSes never return an EOF on pty, just raise
# an error instead.
pass
finally:
if restore:
tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
os.close(master_fd)
return os.waitpid(pid, 0)[1]
| bsd-3-clause | 6,431,780,181,907,985,000 | 27.571429 | 76 | 0.57 | false |
arrti/proxypooler | tests/test_db.py | 1 | 1559 | import pytest
from proxypooler.errors import ProxyPoolerEmptyError
def test_db(conn):
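    # entries are ordered by expiry: get() returns the soonest-expiring proxy first, get_list(rev=True) the longest-lived first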
conn.put('127.0.0.1:80', 15)
conn.put('127.0.0.1:81', 14)
conn.put('127.0.0.1:82', 210)
conn.put('127.0.0.1:83', 2)
conn.put('127.0.0.1:84', 100)
assert conn.size == 5
ip = conn.get()[0].decode('utf-8')
assert ip == '127.0.0.1:83'
ip = conn.get()[0].decode('utf-8')
assert ip == '127.0.0.1:81'
assert conn.size == 3
ips = conn.get_list(30)
assert len(ips) == 3
ip = ips[0][0].decode('utf-8')
assert ip == '127.0.0.1:80'
ip = ips[1][0].decode('utf-8')
assert ip == '127.0.0.1:84'
ip = ips[2][0].decode('utf-8')
assert ip == '127.0.0.1:82'
assert conn.size == 0
conn.put('127.0.0.1:83', 2)
conn.put('127.0.0.1:83', 20)
assert conn.size == 1
ip, expire = conn.get()
assert ip.decode('utf-8') == '127.0.0.1:83'
assert expire == 20
conn.put('127.0.0.1:83', 20)
conn.put_list([('127.0.0.1:84', 100), ('127.0.0.1:81', 14), ('127.0.0.1:82', 210)])
assert conn.size == 4
ip = conn.get()[0].decode('utf-8')
assert ip == '127.0.0.1:81'
ip = conn.get()[0].decode('utf-8')
assert ip == '127.0.0.1:83'
ips = conn.get_list(2, rev=True)
assert len(ips) == 2
assert ips[0][0].decode('utf-8') == '127.0.0.1:82'
assert ips[0][1] == 210
assert ips[1][0].decode('utf-8') == '127.0.0.1:84'
assert ips[1][1] == 100
assert conn.size == 0
def test_db_empty(conn):
with pytest.raises(ProxyPoolerEmptyError):
conn.get()
| apache-2.0 | -7,760,928,809,844,205,000 | 27.87037 | 87 | 0.546504 | false |
xsleonard/wsgisubdomain | wsgisubdomain.py | 1 | 2974 | import socket
from threading import Lock
from __about__ import __version__, __title__, __description__
__all__ = ['__version__', '__title__', '__description__',
'SubdomainDispatcher']
class SubdomainDispatcher(object):
""" A WSGI application that gets or creates other WSGI applications
based on the subdomain.
Adapted from:
http://flask.pocoo.org/docs/patterns/appdispatch/#dispatch-by-subdomain
:param create_application: A function that accepts 'subdomain' as a
keyword argument and returns a WSGI application. Subdomain will be
either an empty string for the bare domain, `None` if the request is
for an IP address, or a full subdomain (e.g. 'www' or 'en.dl')
"""
def __init__(self, create_application):
self.create_application = create_application
self.lock = Lock()
self.instances = {}
def __call__(self, environ, start_response):
""" WSGI application interface
:param environ: WSGI environ
:param start_response: WSGI start_response
"""
app = self.get_application(environ)
return app(environ, start_response)
def get_application(self, environ):
""" Retrieve an application for a wsgi environ
:param environ: The environ object sent by wsgi to an application
"""
host = self._get_host(environ)
subdomain = self._extract_subdomain(host)
return self._get_application(subdomain)
def _get_application(self, subdomain):
""" Return a WSGI application for subdomain. The subdomain is
passed to the create_application constructor as a keyword argument.
:param subdomain: Subdomain to get or create an application with
"""
with self.lock:
app = self.instances.get(subdomain)
if app is None:
app = self.create_application(subdomain=subdomain)
self.instances[subdomain] = app
return app
@staticmethod
def _extract_subdomain(host):
""" Returns a subdomain from a host. This host is typically the
HTTP_HOST request envvar. If the host is an IP address, `None` is
returned
:param host: Request's target host
"""
host = host.split(':')[0]
# If the host is an IP address, there is no subdomain to extract
try:
# Check if the host is an ip address
socket.inet_aton(host)
except socket.error:
# It isn't an IP address, return the subdomain
return '.'.join(host.split('.')[:-2])
@staticmethod
def _get_host(environ):
""" Returns the true host from the request's environ.
:param environ: environ variable passed to a wsgi app by wsgi
"""
# HTTP_HOST is preferred to SERVER_NAME, but only SERVER_NAME is
# guaranteed to exist
return environ.get('HTTP_HOST', environ['SERVER_NAME'])
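# Usage sketch (illustrative only; the factory function below is an assumed
# example and not part of this module):
#
#     def make_app(subdomain):
#         ...  # build and return a WSGI application for this subdomain
#
#     application = SubdomainDispatcher(make_app)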
| mit | 1,412,791,263,099,934,500 | 34.404762 | 76 | 0.62273 | false |
olysonek/tuned | tests/unit/hardware/test_device_matcher_udev.py | 1 | 1221 | import unittest2
import pyudev
from tuned.hardware.device_matcher_udev import DeviceMatcherUdev
class DeviceMatcherUdevTestCase(unittest2.TestCase):
@classmethod
def setUpClass(cls):
cls.udev_context = pyudev.Context()
cls.matcher = DeviceMatcherUdev()
def test_simple_search(self):
try:
device = pyudev.Devices.from_sys_path(self.udev_context,
"/sys/devices/virtual/tty/tty0")
except AttributeError:
device = pyudev.Device.from_sys_path(self.udev_context,
"/sys/devices/virtual/tty/tty0")
self.assertTrue(self.matcher.match("tty0", device))
try:
device = pyudev.Devices.from_sys_path(self.udev_context,
"/sys/devices/virtual/tty/tty1")
except AttributeError:
device = pyudev.Device.from_sys_path(self.udev_context,
"/sys/devices/virtual/tty/tty1")
self.assertFalse(self.matcher.match("tty0", device))
def test_regex_search(self):
try:
device = pyudev.Devices.from_sys_path(self.udev_context,
"/sys/devices/virtual/tty/tty0")
except AttributeError:
device = pyudev.Device.from_sys_path(self.udev_context,
"/sys/devices/virtual/tty/tty0")
self.assertTrue(self.matcher.match("tty.", device))
self.assertFalse(self.matcher.match("tty[1-9]", device))
| gpl-2.0 | -4,724,523,557,095,025,000 | 32.916667 | 64 | 0.736282 | false |
TheDSCPL/SSRE_2017-2018_group8 | Projeto/Python/cryptopy/crypto/cipher/arc4.py | 1 | 2710 | # -*- coding: utf-8 -*-
""" crypto.cipher.arc4
A Stream Cipher Encryption Algorithm 'Arcfour'
A few lines of code/ideas borrowed from [PING]
[PING] CipherSaber implementation by Ka-Ping Yee <[email protected]>, 5 May 2000.
Some documentation text and test vectors taken from [IDARC4]
[IDARCH4] K.Kaukonen, R.Thayer, "A Stream Cipher Encryption Algorithm 'Arcfour'",
ftp://ietf.org/draft-kaukonen-cipher-arcfour-03.txt
Generally munged to map to crypto.cipher calling conventions
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
November 5, 2002
"""
class ARC4:
""" ARC4 Stream Cipher Algorithm
"""
def __init__(self,key=None):
""" key -> octet string for key """
self.name = 'ARC4'
self.strength = None # depends on keySize
self.blockSize = 1 # blockSize is in bytes
if key != None:
self.setKey(key)
def setKey(self, key):
""" Set initial state from key. Never use the same key more than once!
"""
self.keySize = len(key)
self.strength = self.keySize # this does not include subtracting IV size :-(
i, j, self.state = 0, 0, range(256)
for i in range(256):
j = (j + self.state[i] + ord(key[i % len(key)])) % 256
self.state[i], self.state[j] = self.state[j], self.state[i]
self.keyReady = 1 # Ready
def encrypt(self, plainText, more = None):
""" Encrypt a string and return a binary string
multiple sequential calls can be made using more =1,
this continues the encryption
New sessions of encrypt can NOT be called twice with the same key!!!!
"""
        if self.keyReady != 1: raise RuntimeError('Error, ARC4 key already used once!')
if more != 1:
self.keyReady = None
cipherText = arcfourBlock(self.state, plainText)
return cipherText
def decrypt(self, cipherText, more = None):
""" Decrypt a string and return a string """
        if self.keyReady != 1:
            raise RuntimeError('set for decryption required')
if more != 1:
self.keyReady = None
plainText = arcfourBlock(self.state, cipherText)
return plainText
def arcfourBlock(state, input):
""" Use state to encrypt input string, returns string """
i, j, output = 0, 0, []
for byte in input:
i = (i + 1) % 256
j = (j + state[i]) % 256
state[i], state[j] = state[j], state[i]
n = (state[i] + state[j]) % 256
output.append(chr(ord(byte) ^ state[n]))
output = ''.join(output) # convert to string
return output
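# Usage sketch (illustrative only; the key and message below are arbitrary
# assumptions, and a key must never be reused across messages):
#
#     cipher = ARC4('sixteen byte key')
#     ct = cipher.encrypt('attack at dawn')
#     pt = ARC4('sixteen byte key').decrypt(ct)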
| mit | -4,618,640,495,810,670,000 | 34.657895 | 91 | 0.586716 | false |
lochiiconnectivity/exabgp | lib/exabgp/application/cli.py | 1 | 4426 | #!/usr/bin/env python
# encoding: utf-8
"""
cli.py
Created by Thomas Mangin on 2014-12-22.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import sys
from exabgp.dep.cmd2 import cmd
from exabgp.version import version
class Completed (cmd.Cmd):
# use_rawinput = False
# prompt = ''
# doc_header = 'doc_header'
# misc_header = 'misc_header'
# undoc_header = 'undoc_header'
ruler = '-'
def __init__ (self,intro):
self.prompt = '%s> ' % intro
cmd.Cmd.__init__(self)
def completedefault (self, text, line, begidx, endidx):
commands = line.split()
local = self.completion
for command in commands:
if command in local:
local = local[command]
continue
break
return [_ for _ in local.keys() if _.startswith(text)]
def default (self,line):
print 'unrecognised syntax: ', line
	def do_EOF (self,line):
		return True
class SubMenu (Completed):
def do_exit (self,line):
return True
do_x = do_exit
class Attribute (SubMenu):
chars = ''.join(chr(_) for _ in range(ord('a'),ord('z')+1) + range(ord('0'),ord('9')+1) + [ord ('-')])
attribute = None
completion = {
'origin' : {
'igp': {
},
'egp': {
},
'incomplete': {
},
},
}
def __init__ (self,name):
self.name = name
SubMenu.__init__(self,'attribute %s' % name)
def do_origin (self,line):
if line in ('igp','egp','incomplete'):
self.attribute['origin'] = line
else:
print 'invalid origin'
def do_as_path (self,line):
pass
# next-hop
def do_med (self,line):
if not line.isdigit():
print 'invalid med, %s is not a number' % line
return
med = int(line)
		if not (0 <= med < 65536):
			print 'invalid med, %s is not a valid number' % line
			return
		self.attribute['med'] = med
# local-preference
# atomic-aggregate
# aggregator
# community
# originator-id
# cluster-list
# extended-community
# psmi
# aigp
def do_show (self,line):
print 'attribute %s ' % self.name + ' '.join('%s %s' % (key,value) for key,value in self.attribute.iteritems())
class ExaBGP (Completed):
completion = {
'announce' : {
'route' : {
},
'l2vpn' : {
},
},
'neighbor': {
'include': {
},
'exclude': {
},
'reset': {
},
'list': {
},
},
'attribute' : {
},
'show': {
'routes' : {
'extensive': {
},
'minimal': {
},
},
},
'reload': {
},
'restart': {
},
}
def _update_prompt (self):
if self._neighbors:
self.prompt = '\n# neighbor ' + ', '.join(self._neighbors) + '\n> '
else:
self.prompt = '\n> '
##
## repeat last command
##
last = 'help'
def do_last (self, line):
"Print the input, replacing '$out' with the output of the last shell command"
# Obviously not robust
if hasattr(self, 'last_output'):
print line.replace('$out', self.last_output)
##
##
##
_neighbors = set()
def do_neighbor (self,line):
try:
action,ip = line.split()
except ValueError:
if line == 'reset':
print 'removed neighbors', ', '.join(self._neighbors)
self._neighbors = set()
self._update_prompt()
else:
print 'invalid syntax'
self.help_neighbor()
return
if action == 'include':
# check ip is an IP
# check ip is a known IP
self._neighbors.add(ip)
self._update_prompt()
elif action == 'exclude':
if ip in self._neighbors:
self._neighbors.remove(ip)
print 'neighbor excluded'
self._update_prompt()
else:
print 'invalid neighbor'
elif action == 'list':
print 'removed neighbors', ', '.join(self._neighbors)
else:
print 'invalid syntax'
self.help_neighbor()
def help_neighbor (self):
print "neighbor include <ip> : limit the action to the defined neighbors"
print "neighbor exclude <ip> : remove a particular neighbor"
print "neighbor reset : clear the neighbor previous set "
_attribute = {}
def do_attribute (self,name):
if not name:
self.help_attribute()
return
invalid = ''.join([_ for _ in name if _ not in Attribute.chars])
if invalid:
print 'invalid character(s) in attribute name: %s' % invalid
return
cli = Attribute(name)
cli.attribute = self._attribute.get(name,{})
cli.cmdloop()
def help_attribute (self):
print 'attribute <name>'
def do_quit (self,line):
return True
do_q = do_quit
if __name__ == '__main__':
if len(sys.argv) > 1:
ExaBGP().onecmd(' '.join(sys.argv[1:]))
else:
print "ExaBGP %s CLI" % version
ExaBGP('').cmdloop()
| bsd-3-clause | 2,480,145,758,677,052,400 | 17.596639 | 113 | 0.604383 | false |
taiwenko/python | acs/acs_cold_start.py | 1 | 7587 | #!/usr/bin/env python
from time import sleep
import twk_utils
import math
import sys
import xpf6020
import tools.utils as tools
import watlowf4
from tools import shell
from blessings import Terminal
t = Terminal()
franz_num = raw_input('How many Franz are you testing? [1,2,3,or 4]: ').strip()
cycle_num = raw_input('How many temp cycles would you like to run?: ').strip()
utils = twk_utils.Twk_utils()
print "Accessing the XPF6020 Power Supplies"
ps1_path = '/dev/serial/by-id/usb-Prolific_Technology_Inc._USB-Serial_Controller_D-if00-port0'
ps2_path = '/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A703PO3I-if00-port0'
pfc1_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_1a-if01-port0'
pfc2_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_2a-if01-port0'
pfc3_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_1b-if01-port0'
pfc4_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_2b-if01-port0'
print "Accessing the Temperature Chamber"
tchamber_path = '/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A603R0MG-if00-port0'
chamber = watlowf4.WatlowF4(tchamber_path)
chamber.conditioning_on(True)
def ps_measure_check(ch, current_min, current_max, voltage_min, voltage_max, tolerance, max_cycle):
  # Average max_cycle voltage/current readings from the given supply channel and
  # check the means against the supplied limits; returns 0 on pass, 1 on fail.
cycle = 0
avg_volt = 0
avg_current = 0
while cycle != max_cycle:
if ch == '1':
[r_mppt_v, r_mppt_i] = ps1.measure('1')
elif ch == '2':
[r_mppt_v, r_mppt_i] = ps1.measure('2')
elif ch == '3':
[r_mppt_v, r_mppt_i] = ps2.measure('1')
elif ch == '4':
[r_mppt_v, r_mppt_i] = ps2.measure('2')
else:
print 'Unknown Input Channel'
volt = float(r_mppt_v.split("V")[0])
curr = float(r_mppt_i.split("A")[0])
avg_volt = avg_volt + volt
avg_current = avg_current + curr
cycle = cycle + 1
sleep(1)
r_mppt_v = avg_volt / cycle;
r_mppt_i = avg_current / cycle;
if float(r_mppt_i) > float(current_max):
result = t.bold_red('FAILED')
result_count = 1
elif float(r_mppt_i) < float(current_min):
result = t.bold_red('FAILED')
result_count = 1
elif float(r_mppt_v) > float(voltage_max):
result = t.bold_red('FAILED')
result_count = 1
elif float(r_mppt_v) < float(voltage_min):
result = t.bold_red('FAILED')
result_count = 1
else:
result = t.bold_green('PASSED')
result_count = 0
print 'Franz CH%s @ %sV, %sA....[%s]' %(ch, r_mppt_v, r_mppt_i, result)
print ''
return result_count
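# Example call (values are illustrative assumptions): check channel 1 against a
# 48 V / 0.12 A target with 5% tolerance, averaging 5 samples.
#
#     failures = ps_measure_check('1', 0.114, 0.126, 45.6, 50.4, 0.05, 5)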
def config_acs(pfc_path):
sleep(5)
tom = shell.Shell(pfc_path)
sleep(1)
sb = shell.Scoreboard(tom,'acs')
sleep(1)
tom.sendline('power on acs')
sleep(3)
print sb.query('power_acs_enabled')
sleep(1)
tom.close()
def clean_acs(pfc_path):
sleep(5)
tom = shell.Shell(pfc_path)
sleep(1)
sb = shell.Scoreboard(tom,'acs')
sleep(1)
tom.sendline('power off acs')
sleep(3)
print sb.query('power_acs_enabled')
sleep(1)
tom.close()
# Test starts here
offtime = 1 #15 #mins
offtime_sec = offtime * 60
run_count = 0
max_run_count = cycle_num
ch1result = 0
ch2result = 0
ch3result = 0
ch4result = 0
ts = utils.get_timestamp()
print '*** Franz test started @ %s***' % ts
batt_vin = 48
batt_iin = 20
ps1 = xpf6020.Xpf6020(ps1_path)
ps1.reset_ps()
ps2 = xpf6020.Xpf6020(ps2_path)
ps2.reset_ps()
ps1.set_voltage(1, batt_vin)
ps1.set_currentlimit(1, batt_iin)
if franz_num == '2':
ps1.set_voltage(2, batt_vin)
ps1.set_currentlimit(2, batt_iin)
elif franz_num == '3':
ps1.set_voltage(2, batt_vin)
ps1.set_currentlimit(2, batt_iin)
ps2.set_voltage(1,batt_vin)
ps2.set_currentlimit(1,batt_iin)
elif franz_num == '4':
ps1.set_voltage(2, batt_vin)
ps1.set_currentlimit(2, batt_iin)
ps2.set_voltage(1,batt_vin)
ps2.set_currentlimit(1,batt_iin)
ps2.set_voltage(2,batt_vin)
ps2.set_currentlimit(2,batt_iin)
else:
if franz_num != '1':
print 'Unknown franz amount. Can only test up to 4 franz at a time.'
sys.exit()
# Setup chamber
cold_temp = 20 #-60
soak_time = 1 #45 # min
chamber.ramp_down(cold_temp)
chamber.soak_time(soak_time)
while True:
# Turn on power supplies
ps1.ind_output('1','on')
if franz_num == '2':
ps1.ind_output('2','on')
elif franz_num == '3':
ps1.ind_output('2','on')
ps2.ind_output('1','on')
elif franz_num == '4':
ps1.ind_output('2','on')
ps2.ind_output('1','on')
ps2.ind_output('2','on')
else:
if franz_num != '1':
print 'Unknown Channel'
sleep(5)
# Turn on ACS using PFC
config_acs(pfc1_path)
if franz_num == '2':
config_acs(pfc2_path)
elif franz_num == '3':
config_acs(pfc2_path)
config_acs(pfc3_path)
elif franz_num == '4':
config_acs(pfc2_path)
config_acs(pfc3_path)
config_acs(pfc4_path)
else:
if franz_num != '1':
print 'Unknown Channel'
sleep(5)
# Measure current draw from PS
measurement_count = 5
print 'Averaging %d measurement...' % measurement_count
current = 0.12
voltage = 48
tolerance = 0.05
current_max = float(current) * (1 + tolerance)
current_min = float(current) * (1 - tolerance)
voltage_max = float(voltage) * (1 + tolerance)
voltage_min = float(voltage) * (1 - tolerance)
print 'Voltage Limits should be within %f to %fV' %(voltage_min, voltage_max)
print 'Current Limits should be within %f to %fA' %(current_min, current_max)
print ''
rc1 = ps_measure_check('1', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch1result = ch1result + rc1
if franz_num == '2':
rc2 = ps_measure_check('2', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch2result = ch2result + rc2
elif franz_num == '3':
rc2 = ps_measure_check('2', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch2result = ch2result + rc2
rc3 = ps_measure_check('3', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch3result = ch3result + rc3
elif franz_num == '4':
rc2 = ps_measure_check('2', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch2result = ch2result + rc2
rc3 = ps_measure_check('3', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch3result = ch3result + rc3
rc4 = ps_measure_check('4', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch4result = ch4result + rc4
else:
if franz_num != '1':
print 'Unknown franz amount.'
# Turn off ACS using PFC
clean_acs(pfc1_path)
if franz_num == '2':
clean_acs(pfc2_path)
elif franz_num == '3':
clean_acs(pfc2_path)
clean_acs(pfc3_path)
elif franz_num == '4':
clean_acs(pfc2_path)
clean_acs(pfc3_path)
clean_acs(pfc4_path)
else:
if franz_num != '1':
print 'Unknown Channel'
sleep(5)
# Turn off power supplies
ps1.all_output('off')
ps2.all_output('off')
run_count = run_count + 1
if run_count == int(max_run_count):
break;
ts = utils.get_timestamp()
print 'Off for %s min started @ %s' % (offtime, ts)
sleep(offtime_sec)
hot_temp = 24
print 'Ramping up to 24C'
chamber.ramp_up(hot_temp)
ts = utils.get_timestamp()
msg = '*** ACS test completed @ %s***' % ts
msg = msg + ', CH1 failed %s out of %s cycles' % (ch1result, max_run_count)
msg = msg + ', CH2 failed %s out of %s cycles' % (ch2result, max_run_count)
msg = msg + ', CH3 failed %s out of %s cycles' % (ch3result, max_run_count)
msg = msg + ', CH4 failed %s out of %s cycles' % (ch4result, max_run_count)
print msg
utils.send_email('ACS Cold-Start', msg)
| mit | 3,195,352,635,429,763,000 | 26.791209 | 113 | 0.652827 | false |
SmileyJames/shopify_python_api | setup.py | 1 | 1764 | from setuptools import setup
NAME='ShopifyAPI'
exec(open('shopify/version.py').read())
DESCRIPTION='Shopify API for Python'
LONG_DESCRIPTION="""\
The ShopifyAPI library allows python developers to programmatically
access the admin section of stores using an ActiveResource like
interface similar the ruby Shopify API gem. The library makes HTTP
requests to Shopify in order to list, create, update, or delete
resources (e.g. Order, Product, Collection)."""
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author='Shopify',
author_email='[email protected]',
url='https://github.com/Shopify/shopify_python_api',
packages=['shopify', 'shopify/resources'],
scripts=['scripts/shopify_api.py'],
license='MIT License',
install_requires=[
'pyactiveresource>=2.1.1',
'PyYAML',
'six',
],
test_suite='test',
tests_require=[
'mock>=1.0.1',
],
platforms='Any',
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules']
)
| mit | -7,996,432,185,524,814,000 | 38.2 | 82 | 0.594671 | false |
MDAnalysis/pyQuteMol | python/OctaMap.py | 1 | 5260 |
import numpy
def Area(a, b, c):
v = numpy.cross(b-a,c-a)
return numpy.sqrt(numpy.dot(v,v))*0.5
class OctaMapSamp:
def __init__(self):
self.size = 0
self.dir = None
self.dirrot = None
self.weight = None
def nsamp(self):
return len(self.dir)
def DuplicateTexels(self, t, s, tx, ty):
e=self.size - 1
# four corners
k0=(tx+ (ty )*s)*3
k1=(tx+e+(ty )*s)*3
k2=(tx+e+(ty+e)*s)*3
k3=(tx+ (ty+e)*s)*3
t[k0 ]=t[k1 ]=t[k2 ]=t[k3 ]
t[k0+1]=t[k1+1]=t[k2+1]=t[k3+1]
t[k0+2]=t[k1+2]=t[k2+2]=t[k3+2]
# sides
    for i in range(1,self.size/2):
k0a=(tx + (ty +i )*s)*3
k0b=(tx + (ty +e-i)*s)*3
k1a=(tx+e + (ty +i )*s)*3
k1b=(tx+e + (ty +e-i)*s)*3
k2a=(tx+i + (ty )*s)*3
k2b=(tx+e-i+ (ty )*s)*3
k3a=(tx+i + (ty +e )*s)*3
k3b=(tx+e-i+ (ty +e )*s)*3
t[k0a+0]=t[k0b+0]; t[k1a+0]=t[k1b+0]; t[k2a+0]=t[k2b+0]; t[k3a+0]=t[k3b+0];
t[k0a+1]=t[k0b+1]; t[k1a+1]=t[k1b+1]; t[k2a+1]=t[k2b+1]; t[k3a+1]=t[k3b+1];
t[k0a+2]=t[k0b+2]; t[k1a+2]=t[k1b+2]; t[k2a+2]=t[k2b+2]; t[k3a+2]=t[k3b+2];
def FillTexture(self, t, s, tx, ty, cr, cg, cb):
for y in range(self.size):
for x in range(self.size):
k=(x+tx+(y+ty)*s)*3
        p=self.dir[ self.Index( x , y ) ]
        q=(p+numpy.array((1,1,1)))/2.0*255.0
t[k]= q[0]
t[k+1]= q[1]
t[k+2]= q[2]
def Index(self,x, y):
return x+y*self.size
def Smooth(self,t, s, tx, ty):
size = self.size
oldvalue = numpy.zeros(size*size*6)
# copy old values
for y in range(0,size*2):
for x in range(0,size*3):
k=(x+tx+(y+ty)*s)*3
i= Index( x , y )
oldvalue[i]=t[k]
    dy, dx = size, 1
    e = size - 1
# smooth old values
for y in range(size):
for x in range(size):
i= Index( x , y )
TH=2
sum=oldvalue[i]
ddiv=1
w=0
if (y!=0): w=oldvalue[i-dy]
else: w=oldvalue[ Index( e-x , 1 ) ]
if(w>TH):
sum+=w
ddiv+=1
if (x!=0): w=oldvalue[i-dx]
else: w=oldvalue[ Index( 1 , e-y ) ]
if(w>TH):
sum+=w
ddiv+=1
if (y!=e): w=oldvalue[i+dy]
else: w=oldvalue[ Index( e-x ,e-1 ) ]
if(w>TH):
sum+=w
ddiv+=1
if (x!=e): w=oldvalue[i+dx]
else: w=oldvalue[ Index( e-1 , e-y ) ]
if(w>TH):
sum+=w
ddiv+=1
sum=(sum+ddiv/2)/ddiv
k=(x+tx+(y+ty)*s)*3
t[k]=t[k+1]=t[k+2]=sum
def SetSize(self,_size):
self.size=_size
self.initMap()
self.ComputeWeight()
def getDir(self, x, y):
fs=float(self.size)-1
#create point -
p = numpy.array((x*2./fs-1.,y*2./fs-1,0))
ax=numpy.abs(p[0]); ay=numpy.abs(p[1]); az=1
if (ax+ay>1.0):
p = numpy.array((numpy.sign(p[0])*(1-ay),numpy.sign(p[1])*(1-ax), 0))
az=-1
p[2]=(1-ax-ay)*az
# Normalize
p /= numpy.linalg.norm(p)
return p
def initMap(self):
size = self.size
dir = self.dir = numpy.zeros((size*size, 3))
for y in range(size):
for x in range(size):
dir[self.Index(x,y)]=self.getDir(x,y)
def ComputeWeight(self):
size = self.size
getDir = self.getDir
weight = self.weight = numpy.zeros((size*size))
k = 0
for y in range(size):
for x in range(size):
h=0.5
p00=getDir(x-h,y-h)
p01=getDir(x-h,y+0)
p02=getDir(x-h,y+h)
p10=getDir(x+0,y-h)
p11=getDir(x+0,y+0)
p12=getDir(x+0,y+h)
p20=getDir(x+h,y-h)
p21=getDir(x+h,y+0)
p22=getDir(x+h,y+h)
tota=0; c=0; e=size-1
if ( (x!=0) and (y!=0) ):
tota+=Area( p00, p10, p01 )
tota+=Area( p10, p11, p01 )
c+=1
if ( (x!=0) and (y!=e) ):
tota+=Area( p01, p11, p12 )
tota+=Area( p01, p12, p02 )
c+=1
if ( (x!=e) and (y!=0) ):
tota+=Area( p10, p20, p21 )
tota+=Area( p21, p11, p10 )
c+=1
if ( (x!=e) and (y!=e) ):
tota+=Area( p11, p21, p12 )
tota+=Area( p21, p22, p12 )
c+=1
weight[k]=1.0/(tota*4/c)
k+=1
def TotTexSizeX(self): return self.size
def TotTexSizeY(self): return self.size
octamap = OctaMapSamp()
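# Usage sketch (illustrative only; the map size below is an arbitrary assumption):
#
#     octamap.SetSize(8)          # builds the direction map and per-texel weights
#     d = octamap.getDir(3, 4)    # unit direction vector for texel (3, 4)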
| gpl-2.0 | -3,011,985,873,142,060,000 | 28.886364 | 87 | 0.384221 | false |
3ev0/android-whitelist | blackswan/core/modularity.py | 1 | 1286 | __author__ = 'ivo'
import logging
import argparse
from blackswan import config
_log = logging.getLogger(__name__)
class ModuleBase():
argparser = None
def __init__(self):
self.config = {}
@classmethod
def register(cls):
cls.argparser = argparse.ArgumentParser(description=cls.description, prog=cls.modname, add_help=False)
cls.argparser.add_argument("-b", "--db", default=config.def_db, help="The blackswan db file. Default: {}".format(config.def_db))
cls.add_args()
config.modules[cls.modname] = cls
_log.debug("Module %s registered", cls.modname)
return
@classmethod
def add_args(cls):
raise NotImplementedError
def work(self):
raise NotImplementedError
def __repr__(self):
return "<{}({})>".format(self.modname, repr(self.config))
def parse_args(self, modargs):
args = self.argparser.parse_args(args=modargs)
self.config.update(**vars(args))
def run(self):
_log.info("Module %s started", self.modname)
self.work()
_log.info("Module %s finished", self.modname)
def configure(self, **kwargs):
self.config.update(kwargs)
_log.info("Module %s configured: \n%s", self.modname, repr(self.config))
| apache-2.0 | -8,348,951,146,452,041,000 | 26.956522 | 136 | 0.624417 | false |
rbuffat/pyidf | tests/test_surfacecontaminantsourceandsinkgenericboundarylayerdiffusion.py | 1 | 2309 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.internal_gains import SurfaceContaminantSourceAndSinkGenericBoundaryLayerDiffusion
log = logging.getLogger(__name__)
class TestSurfaceContaminantSourceAndSinkGenericBoundaryLayerDiffusion(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_surfacecontaminantsourceandsinkgenericboundarylayerdiffusion(self):
pyidf.validation_level = ValidationLevel.error
obj = SurfaceContaminantSourceAndSinkGenericBoundaryLayerDiffusion()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_surface_name = "object-list|Surface Name"
obj.surface_name = var_surface_name
# real
var_mass_transfer_coefficient = 0.0
obj.mass_transfer_coefficient = var_mass_transfer_coefficient
# object-list
var_schedule_name = "object-list|Schedule Name"
obj.schedule_name = var_schedule_name
# real
var_henry_adsorption_constant_or_partition_coefficient = 0.0001
obj.henry_adsorption_constant_or_partition_coefficient = var_henry_adsorption_constant_or_partition_coefficient
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.surfacecontaminantsourceandsinkgenericboundarylayerdiffusions[0].name, var_name)
self.assertEqual(idf2.surfacecontaminantsourceandsinkgenericboundarylayerdiffusions[0].surface_name, var_surface_name)
self.assertAlmostEqual(idf2.surfacecontaminantsourceandsinkgenericboundarylayerdiffusions[0].mass_transfer_coefficient, var_mass_transfer_coefficient)
self.assertEqual(idf2.surfacecontaminantsourceandsinkgenericboundarylayerdiffusions[0].schedule_name, var_schedule_name)
self.assertAlmostEqual(idf2.surfacecontaminantsourceandsinkgenericboundarylayerdiffusions[0].henry_adsorption_constant_or_partition_coefficient, var_henry_adsorption_constant_or_partition_coefficient) | apache-2.0 | 6,754,138,350,835,265,000 | 41.777778 | 208 | 0.737982 | false |
drammock/pyeparse | pyeparse/utils.py | 1 | 3374 | # Authors: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from os import path as op
import glob
import tempfile
from shutil import rmtree
import atexit
def create_chunks(sequence, size):
"""Generate chunks from a sequence
Note. copied from MNE-Python
Parameters
----------
sequence : iterable
Any iterable object
size : int
The chunksize to be returned
"""
return (sequence[p:p + size] for p in range(0, len(sequence), size))
def fwhm_kernel_2d(size, fwhm, center=None):
""" Make a square gaussian kernel.
Note: adapted from https://gist.github.com/andrewgiessel/4635563
Parameters
----------
size : int
The length of the square matrix to create.
    fwhm : int
        The full width at half maximum value.
"""
x = np.arange(0, size, 1, np.float64)
y = x[:, np.newaxis]
# center
x0 = y0 = size // 2
return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / fwhm ** 2)
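# Example call (sizes are illustrative assumptions): a 64x64 Gaussian kernel
# whose full width at half maximum is 10 samples.
#
#     kern = fwhm_kernel_2d(64, 10)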
def pupil_kernel(fs, dur=4.0, t_max=0.930, n=10.1, s=1.):
"""Generate pupil response kernel modeled as an Erlang gamma function.
Parameters
----------
fs : int
Sampling frequency (samples/second) to use in generating the kernel.
dur : float
Length (in seconds) of the generated kernel.
t_max : float
Time (in seconds) where the response maximum is stipulated to occur.
n : float
        Number of negative-exponential layers in the cascade defining the
        kernel.
s : float | None
Desired value for the area under the kernel. If `None`, no scaling is
performed.
"""
n_samp = int(np.round(fs * dur))
t = np.arange(n_samp, dtype=float) / fs
h = (t ** n) * np.exp(- n * t / t_max)
scal = 1. if s is None else float(s) / (np.sum(h) * (t[1] - t[0]))
h = scal * h
return h
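# Example call (sampling rate is an illustrative assumption): a 4-second pupil
# response kernel sampled at 1000 Hz.
#
#     h = pupil_kernel(1000., dur=4.0)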
def _get_test_fnames():
"""Get usable test files (omit EDF if no edf2asc)"""
path = op.join(op.dirname(__file__), 'tests', 'data')
fnames = glob.glob(op.join(path, '*.edf'))
return fnames
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules.
We cannot simply use __del__() method for cleanup here because the rmtree
function may be cleaned up before this object, so we use the atexit module
instead.
"""
def __new__(self):
new = str.__new__(self, tempfile.mkdtemp())
return new
def __init__(self):
self._path = self.__str__()
atexit.register(self.cleanup)
def cleanup(self):
rmtree(self._path, ignore_errors=True)
def _has_joblib():
"""Helper to determine if joblib is installed"""
try:
import joblib # noqa
except Exception:
return False
else:
return True
def _has_h5py():
"""Helper to determine if joblib is installed"""
try:
import h5py # noqa
except Exception:
return False
else:
return True
def _has_edfapi():
"""Helper to determine if a user has edfapi installed"""
from .edf._raw import has_edfapi
return has_edfapi
_requires_h5py = np.testing.dec.skipif(not _has_h5py(),
'Requires h5py')
_requires_edfapi = np.testing.dec.skipif(not _has_edfapi(), 'Requires edfapi')
| bsd-3-clause | -3,761,224,465,408,458,000 | 24.560606 | 79 | 0.603734 | false |
gzorin/RSXGL | extsrc/mesa/src/mapi/glapi/gen/gl_XML.py | 1 | 25960 | #!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <[email protected]>
import libxml2
import re, sys, string
import typeexpr
def parse_GL_API( file_name, factory = None ):
doc = libxml2.readFile( file_name, None, libxml2.XML_PARSE_XINCLUDE + libxml2.XML_PARSE_NOBLANKS + libxml2.XML_PARSE_DTDVALID + libxml2.XML_PARSE_DTDATTR + libxml2.XML_PARSE_DTDLOAD + libxml2.XML_PARSE_NOENT )
ret = doc.xincludeProcess()
if not factory:
factory = gl_item_factory()
api = factory.create_item( "api", None, None )
api.process_element( doc )
# After the XML has been processed, we need to go back and assign
# dispatch offsets to the functions that request that their offsets
# be assigned by the scripts. Typically this means all functions
# that are not part of the ABI.
for func in api.functionIterateByCategory():
if func.assign_offset:
func.offset = api.next_offset;
api.next_offset += 1
doc.freeDoc()
return api
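# Usage sketch (illustrative only; the XML file name is an assumption):
#
#     api = parse_GL_API('gl_API.xml')
#     for func in api.functionIterateByOffset():
#         print func.name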
def is_attr_true( element, name ):
"""Read a name value from an element's attributes.
The value read from the attribute list must be either 'true' or
'false'. If the value is 'false', zero will be returned. If the
value is 'true', non-zero will be returned. An exception will be
raised for any other value."""
value = element.nsProp( name, None )
if value == "true":
return 1
elif value == "false":
return 0
else:
raise RuntimeError('Invalid value "%s" for boolean "%s".' % (value, name))
class gl_print_base:
"""Base class of all API pretty-printers.
In the model-view-controller pattern, this is the view. Any derived
class will want to over-ride the printBody, printRealHader, and
printRealFooter methods. Some derived classes may want to over-ride
printHeader and printFooter, or even Print (though this is unlikely).
"""
def __init__(self):
# Name of the script that is generating the output file.
# Every derived class should set this to the name of its
# source file.
self.name = "a"
# License on the *generated* source file. This may differ
# from the license on the script that is generating the file.
# Every derived class should set this to some reasonable
# value.
#
# See license.py for an example of a reasonable value.
self.license = "The license for this file is unspecified."
# The header_tag is the name of the C preprocessor define
# used to prevent multiple inclusion. Typically only
# generated C header files need this to be set. Setting it
# causes code to be generated automatically in printHeader
# and printFooter.
self.header_tag = None
# List of file-private defines that must be undefined at the
# end of the file. This can be used in header files to define
# names for use in the file, then undefine them at the end of
# the header file.
self.undef_list = []
return
def Print(self, api):
self.printHeader()
self.printBody(api)
self.printFooter()
return
def printHeader(self):
"""Print the header associated with all files and call the printRealHeader method."""
print '/* DO NOT EDIT - This file generated automatically by %s script */' \
% (self.name)
print ''
print '/*'
print ' * ' + self.license.replace('\n', '\n * ')
print ' */'
print ''
if self.header_tag:
print '#if !defined( %s )' % (self.header_tag)
print '# define %s' % (self.header_tag)
print ''
self.printRealHeader();
return
def printFooter(self):
"""Print the header associated with all files and call the printRealFooter method."""
self.printRealFooter()
if self.undef_list:
print ''
for u in self.undef_list:
print "# undef %s" % (u)
if self.header_tag:
print ''
print '#endif /* !defined( %s ) */' % (self.header_tag)
def printRealHeader(self):
"""Print the "real" header for the created file.
In the base class, this function is empty. All derived
classes should over-ride this function."""
return
def printRealFooter(self):
"""Print the "real" footer for the created file.
In the base class, this function is empty. All derived
classes should over-ride this function."""
return
def printPure(self):
"""Conditionally define `PURE' function attribute.
Conditionally defines a preprocessor macro `PURE' that wraps
GCC's `pure' function attribute. The conditional code can be
easilly adapted to other compilers that support a similar
feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("PURE")
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define PURE __attribute__((pure))
# else
# define PURE
# endif"""
return
def printFastcall(self):
"""Conditionally define `FASTCALL' function attribute.
Conditionally defines a preprocessor macro `FASTCALL' that
wraps GCC's `fastcall' function attribute. The conditional
code can be easilly adapted to other compilers that support a
similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("FASTCALL")
print """# if defined(__i386__) && defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)
# define FASTCALL __attribute__((fastcall))
# else
# define FASTCALL
# endif"""
return
def printVisibility(self, S, s):
"""Conditionally define visibility function attribute.
Conditionally defines a preprocessor macro name S that wraps
GCC's visibility function attribute. The visibility used is
the parameter s. The conditional code can be easilly adapted
to other compilers that support a similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append(S)
print """# if (defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) && defined(__ELF__))
# define %s __attribute__((visibility("%s")))
# else
# define %s
# endif""" % (S, s, S)
return
def printNoinline(self):
"""Conditionally define `NOINLINE' function attribute.
Conditionally defines a preprocessor macro `NOINLINE' that
wraps GCC's `noinline' function attribute. The conditional
code can be easilly adapted to other compilers that support a
similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("NOINLINE")
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define NOINLINE __attribute__((noinline))
# else
# define NOINLINE
# endif"""
return
def real_function_name(element):
name = element.nsProp( "name", None )
alias = element.nsProp( "alias", None )
if alias:
return alias
else:
return name
def real_category_name(c):
if re.compile("[1-9][0-9]*[.][0-9]+").match(c):
return "GL_VERSION_" + c.replace(".", "_")
else:
return c
def classify_category(name, number):
"""Based on the category name and number, select a numerical class for it.
Categories are divided into four classes numbered 0 through 3. The
classes are:
0. Core GL versions, sorted by version number.
1. ARB extensions, sorted by extension number.
2. Non-ARB extensions, sorted by extension number.
3. Un-numbered extensions, sorted by extension name.
"""
try:
core_version = float(name)
except Exception,e:
core_version = 0.0
if core_version > 0.0:
cat_type = 0
key = name
elif name.startswith("GL_ARB_") or name.startswith("GLX_ARB_") or name.startswith("WGL_ARB_"):
cat_type = 1
key = int(number)
else:
if number != None:
cat_type = 2
key = int(number)
else:
cat_type = 3
key = name
return [cat_type, key]
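# For example (assumed inputs, shown for illustration only):
#     classify_category("1.5", None)                  -> [0, "1.5"]
#     classify_category("GL_ARB_multitexture", "1")   -> [1, 1]
#     classify_category("GL_NV_point_sprite", "262")  -> [2, 262]
#     classify_category("GL_OES_fixed_point", None)   -> [3, "GL_OES_fixed_point"]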
def create_parameter_string(parameters, include_names):
"""Create a parameter string from a list of gl_parameters."""
list = []
for p in parameters:
if p.is_padding:
continue
if include_names:
list.append( p.string() )
else:
list.append( p.type_string() )
if len(list) == 0: list = ["void"]
return string.join(list, ", ")
class gl_item:
def __init__(self, element, context):
self.context = context
self.name = element.nsProp( "name", None )
self.category = real_category_name( element.parent.nsProp( "name", None ) )
return
class gl_type( gl_item ):
def __init__(self, element, context):
gl_item.__init__(self, element, context)
self.size = int( element.nsProp( "size", None ), 0 )
te = typeexpr.type_expression( None )
tn = typeexpr.type_node()
tn.size = int( element.nsProp( "size", None ), 0 )
tn.integer = not is_attr_true( element, "float" )
tn.unsigned = is_attr_true( element, "unsigned" )
tn.name = "GL" + self.name
te.set_base_type_node( tn )
self.type_expr = te
return
def get_type_expression(self):
return self.type_expr
class gl_enum( gl_item ):
def __init__(self, element, context):
gl_item.__init__(self, element, context)
self.value = int( element.nsProp( "value", None ), 0 )
temp = element.nsProp( "count", None )
if not temp or temp == "?":
self.default_count = -1
else:
try:
c = int(temp)
except Exception,e:
                raise RuntimeError('Invalid count value "%s" for enum "%s" when an integer was expected.' % (temp, self.name))
self.default_count = c
return
def priority(self):
"""Calculate a 'priority' for this enum name.
When an enum is looked up by number, there may be many
possible names, but only one is the 'prefered' name. The
priority is used to select which name is the 'best'.
Highest precedence is given to core GL name. ARB extension
names have the next highest, followed by EXT extension names.
Vendor extension names are the lowest.
"""
if self.name.endswith( "_BIT" ):
bias = 1
else:
bias = 0
if self.category.startswith( "GL_VERSION_" ):
priority = 0
elif self.category.startswith( "GL_ARB_" ):
priority = 2
elif self.category.startswith( "GL_EXT_" ):
priority = 4
else:
priority = 6
return priority + bias
class gl_parameter:
def __init__(self, element, context):
self.name = element.nsProp( "name", None )
ts = element.nsProp( "type", None )
self.type_expr = typeexpr.type_expression( ts, context )
temp = element.nsProp( "variable_param", None )
if temp:
self.count_parameter_list = temp.split( ' ' )
else:
self.count_parameter_list = []
# The count tag can be either a numeric string or the name of
# a variable. If it is the name of a variable, the int(c)
# statement will throw an exception, and the except block will
# take over.
c = element.nsProp( "count", None )
try:
count = int(c)
self.count = count
self.counter = None
except Exception,e:
count = 1
self.count = 0
self.counter = c
self.count_scale = int(element.nsProp( "count_scale", None ))
elements = (count * self.count_scale)
if elements == 1:
elements = 0
#if ts == "GLdouble":
# print '/* stack size -> %s = %u (before)*/' % (self.name, self.type_expr.get_stack_size())
# print '/* # elements = %u */' % (elements)
self.type_expr.set_elements( elements )
#if ts == "GLdouble":
# print '/* stack size -> %s = %u (after) */' % (self.name, self.type_expr.get_stack_size())
self.is_client_only = is_attr_true( element, 'client_only' )
self.is_counter = is_attr_true( element, 'counter' )
self.is_output = is_attr_true( element, 'output' )
# Pixel data has special parameters.
self.width = element.nsProp('img_width', None)
self.height = element.nsProp('img_height', None)
self.depth = element.nsProp('img_depth', None)
self.extent = element.nsProp('img_extent', None)
self.img_xoff = element.nsProp('img_xoff', None)
self.img_yoff = element.nsProp('img_yoff', None)
self.img_zoff = element.nsProp('img_zoff', None)
self.img_woff = element.nsProp('img_woff', None)
self.img_format = element.nsProp('img_format', None)
self.img_type = element.nsProp('img_type', None)
self.img_target = element.nsProp('img_target', None)
self.img_pad_dimensions = is_attr_true( element, 'img_pad_dimensions' )
self.img_null_flag = is_attr_true( element, 'img_null_flag' )
self.img_send_null = is_attr_true( element, 'img_send_null' )
self.is_padding = is_attr_true( element, 'padding' )
return
def compatible(self, other):
return 1
def is_array(self):
return self.is_pointer()
def is_pointer(self):
return self.type_expr.is_pointer()
def is_image(self):
if self.width:
return 1
else:
return 0
def is_variable_length(self):
return len(self.count_parameter_list) or self.counter
def is_64_bit(self):
count = self.type_expr.get_element_count()
if count:
if (self.size() / count) == 8:
return 1
else:
if self.size() == 8:
return 1
return 0
def string(self):
return self.type_expr.original_string + " " + self.name
def type_string(self):
return self.type_expr.original_string
def get_base_type_string(self):
return self.type_expr.get_base_name()
def get_dimensions(self):
if not self.width:
return [ 0, "0", "0", "0", "0" ]
dim = 1
w = self.width
h = "1"
d = "1"
e = "1"
if self.height:
dim = 2
h = self.height
if self.depth:
dim = 3
d = self.depth
if self.extent:
dim = 4
e = self.extent
return [ dim, w, h, d, e ]
def get_stack_size(self):
return self.type_expr.get_stack_size()
def size(self):
if self.is_image():
return 0
else:
return self.type_expr.get_element_size()
def get_element_count(self):
c = self.type_expr.get_element_count()
if c == 0:
return 1
return c
def size_string(self, use_parens = 1):
s = self.size()
if self.counter or self.count_parameter_list:
list = [ "compsize" ]
if self.counter and self.count_parameter_list:
list.append( self.counter )
elif self.counter:
list = [ self.counter ]
if s > 1:
list.append( str(s) )
if len(list) > 1 and use_parens :
return "(%s)" % (string.join(list, " * "))
else:
return string.join(list, " * ")
elif self.is_image():
return "compsize"
else:
return str(s)
def format_string(self):
if self.type_expr.original_string == "GLenum":
return "0x%x"
else:
return self.type_expr.format_string()
class gl_function( gl_item ):
def __init__(self, element, context):
self.context = context
self.name = None
self.entry_points = []
self.return_type = "void"
self.parameters = []
self.offset = -1
self.initialized = 0
self.images = []
self.assign_offset = 0
self.static_entry_points = []
# Track the parameter string (for the function prototype)
# for each entry-point. This is done because some functions
# change their prototype slightly when promoted from extension
# to ARB extension to core. glTexImage3DEXT and glTexImage3D
# are good examples of this. Scripts that need to generate
# code for these differing aliases need to real prototype
# for each entry-point. Otherwise, they may generate code
# that won't compile.
self.entry_point_parameters = {}
self.process_element( element )
return
def process_element(self, element):
name = element.nsProp( "name", None )
alias = element.nsProp( "alias", None )
if is_attr_true(element, "static_dispatch"):
self.static_entry_points.append(name)
self.entry_points.append( name )
if alias:
true_name = alias
else:
true_name = name
# Only try to set the offset when a non-alias
# entry-point is being processes.
offset = element.nsProp( "offset", None )
if offset:
try:
o = int( offset )
self.offset = o
except Exception, e:
self.offset = -1
if offset == "assign":
self.assign_offset = 1
if not self.name:
self.name = true_name
elif self.name != true_name:
raise RuntimeError("Function true name redefined. Was %s, now %s." % (self.name, true_name))
# There are two possible cases. The first time an entry-point
# with data is seen, self.initialized will be 0. On that
# pass, we just fill in the data. The next time an
# entry-point with data is seen, self.initialized will be 1.
# On that pass we have to make that the new values match the
# valuse from the previous entry-point.
parameters = []
return_type = "void"
child = element.children
while child:
if child.type == "element":
if child.name == "return":
return_type = child.nsProp( "type", None )
elif child.name == "param":
param = self.context.factory.create_item( "parameter", child, self.context)
parameters.append( param )
child = child.next
if self.initialized:
if self.return_type != return_type:
raise RuntimeError( "Return type changed in %s. Was %s, now %s." % (name, self.return_type, return_type))
if len(parameters) != len(self.parameters):
raise RuntimeError( "Parameter count mismatch in %s. Was %d, now %d." % (name, len(self.parameters), len(parameters)))
for j in range(0, len(parameters)):
p1 = parameters[j]
p2 = self.parameters[j]
if not p1.compatible( p2 ):
raise RuntimeError( 'Parameter type mismatch in %s. "%s" was "%s", now "%s".' % (name, p2.name, p2.type_expr.original_string, p1.type_expr.original_string))
if true_name == name or not self.initialized:
self.return_type = return_type
self.parameters = parameters
for param in self.parameters:
if param.is_image():
self.images.append( param )
if element.children:
self.initialized = 1
self.entry_point_parameters[name] = parameters
else:
self.entry_point_parameters[name] = []
return
def filter_entry_points(self, entry_point_list):
"""Filter out entry points not in entry_point_list."""
if not self.initialized:
raise RuntimeError('%s is not initialized yet' % self.name)
entry_points = []
for ent in self.entry_points:
if ent not in entry_point_list:
if ent in self.static_entry_points:
self.static_entry_points.remove(ent)
self.entry_point_parameters.pop(ent)
else:
entry_points.append(ent)
if not entry_points:
raise RuntimeError('%s has no entry point after filtering' % self.name)
self.entry_points = entry_points
if self.name not in entry_points:
# use the first remaining entry point
self.name = entry_points[0]
self.parameters = self.entry_point_parameters[entry_points[0]]
def get_images(self):
"""Return potentially empty list of input images."""
return self.images
def parameterIterator(self):
return self.parameters.__iter__();
def get_parameter_string(self, entrypoint = None):
if entrypoint:
params = self.entry_point_parameters[ entrypoint ]
else:
params = self.parameters
return create_parameter_string( params, 1 )
def get_called_parameter_string(self):
p_string = ""
comma = ""
for p in self.parameterIterator():
p_string = p_string + comma + p.name
comma = ", "
return p_string
def is_abi(self):
return (self.offset >= 0 and not self.assign_offset)
def is_static_entry_point(self, name):
return name in self.static_entry_points
def dispatch_name(self):
if self.name in self.static_entry_points:
return self.name
else:
return "_dispatch_stub_%u" % (self.offset)
def static_name(self, name):
if name in self.static_entry_points:
return name
else:
return "_dispatch_stub_%u" % (self.offset)
class gl_item_factory:
"""Factory to create objects derived from gl_item."""
def create_item(self, item_name, element, context):
if item_name == "function":
return gl_function(element, context)
if item_name == "type":
return gl_type(element, context)
elif item_name == "enum":
return gl_enum(element, context)
elif item_name == "parameter":
return gl_parameter(element, context)
elif item_name == "api":
return gl_api(self)
else:
return None
class gl_api:
def __init__(self, factory):
self.functions_by_name = {}
self.enums_by_name = {}
self.types_by_name = {}
self.category_dict = {}
self.categories = [{}, {}, {}, {}]
self.factory = factory
self.next_offset = 0
typeexpr.create_initial_types()
return
def filter_functions(self, entry_point_list):
"""Filter out entry points not in entry_point_list."""
functions_by_name = {}
for func in self.functions_by_name.itervalues():
entry_points = [ent for ent in func.entry_points if ent in entry_point_list]
if entry_points:
func.filter_entry_points(entry_points)
functions_by_name[func.name] = func
self.functions_by_name = functions_by_name
def process_element(self, doc):
element = doc.children
while element.type != "element" or element.name != "OpenGLAPI":
element = element.next
if element:
self.process_OpenGLAPI(element)
return
def process_OpenGLAPI(self, element):
child = element.children
while child:
if child.type == "element":
if child.name == "category":
self.process_category( child )
elif child.name == "OpenGLAPI":
self.process_OpenGLAPI( child )
child = child.next
return
def process_category(self, cat):
cat_name = cat.nsProp( "name", None )
cat_number = cat.nsProp( "number", None )
[cat_type, key] = classify_category(cat_name, cat_number)
self.categories[cat_type][key] = [cat_name, cat_number]
child = cat.children
while child:
if child.type == "element":
if child.name == "function":
func_name = real_function_name( child )
temp_name = child.nsProp( "name", None )
self.category_dict[ temp_name ] = [cat_name, cat_number]
if self.functions_by_name.has_key( func_name ):
func = self.functions_by_name[ func_name ]
func.process_element( child )
else:
func = self.factory.create_item( "function", child, self )
self.functions_by_name[ func_name ] = func
if func.offset >= self.next_offset:
self.next_offset = func.offset + 1
elif child.name == "enum":
enum = self.factory.create_item( "enum", child, self )
self.enums_by_name[ enum.name ] = enum
elif child.name == "type":
t = self.factory.create_item( "type", child, self )
self.types_by_name[ "GL" + t.name ] = t
child = child.next
return
def functionIterateByCategory(self, cat = None):
"""Iterate over functions by category.
If cat is None, all known functions are iterated in category
order. See classify_category for details of the ordering.
Within a category, functions are sorted by name. If cat is
not None, then only functions in that category are iterated.
"""
lists = [{}, {}, {}, {}]
for func in self.functionIterateAll():
[cat_name, cat_number] = self.category_dict[func.name]
if (cat == None) or (cat == cat_name):
[func_cat_type, key] = classify_category(cat_name, cat_number)
if not lists[func_cat_type].has_key(key):
lists[func_cat_type][key] = {}
lists[func_cat_type][key][func.name] = func
functions = []
for func_cat_type in range(0,4):
keys = lists[func_cat_type].keys()
keys.sort()
for key in keys:
names = lists[func_cat_type][key].keys()
names.sort()
for name in names:
functions.append(lists[func_cat_type][key][name])
return functions.__iter__()
def functionIterateByOffset(self):
max_offset = -1
for func in self.functions_by_name.itervalues():
if func.offset > max_offset:
max_offset = func.offset
temp = [None for i in range(0, max_offset + 1)]
for func in self.functions_by_name.itervalues():
if func.offset != -1:
temp[ func.offset ] = func
list = []
for i in range(0, max_offset + 1):
if temp[i]:
list.append(temp[i])
return list.__iter__();
def functionIterateAll(self):
return self.functions_by_name.itervalues()
def enumIterateByName(self):
keys = self.enums_by_name.keys()
keys.sort()
list = []
for enum in keys:
list.append( self.enums_by_name[ enum ] )
return list.__iter__()
def categoryIterate(self):
"""Iterate over categories.
Iterate over all known categories in the order specified by
classify_category. Each iterated value is a tuple of the
name and number (which may be None) of the category.
"""
list = []
for cat_type in range(0,4):
keys = self.categories[cat_type].keys()
keys.sort()
for key in keys:
list.append(self.categories[cat_type][key])
return list.__iter__()
def get_category_for_name( self, name ):
if self.category_dict.has_key(name):
return self.category_dict[name]
else:
return ["<unknown category>", None]
def typeIterate(self):
return self.types_by_name.itervalues()
def find_type( self, type_name ):
if type_name in self.types_by_name:
return self.types_by_name[ type_name ].type_expr
else:
print "Unable to find base type matching \"%s\"." % (type_name)
return None
| bsd-2-clause | 8,469,495,609,065,517,000 | 24.985986 | 210 | 0.667874 | false |
wesleywerner/conspire | src/const.py | 1 | 7542 | STATE_MENU = 1
STATE_BUILD = 2
STATE_UFO = 3
STATE_FLIGHT = 4
STATE_RESULTS = 5
STATE_END = 100
# parts available for use per level number.
LEVEL_PARTS = {
1: ('tax returns', 'shopping list', 'todo list',
'ludum dare comments', 'bank accounts',
'website passwords', 'IP address scamlist',
),
2: ('human torso', 'human head',
'human left arm', 'human right arm',
'human left leg', 'human right leg',
'website passwords', 'todo list', 'alien left leg',
),
3: ('alien torso', 'alien head',
'alien left arm', 'alien right arm',
'alien left leg', 'alien right leg',
'human torso', 'human head', 'human left arm',
),
4: ('codex page I', 'codex page II', 'codex page III',
'codex page IV', 'codex page V', 'codex page VI',
'biblical references', 'book of psalms', 'book of tomas',
'todo list', 'ludum dare comments'
),
5: ('ptreodactyl torso', 'ptreodactyl skull',
'ptreodactyl right wing', 'ptreodactyl left wing',
'cyclops right arm', 'cyclops left leg',
'human left arm', 'human left leg',
),
6: ('alien torso', 'alien head',
'alien left arm', 'alien right arm',
'alien left leg', 'alien right leg',
'trex torso', 'trex head', 'trex tail', 'trex legs',
'human torso', 'human head',
'human left arm', 'human right arm',
'human left leg', 'human right leg',
),
}
#500: ('cyclops torso', 'cyclops skull',
#'cyclops right arm', 'cyclops left arm',
#'cyclops right leg', 'cyclops left leg',
#),
# parts a level may start off with as in use
DEFAULT_PARTS = {
1: ('shopping list', 'ludum dare comments'),
3: ('human torso', 'human head', 'human left arm',),
4: ('todo list', 'codex page I',),
5: ('ptreodactyl left wing', 'cyclops right arm', 'human left leg',),
}
# parts required to complete a level
COMPLETION_PARTS = {
1: ('tax returns', 'bank accounts',
'website passwords', 'IP address scamlist',
),
2: ('human torso', 'human head',
'human left arm', 'human right arm',
'human left leg', 'human right leg',
),
3: ('alien torso', 'alien head',
'alien left arm', 'alien right arm',
'alien left leg', 'alien right leg',
),
4: ('codex page I', 'codex page II', 'codex page III',
'codex page IV', 'codex page V', 'codex page VI',
),
5: ('ptreodactyl torso', 'ptreodactyl skull',
'ptreodactyl right wing', 'ptreodactyl left wing',
),
6: ('alien torso', 'alien head',
'alien left arm', 'alien right arm',
'alien left leg', 'alien right leg',
'trex torso', 'trex head', 'trex tail', 'trex legs'
),
}
LEVEL_SCENARIOS = (
"skip",
"Welcome to Conspiracy 101, agent!" \
"\n" \
"Use your arrows or mouse wheel to scroll through this briefing." \
"\n" \
"We represent certain global interests. " \
"These interests are kept hidden from the public, they are " \
"privately funded and have access to the top levels government." \
"\n" \
"To start you off, we need to forge some papers. Our goal is to " \
"discredit a high ranking official who is refusing cooperation " \
"with our fine organization. We hope this move will make him " \
"reconsider." \
"\n" \
"Compile fake papers for tax returns and bank accounts. " \
"These figures match up to a internet scamming operation this " \
"individual is apparently running, so include lists of website " \
"passwords and IP addresses." \
"\n" \
"Do this by placing the correct items in the green area below. " \
"When you are done proceed to plant the evidence, " \
"the evidence will be carried by aircraft via remote control. " \
"At a critical moment, you will force crash the craft, ensuring " \
"the evidence is discovered." \
"\n" \
"Good luck, and welcome to the team, Agent!",
"...anyway, welcome back, Agent! We have a situation..." \
"\n" \
"A problematic official is suspected of being a double agent. " \
"We are going to make him disappear from public by faking his death, " \
"while keeping him under ground for 'questioning'." \
"\n" \
"Construct a fake human body, as accurately as possbile. " \
"The body will be found at the air crash site you will coordinate. " \
"\n" \
"Report back after the mission for debriefing.",
"We are in the business of predicting the future, Agent! " \
"And the best way to predict the future, is to invent it!" \
"\n" \
"We have a situation with one of our underground testing fascilities, " \
"rumors are spreading of it's existence, and we cannot allow this. " \
"We need a distraction for the media, and conspiracy theorists love " \
"nothing more than a good alien story! " \
"\n" \
"Contruct a faux alien corpse, you will be flying it remotely in " \
"one of our top-secret super experimental aircraft. Remember to " \
"down it in the green zone for optimal mission success." \
"\n" \
"Well what are you waiting for, you are not in preschool " \
"any more!",
"A pertinent problem has arisen regarding the highest persons " \
"in state, specifically the ones involved in the secular movements. " \
"\n" \
"In exchange for invaluable information from these informants we " \
"are doing them a favor." \
"\n" \
"Construct a faux codex book to decode ancient texts. " \
"This will gain leverage over the public." \
"\n" \
"The codex will be taken from the crash site, by a fellow agent, " \
"and secured in another location for discovery. " \
"\n" \
"Remember to dot your i's and cross your t's.",
"Our friends over at the dept. of evolution made a very generous " \
"donation in our behalf, gaining notoriety with very important " \
"higher-ups. " \
"\n" \
"As a sign of good faith, you will construct a pterodactyl skeleton " \
"to be flown out to an undisclosed location." \
"\n" \
"This will serve their agenda nicely.",
"Good day, Agent!" \
"\n" \
"Today is a very special day, we are going to combine the ultimate " \
"of myths. A lot of people are unsure about the existence of " \
"extra terrestrials, and many others the same about dinosaurs. " \
"We will play on their uncertainty of both and create a story " \
"to distract everybody from what is really going on in the world! " \
"\n" \
"You must construct one alien corpse, and one T-rex skeleton. " \
"The cover story is that dinosaurs were in fact alien of origin." \
"\n" \
"Keep this up, Agent, and one day you'll be the one making up these stories!",
)
# affects the wording used in reports.
# try these:
# political, myth
SCENARIO_TYPE = (
'skip',
'political',
'political',
'myth',
'myth',
'myth',
'myth',
'',
)
# determine the type of the item to build, maps to levels.
ITEM_TYPES = (
'skip',
'documents',
"high ranking official's body",
'alien corpse',
'biblical codex',
'pterodactyl skeleton',
'alien corpse and T-rex skeleton',
'',
)
# determine the method of evidence deployment
TACTICAL_TYPE = (
0,
STATE_FLIGHT,
STATE_FLIGHT,
STATE_UFO,
STATE_FLIGHT,
STATE_FLIGHT,
STATE_UFO,
)
| gpl-3.0 | 5,350,094,556,457,263,000 | 34.408451 | 82 | 0.609122 | false |
vIiRuS/Lagerregal | devices/migrations/0002_auto_20151105_0513.py | 1 | 5237 | from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20151105_0513'),
('devices', '0001_initial'),
('devicetypes', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('devicegroups', '0001_initial'),
('locations', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='template',
name='devicetype',
field=models.ForeignKey(blank=True, to='devicetypes.Type', null=True, on_delete=models.CASCADE),
),
migrations.AddField(
model_name='template',
name='manufacturer',
field=models.ForeignKey(blank=True, to='devices.Manufacturer', null=True, on_delete=models.CASCADE),
),
migrations.AddField(
model_name='room',
name='building',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to='devices.Building', null=True),
),
migrations.AddField(
model_name='room',
name='section',
field=models.ForeignKey(related_name='rooms', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='locations.Section', null=True),
),
migrations.AddField(
model_name='picture',
name='device',
field=models.ForeignKey(related_name='pictures', to='devices.Device', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='note',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='note',
name='device',
field=models.ForeignKey(related_name='notes', to='devices.Device', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='lending',
name='device',
field=models.ForeignKey(blank=True, to='devices.Device', null=True, on_delete=models.CASCADE),
),
migrations.AddField(
model_name='lending',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Lent to', to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='deviceinformation',
name='device',
field=models.ForeignKey(related_name='information', to='devices.Device', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='deviceinformation',
name='infotype',
field=models.ForeignKey(to='devices.DeviceInformationType', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='device',
name='bookmarkers',
field=models.ManyToManyField(related_name='bookmarks', null=True, through='devices.Bookmark', to=settings.AUTH_USER_MODEL, blank=True),
),
migrations.AddField(
model_name='device',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='device',
name='currentlending',
field=models.ForeignKey(related_name='currentdevice', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='devices.Lending', null=True),
),
migrations.AddField(
model_name='device',
name='department',
field=models.ForeignKey(related_name='devices', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='users.Department', null=True),
),
migrations.AddField(
model_name='device',
name='devicetype',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='devicetypes.Type', null=True),
),
migrations.AddField(
model_name='device',
name='group',
field=models.ForeignKey(related_name='devices', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='devicegroups.Devicegroup', null=True),
),
migrations.AddField(
model_name='device',
name='manufacturer',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='devices.Manufacturer', null=True),
),
migrations.AddField(
model_name='device',
name='room',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='devices.Room', null=True),
),
migrations.AddField(
model_name='bookmark',
name='device',
field=models.ForeignKey(to='devices.Device', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='bookmark',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
),
]
| bsd-3-clause | 7,381,653,905,418,718,000 | 41.577236 | 160 | 0.596525 | false |
akshaybabloo/Car-ND | Project_5/laneline.py | 1 | 21371 | import cv2
import glob
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.metrics import mean_squared_error
x_cor = 9 #Number of corners to find
y_cor = 6
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((y_cor*x_cor,3), np.float32)
objp[:,:2] = np.mgrid[0:x_cor, 0:y_cor].T.reshape(-1,2)
def camera_cal():
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('camera_cal/calibration*.jpg') # Make a list of paths to calibration images
# Step through the list and search for chessboard corners
corners_not_found = [] #Calibration images in which opencv failed to find corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Conver to grayscale
ret, corners = cv2.findChessboardCorners(gray, (x_cor,y_cor), None) # Find the chessboard corners
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
else:
corners_not_found.append(fname)
print 'Corners were found on', str(len(imgpoints)), 'out of', str(len(images)), 'which is', str(len(imgpoints)*100.0/len(images)), '% of calibration images'
img_size = (img.shape[1], img.shape[0])
# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)
return mtx, dist
mtx, dist = camera_cal()
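# Calibration runs once at import time; mtx and dist are reused by undistort() below.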
def undistort(img):
return cv2.undistort(img, mtx, dist, None, mtx)
def eq_Hist(img): # Histogram normalization
img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])
return img
# Sobel
def sobel_img(img, thresh_min = 25, thresh_max = 255, sobel_kernel = 11):
sobelx = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobely = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
scaled_sobelx = np.uint16(255*sobelx/np.max(sobelx))
scaled_sobely = np.uint16(255*sobely/np.max(sobely))
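# Combine gradients, weighting the x-gradient more heavily since lane lines are near-vertical in the image.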
sobel_sum = scaled_sobelx+0.2*scaled_sobely
scaled_sobel_sum = np.uint8(255*sobel_sum/np.max(sobel_sum))
sum_binary = np.zeros_like(scaled_sobel_sum)
sum_binary[(scaled_sobel_sum >= thresh_min) & (scaled_sobel_sum <= thresh_max)] = 1
return sum_binary
# Sobel magnitude
def sobel_mag_img(img, thresh_min = 25, thresh_max = 255, sobel_kernel = 11):
sobelx = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobely = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
gradmag = np.sqrt(sobelx**2 + sobely**2)
scaled_gradmag = np.uint8(255*gradmag/np.max(gradmag))
gradmag_binary = np.zeros_like(scaled_gradmag)
gradmag_binary[(scaled_gradmag >= thresh_min) & (scaled_gradmag <= thresh_max)] = 1
return gradmag_binary
# Sobel direction
def sobel_dir_img(img, thresh_min = 0.0, thresh_max = 1.5, sobel_kernel = 11):
sobelx = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobely = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
graddir = np.arctan2(sobely, sobelx)
graddir_binary = np.zeros_like(graddir)
graddir_binary[(graddir >= thresh_min) & (graddir <= thresh_max)] = 1
return graddir_binary
# Binary red channel threshold
def red_thres(img, thresh_min = 25, thresh_max = 255):
red = img[:,:,2]
red_binary = np.zeros_like(red)
red_binary[(red >= thresh_min) & (red <= thresh_max)] = 1
return red_binary
# Binary saturation channel threshold
def s_thres(img, thresh_min = 25, thresh_max = 255):
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
s_channel = hls[:,:,2]
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel > thresh_min) & (s_channel <= thresh_max)] = 1
return s_binary
# Return saturation channel
def s_hls(img):
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
return hls[:,:,2]
IMAGE_H = 223
IMAGE_W = 1280
# Sharpen image
def sharpen_img(img):
gb = cv2.GaussianBlur(img, (5,5), 20.0)
return cv2.addWeighted(img, 2, gb, -1, 0)
# Compute linear image transformation img*s+m
def lin_img(img,s=1.0,m=0.0):
img2=cv2.multiply(img, np.array([s]))
return cv2.add(img2, np.array([m]))
# Change image contrast; s>1 - increase
def contr_img(img, s=1.0):
m=127.0*(1.0-s)
return lin_img(img, s, m)
# Create perspective image transformation matrices
def create_M():
src = np.float32([[0, 673], [1207, 673], [0, 450], [1280, 450]])
dst = np.float32([[569, 223], [711, 223], [0, 0], [1280, 0]])
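# src (camera view) is mapped onto dst (bird's-eye view); Minv is used later to warp results back.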
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
return M, Minv
# Main image transformation routine to get a warped image
def transform(img, M):
undist = undistort(img)
img_size = (1280, 223)
warped = cv2.warpPerspective(undist, M, img_size)
warped = sharpen_img(warped)
warped = contr_img(warped, 1.1)
return warped
# Show original and warped image side by side
def show_warped(img, M):
f, (plot1, plot2) = plt.subplots(1, 2, figsize=(9, 3))
plot1.imshow(cv2.cvtColor(undistort(img), cv2.COLOR_BGR2RGB))
plot1.set_title('Undistorted', fontsize=20)
plot2.imshow(cv2.cvtColor(transform(img, M), cv2.COLOR_BGR2RGB))
plot2.set_title('Warped', fontsize=20)
# Show one image
def show_img(img):
if len(img.shape)==3:
plt.figure()
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
else:
plt.figure()
plt.imshow(img, cmap='gray')
M, Minv = create_M()
#Calculate coefficients of a polynomial in y+h coordinates, i.e. f(y) -> f(y+h)
def pol_shift(pol, h):
pol_ord = len(pol)-1 # Determine the degree of the polynomial
if pol_ord == 3:
pol0 = pol[0]
pol1 = pol[1] + 3.0*pol[0]*h
pol2 = pol[2] + 3.0*pol[0]*h*h + 2.0*pol[1]*h
pol3 = pol[3] + pol[0]*h*h*h + pol[1]*h*h + pol[2]*h
return(np.array([pol0, pol1, pol2, pol3]))
if pol_ord == 2:
pol0 = pol[0]
pol1 = pol[1] + 2.0*pol[0]*h
pol2 = pol[2] + pol[0]*h*h+pol[1]*h
return(np.array([pol0, pol1, pol2]))
if pol_ord == 1:
pol0 = pol[0]
pol1 = pol[1] + pol[0]*h
return(np.array([pol0, pol1]))
# Calculate the derivative of a polynomial pol at a point x
def pol_d(pol, x):
pol_ord = len(pol)-1
if pol_ord == 3:
return 3.0*pol[0]*x*x+2.0*pol[1]*x+pol[2]
if pol_ord == 2:
return 2.0*pol[0]*x+pol[1]
if pol_ord == 1:
return pol[0]#*np.ones(len(np.array(x)))
# Calculate the second derivative of a polynomial pol at a point x
def pol_dd(pol, x):
pol_ord = len(pol)-1
if pol_ord == 3:
return 6.0*pol[0]*x+2.0*pol[1]
if pol_ord == 2:
return 2.0*pol[0]
if pol_ord == 1:
return 0.0
# Calculate a polynomial value at a point x
def pol_calc(pol, x):
pol_f = np.poly1d(pol)
return(pol_f(x))
xm_in_px = 3.675 / 85 # Lane width (12 ft in m) is ~85 px on image
ym_in_px = 3.048 / 24 # Dashed line length (10 ft in m) is ~24 px on image
def px_to_m(px):
return xm_in_px*px
# Calculate offset from the lane center
def lane_offset(left, right):
offset = 1280/2.0-(pol_calc(left, 1.0)+ pol_calc(right, 1.0))/2.0
return px_to_m(offset)
# Calculate radius of curvature
MAX_RADIUS = 10000
def r_curv(pol, y):
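# Radius of curvature in meters: refit the polynomial in metric units, then apply R = (1 + f'^2)^(3/2) / |f''|.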
if len(pol) == 2: # If the polynomial is a linear function
return MAX_RADIUS
else:
y_pol = np.linspace(0, 1, num=EQUID_POINTS)
x_pol = pol_calc(pol, y_pol)*xm_in_px
y_pol = y_pol*IMAGE_H*ym_in_px
pol = np.polyfit(y_pol, x_pol, len(pol)-1)
d_y = pol_d(pol, y)
dd_y = pol_dd(pol, y)
r = ((np.sqrt(1+d_y**2))**3)/abs(dd_y)
if r > MAX_RADIUS:
r = MAX_RADIUS
return r
def lane_curv(left, right):
l = r_curv(left, 1.0)
r = r_curv(right, 1.0)
if l < MAX_RADIUS and r < MAX_RADIUS:
return (r_curv(left, 1.0)+r_curv(right, 1.0))/2.0
else:
if l < MAX_RADIUS:
return l
if r < MAX_RADIUS:
return r
return MAX_RADIUS
#Calculate an approximate equidistant to a parabola
EQUID_POINTS = 25 # Number of points to use for the equidistant approximation
def equidistant(pol, d, max_l = 1, plot = False):
y_pol = np.linspace(0, max_l, num=EQUID_POINTS)
x_pol = pol_calc(pol, y_pol)
y_pol *= IMAGE_H # Convert y coordinates back to [0..223] scale
x_m = []
y_m = []
k_m = []
for i in range(len(x_pol)-1):
x_m.append((x_pol[i+1]-x_pol[i])/2.0+x_pol[i]) # Calculate point positions between the given points
y_m.append((y_pol[i+1]-y_pol[i])/2.0+y_pol[i])
if x_pol[i+1] == x_pol[i]:
k_m.append(1e8) # A vary big number
else:
k_m.append(-(y_pol[i+1]-y_pol[i])/(x_pol[i+1]-x_pol[i])) # Slope of perpendicular lines
x_m = np.array(x_m)
y_m = np.array(y_m)
k_m = np.array(k_m)
#Calculate equidistant points
y_eq = d*np.sqrt(1.0/(1+k_m**2))
x_eq = np.zeros_like(y_eq)
if d >= 0:
for i in range(len(x_m)):
if k_m[i] < 0:
y_eq[i] = y_m[i]-abs(y_eq[i])
else:
y_eq[i] = y_m[i]+abs(y_eq[i])
x_eq[i] = (x_m[i]-k_m[i]*y_m[i])+k_m[i]*y_eq[i]
else:
for i in range(len(x_m)):
if k_m[i] < 0:
y_eq[i] = y_m[i]+abs(y_eq[i])
else:
y_eq[i] = y_m[i]-abs(y_eq[i])
x_eq[i] = (x_m[i]-k_m[i]*y_m[i])+k_m[i]*y_eq[i]
y_eq /= IMAGE_H # Convert y coordinates back to [0..1] scale
y_pol /= IMAGE_H
y_m /= IMAGE_H
pol_eq = np.polyfit(y_eq, x_eq, len(pol)-1) # Fit the equidistant with a polynomial
if plot:
plt.plot(x_pol, y_pol, color='red', linewidth=1, label = 'Original line') #Original line
plt.plot(x_eq, y_eq, color='green', linewidth=1, label = 'Equidistant') #Equidistant
plt.plot(pol_calc(pol_eq, y_pol), y_pol, color='blue',
linewidth=1, label = 'Approximation') #Approximation
plt.legend()
for i in range(len(x_m)):
plt.plot([x_m[i],x_eq[i]], [y_m[i],y_eq[i]], color='black', linewidth=1) #Draw connection lines
plt.savefig('readme_img/equid.jpg')
return pol_eq
DEV_POL = 2 # Max mean squared error of the approximation
MSE_DEV = 1.1 # Minimum mean squared error ratio to consider higher order of the polynomial
def best_pol_ord(x, y):
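# Fit polynomials of increasing order (1 to 3) and return the lowest order whose fit error is acceptable.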
pol1 = np.polyfit(y,x,1)
pred1 = pol_calc(pol1, y)
mse1 = mean_squared_error(x, pred1)
if mse1 < DEV_POL:
return pol1, mse1
pol2 = np.polyfit(y,x,2)
pred2 = pol_calc(pol2, y)
mse2 = mean_squared_error(x, pred2)
if mse2 < DEV_POL or mse1/mse2 < MSE_DEV:
return pol2, mse2
else:
pol3 = np.polyfit(y,x,3)
pred3 = pol_calc(pol3, y)
mse3 = mean_squared_error(x, pred3)
if mse2/mse3 < MSE_DEV:
return pol2, mse2
else:
return pol3, mse3
# Smooth polynomial functions of different degrees
def smooth_dif_ord(pol_p, x, y, new_ord):
x_p = pol_calc(pol_p, y)
x_new = (x+x_p)/2.0
return np.polyfit(y, x_new, new_ord)
# Calculate threshold for the left line
def thres_l_calc(sens):
thres = -0.0045*sens**2+1.7581*sens-115.0
if thres < 25*(382.0-sens)/382.0+5:
thres = 25*(382.0-sens)/382.0+5
return thres
# Calculate threshold for the right line
def thres_r_calc(sens):
thres = -0.0411*sens**2+9.1708*sens-430.0
if sens<210:
if thres < sens/6:
thres = sens/6
else:
if thres < 20:
thres = 20
return thres
WINDOW_SIZE = 15 # Half of the sensor span
DEV = 7 # Maximum of the point deviation from the sensor center
SPEED = 2.0 / IMAGE_H # Pixel shift per frame (float division keeps this non-zero under Python 2)
POL_ORD = 2 # Default polynomial order
RANGE = 0.0 # Fraction of the image to skip
def find(img, left=True, p_ord=POL_ORD, pol = np.zeros(POL_ORD+1), max_n = 0):
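# Scan the warped image row by row with a small window ("sensor"). The window is centered on the
# polynomial from the previous frame (or the last detected point), and the brightest pixel above
# a dynamic threshold is taken as the lane-line position for that row.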
x_pos = []
y_pos = []
max_l = img.shape[0] #number of lines in the img
for i in range(max_l-int(max_l*RANGE)):
y = max_l-i #Line number
y_01 = y / float(max_l) #y in [0..1] scale
if abs(pol[-1]) > 0: #If it not a still image or the first video frame
if y_01 >= max_n + SPEED: # If we can use pol to find center of the virtual sensor from the previous frame
cent = int(pol_calc(pol, y_01-SPEED))
if y == max_l:
if left:
cent = 605
else:
cent = 690
else: # Prolong the pol tangentially
k = pol_d(pol, max_n)
b = pol_calc(pol, max_n)-k*max_n
cent = int(k*y_01+b)
if cent > 1280-WINDOW_SIZE:
cent = 1280-WINDOW_SIZE
if cent < WINDOW_SIZE:
cent = WINDOW_SIZE
else: #If it is a still image
if len(x_pos) > 0: # If there are some points detected
cent = x_pos[-1] # Use the previous point as the sensor center
else: #Initial guess on line position
if left:
cent = 605
else:
cent = 690
if left: #Subsample image
sens = 0.5*s_hls(img[max_l-1-i:max_l-i,cent-WINDOW_SIZE:cent+WINDOW_SIZE,:])\
+img[max_l-1-i:max_l-i,cent-WINDOW_SIZE:cent+WINDOW_SIZE,2]
else:
sens = img[max_l-1-i:max_l-i,cent-WINDOW_SIZE:cent+WINDOW_SIZE,2]
if len(sens[0,:]) < WINDOW_SIZE: #If we are out of the image
break
x_max = max(sens[0,:]) #Find maximal value on the sensor
sens_mean = np.mean(sens[0,:])
# Get threshold
if left:
loc_thres = thres_l_calc(sens_mean)
loc_dev = DEV
else:
loc_thres = thres_r_calc(sens_mean)
loc_dev = DEV
if len(x_pos) == 0:
loc_dev = WINDOW_SIZE
if (x_max-sens_mean) > loc_thres and (x_max>100 or left):
if left:
x = list(reversed(sens[0,:])).index(x_max)
x = cent+WINDOW_SIZE-x
else:
x = list(sens[0,:]).index(x_max)
x = cent-WINDOW_SIZE+x
if x-1 < 569.0*y_01 or x+1 > 569.0*y_01+711 or np.nonzero(sens[0,:]) < WINDOW_SIZE: #if the sensor touches the black triangle
break # We are done
if abs(pol[-1]) < 1e-4: # If there are no polynomial provided
x_pos.append(x)
y_pos.append(y_01)
else:
if abs(x-cent) < loc_dev:#*14.206*r_curv(pol, max_l)**-0.2869:
x_pos.append(x)
y_pos.append(y_01)
if len(x_pos) > 1:
return x_pos, y_pos
else:
return [0], [0.0]
RANGE = 0.0
def get_lane(img, plot=False):
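# Detect both lane lines on a single image: warp to bird's-eye view, locate line points, fit polynomials.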
warp = transform(img, M)
img = undistort(img)
ploty = np.linspace(0, 1, num=warp.shape[0])
x2, y2 = find(warp)
x, y = find(warp, False)
right_fitx = pol_calc(best_pol_ord(x,y)[0], ploty)
left_fitx = pol_calc(best_pol_ord(x2,y2)[0], ploty)
y2 = np.int16(np.array(y2)*223.0) # Convert into [0..223] scale
y = np.int16(np.array(y)*223.0)
if plot:
for i in range(len(x)): # Plot points
cv2.circle(warp, (x[i], y[i]), 1, (255,50,255))
for i in range(len(x2)):
cv2.circle(warp, (x2[i], y2[i]), 1, (255,50,250))
show_img(warp)
plt.axis('off')
plt.plot(left_fitx, ploty*IMAGE_H, color='green', linewidth=1)
plt.plot(right_fitx, ploty*IMAGE_H, color='green', linewidth=1)
cv2.imwrite('img.jpg', warp)
return img, left_fitx, right_fitx, ploty*IMAGE_H
def draw_lane_img_p(img_path):
return cv2.imread(img_path)
def draw_lane(img, video=False):
if video:
img, left_fitx, right_fitx, ploty, left, right = get_lane_video(img)
else:
img, left_fitx, right_fitx, ploty = get_lane(img, False)
warp_zero = np.zeros((IMAGE_H,IMAGE_W)).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(img, 1.0, newwarp, 0.6, 0)
if video:
# Add text information on the frame
font = cv2.FONT_HERSHEY_SIMPLEX
text_pos = 'Pos of the car: '+str(np.round(lane_offset(left, right),2))+ ' m'
radius = np.round(lane_curv(left, right),2)
if radius >= MAX_RADIUS:
radius = 'Inf'
else:
radius = str(radius)
text_rad = 'Radius: '+radius+ ' m'
cv2.putText(result,text_pos,(10,25), font, 1,(255,255,255),2)
cv2.putText(result,text_rad,(10,75), font, 1,(255,255,255),2)
return(result)
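# Example usage (a minimal sketch; the image paths below are assumptions, not part of this project):
#   frame = cv2.imread('test_images/test1.jpg')
#   annotated = draw_lane(frame)
#   cv2.imwrite('output_images/test1_lane.jpg', annotated)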
right_fit_p = np.zeros(POL_ORD+1)
left_fit_p = np.zeros(POL_ORD+1)
r_len = 0
l_len = 0
lane_w_p = 90
MIN = 60 # Minimal line separation (in px)
MAX = 95 # Maximal line separation (in px)
MIN_POINTS = 10 #Minimal points to consider a line
MAX_N = 5 # Maximal frames without line detected to use previous frame
n_count = 0 # Frame counter
r_n = 0 # Number of frames with unsuccessful line detection
l_n = 0
def get_lane_video(img):
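# Video version of get_lane(): reuses and smooths the polynomial fits from previous frames, and when
# only one line is detected, reconstructs the other as an equidistant copy at the measured lane width.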
global right_fit_p, left_fit_p, r_len, l_len, n_count, r_n, l_n
sw = False
warp = transform(img, M)
img = undistort(img)
if l_n < MAX_N and n_count > 0:
x, y = find(warp, pol = left_fit_p, max_n = l_len)
else:
x, y = find(warp)
if len(x) > MIN_POINTS:
left_fit, mse_l = best_pol_ord(x,y)
if mse_l > DEV_POL*9 and n_count > 0:
left_fit = left_fit_p
l_n += 1
else:
l_n /= 2
else:
left_fit = left_fit_p
l_n += 1
if r_n < MAX_N and n_count > 0:
x2, y2 = find(warp, False, pol = right_fit_p, max_n = r_len)
else:
x2, y2 = find(warp, False)
if len(x2) > MIN_POINTS:
right_fit, mse_r = best_pol_ord(x2, y2)
if mse_r > DEV_POL*9 and n_count > 0:
right_fit = right_fit_p
r_n += 1
else:
r_n /= 2
else:
right_fit = right_fit_p
r_n += 1
if n_count > 0: # if not the first video frame
# Apply filter
if len(left_fit_p) == len(left_fit): # If the new and previous polynomials have the same order
left_fit = pol_shift(left_fit_p, -SPEED)*(1.0-len(x)/((1.0-RANGE)*IMAGE_H))+left_fit*(len(x)/((1.0-RANGE)*IMAGE_H))
else:
left_fit = smooth_dif_ord(left_fit_p, x, y, len(left_fit)-1)
l_len = y[-1]
if len(right_fit_p) == len(right_fit):
right_fit = pol_shift(right_fit_p, -SPEED)*(1.0-len(x2)/((1.0-RANGE)*IMAGE_H))+right_fit*(len(x2)/((1.0-RANGE)*IMAGE_H))
else:
right_fit = smooth_dif_ord(right_fit_p, x2, y2, len(right_fit)-1)
r_len = y2[-1]
if len(x) > MIN_POINTS and len(x2) <= MIN_POINTS: # If we have only left line
lane_w = pol_calc(right_fit_p, 1.0)-pol_calc(left_fit_p, 1.0)
right_fit = smooth_dif_ord(right_fit_p, pol_calc(equidistant(left_fit, lane_w, max_l=l_len), y),
y, len(left_fit)-1)
r_len = l_len
r_n /=2
if len(x2) > MIN_POINTS and len(x) <= MIN_POINTS: # If we have only right line
lane_w = pol_calc(right_fit_p, 1.0)-pol_calc(left_fit_p, 1.0)
#print(lane_w)
left_fit = smooth_dif_ord(left_fit_p, pol_calc(equidistant(right_fit, -lane_w, max_l=r_len), y2),
y2, len(right_fit)-1)
l_len = r_len
l_n /=2
if (l_n < MAX_N and r_n < MAX_N):
max_y = max(RANGE, l_len, r_len)
else:
max_y = 1.0#max(RANGE, l_len, r_len)
sw = True
d1 = pol_calc(right_fit, 1.0)-pol_calc(left_fit, 1.0)
dm = pol_calc(right_fit, max_y)-pol_calc(left_fit, max_y)
if (d1 > MAX or d1 < 60 or dm < 0):
left_fit = left_fit_p
right_fit = right_fit_p
l_n += 1
r_n += 1
ploty = np.linspace(max_y, 1, num=IMAGE_H)
left_fitx = pol_calc(left_fit, ploty)
right_fitx = pol_calc(right_fit, ploty)
right_fit_p = np.copy(right_fit)
left_fit_p = np.copy(left_fit)
n_count += 1
return img, left_fitx, right_fitx, ploty*223.0, left_fit, right_fit
def init_params(ran):
global right_fit_p, left_fit_p, n_count, RANGE, MIN_POINTS
right_fit_p = np.zeros(POL_ORD+1)
left_fit_p = np.zeros(POL_ORD+1)
n_count = 0
RANGE = ran
MIN_POINTS = 25-15*ran
| mit | 4,058,978,250,439,783,000 | 36.42732 | 156 | 0.575406 | false |
amir-qayyum-khan/lore | ui/tests/test_learningresources_views.py | 1 | 12408 | """
Test the importer views to make sure they work.
"""
from __future__ import unicode_literals
import json
import logging
import os
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.urlresolvers import resolve
import ui.urls
from learningresources.models import Repository, StaticAsset
from roles.api import assign_user_to_repo_group, remove_user_from_repo_group
from roles.permissions import GroupTypes
from search.sorting import LoreSortingFields
from six.moves import reload_module # pylint: disable=import-error
from learningresources.tests.base import LoreTestCase
HTTP_OK = 200
UNAUTHORIZED = 403
NOT_FOUND = 404
log = logging.getLogger(__name__)
# pylint: disable=too-many-public-methods
class TestViews(LoreTestCase):
"""Hit each view."""
def setUp(self):
super(TestViews, self).setUp()
self.repository_url = "/repositories/{0}/".format(self.repo.slug)
self.import_url_slug = "/repositories/{0}/import/".format(
self.repo.slug
)
def upload_test_file(self):
"""Used multiple times in tests"""
with default_storage.open(self.get_course_zip(), "rb") as post_file:
resp = self.client.post(
self.import_url_slug,
{"course_file": post_file, "repository": self.repo.id},
follow=True
)
return resp.content.decode("utf-8")
def test_get_home(self):
"""Home Page."""
body = self.assert_status_code("/home", HTTP_OK, return_body=True)
self.assertTrue("<title>MIT - LORE </title>" in body)
body = self.assert_status_code("/", HTTP_OK, return_body=True)
self.assertTrue("<title>MIT - LORE </title>" in body)
self.assertTrue('>Create repository</a>' in body)
self.assertFalse('Request permission to create '
'repositories</a>' in body)
def test_get_home_norepo(self):
"""Home Page with no authorization to create repositories"""
self.logout()
self.login(self.USERNAME_NO_REPO)
body = self.assert_status_code("/home", HTTP_OK, return_body=True)
self.assertTrue("<title>MIT - LORE </title>" in body)
body = self.assert_status_code("/", HTTP_OK, return_body=True)
self.assertTrue("<title>MIT - LORE </title>" in body)
self.assertFalse('<a href="/lore/create_repo/">'
'Create repository</a>' in body)
self.assertTrue('Request permission to create '
'repositories</a>' in body)
def test_create_repo_post(self):
"""Create repo."""
repo_name = "my really sweet repository"
resp = self.client.post(
"/repositories/new/",
{"name": repo_name, "description": "foo"},
follow=True,
)
self.assertTrue(resp.status_code == HTTP_OK)
body = resp.content.decode("utf-8")
self.assertTrue(repo_name in body)
def test_listing_not_found(self):
"""View listing page, but repo does not exist."""
self.assert_status_code(
"/repositories/99/",
NOT_FOUND,
return_body=True
)
def test_listing_unauthorized(self):
"""View listing page, but not authorized to view this repository."""
self.logout()
self.login(self.USERNAME_NO_REPO)
self.assert_status_code(
self.repository_url,
UNAUTHORIZED,
return_body=True
)
def test_listing_importcourse_perms(self):
"""
Tests the listing page with different user permissions
to check who can see the import course html
"""
self.logout()
self.login(self.USERNAME_NO_REPO)
# user has no permissions at all
self.assert_status_code(
self.repository_url,
UNAUTHORIZED
)
# user has author permissions and cannot see the import for the repo
assign_user_to_repo_group(
self.user_norepo,
self.repo,
GroupTypes.REPO_AUTHOR
)
body = self.assert_status_code(
self.repository_url,
HTTP_OK,
return_body=True
)
self.assertFalse("Import Course</a>" in body)
# user has no permissions
remove_user_from_repo_group(
self.user_norepo,
self.repo,
GroupTypes.REPO_AUTHOR
)
self.assert_status_code(
self.repository_url,
UNAUTHORIZED
)
# user has curator permissions and can see the import for the repo
assign_user_to_repo_group(
self.user_norepo,
self.repo,
GroupTypes.REPO_CURATOR
)
body = self.assert_status_code(
self.repository_url,
HTTP_OK,
return_body=True
)
self.assertTrue("Import Course</a>" in body)
# user has no permissions
remove_user_from_repo_group(
self.user_norepo,
self.repo,
GroupTypes.REPO_CURATOR
)
self.assert_status_code(
self.repository_url,
UNAUTHORIZED
)
# user has admin permissions and can see the import for the repo
assign_user_to_repo_group(
self.user_norepo,
self.repo,
GroupTypes.REPO_ADMINISTRATOR
)
body = self.assert_status_code(
self.repository_url,
HTTP_OK,
return_body=True
)
self.assertTrue("Import Course</a>" in body)
def test_create_repo_get(self):
"""GET repo creation page."""
resp = self.client.get("/repositories/new", follow=True)
body = resp.content.decode("utf-8")
self.assertTrue('<h1>Create repository</h1>' in body)
def test_repo_post(self):
"""POST repo page."""
# We have the default self.repo in the database...
self.assertTrue(Repository.objects.count() == 1)
self.client.post(
"/repositories/new/",
{"name": "test name", "description": "test description"},
follow=True
)
self.assertTrue(Repository.objects.count() == 2)
def test_repo_dupe_slug(self):
"""slug must be unique"""
# We have the default self.repo in the database...
slug = "awesome-repo"
slug1 = "awesome-repo1"
self.assertFalse(Repository.objects.filter(slug=slug).exists())
self.assertFalse(Repository.objects.filter(slug=slug1).exists())
self.client.post(
"/repositories/new/",
{"name": "awesome repo", "description": "test description"},
follow=True
)
self.assertTrue(Repository.objects.filter(slug=slug).exists())
self.client.post(
"/repositories/new/",
{"name": "awesome repo", "description": "test description"},
follow=True
)
self.assertTrue(Repository.objects.filter(slug=slug1).exists())
def test_invalid_repo_form(self):
"""Upload invalid form"""
resp = self.client.post(
"/repositories/new/",
{}, follow=True
)
self.assertTrue(resp.status_code == HTTP_OK)
body = resp.content.decode("utf-8")
self.assertTrue("This field is required." in body)
self.assertTrue(Repository.objects.count() == 1)
def test_access_without_login(self):
"""
Tests the repository page without login
"""
self.logout()
response = self.client.get(self.repository_url, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to login
self.assertEqual(len(response.redirect_chain), 2)
self.assertTrue(302 in response.redirect_chain[0])
def test_repo_url(self):
"""Hit repo site normally."""
resp = self.client.get(self.repository_url, follow=True)
self.assertTrue(resp.status_code == HTTP_OK)
def test_repo_page_num(self):
"""Hit repo site normally."""
resp = self.client.get(self.repository_url + "?page=1", follow=True)
self.assertTrue(resp.status_code == HTTP_OK)
def test_repo_course_filter(self):
"""Hit repo site normally."""
querystring = "?selected_facets=course_exact:{0}".format(
self.course.course_number)
resp = self.client.get(self.repository_url + querystring, follow=True)
self.assertTrue(resp.status_code == HTTP_OK)
def test_listing_with_sorting(self):
"""
Hit the listing with sorting and test that the current sorting
changes in the interface.
The actual sorting of results is tested in search.tests.test_indexing
"""
url = self.repository_url + "?sortby={0}"
def get_sorting_options(resp):
"""Helper function to decode JSON sorting options."""
return json.loads(resp.context['sorting_options_json'])
# test no sort type
resp = self.client.get(self.repository_url, follow=True)
self.assertEqual(resp.status_code, HTTP_OK)
self.assertEqual(
get_sorting_options(resp)['current'],
list(LoreSortingFields.get_sorting_option(
LoreSortingFields.DEFAULT_SORTING_FIELD
))
)
# test all the allowed sort types
for sort_option in LoreSortingFields.all_sorting_options():
sort_url = url.format(sort_option[0])
resp = self.client.get(sort_url, follow=True)
self.assertEqual(resp.status_code, HTTP_OK)
self.assertEqual(
get_sorting_options(resp)['current'],
list(sort_option)
)
# test sorting by not allowed sort type
url_not_allowed_sort_type = url.format('foo_field')
resp = self.client.get(url_not_allowed_sort_type, follow=True)
self.assertEqual(resp.status_code, HTTP_OK)
self.assertEqual(
get_sorting_options(resp)['current'],
list(LoreSortingFields.get_sorting_option(
LoreSortingFields.DEFAULT_SORTING_FIELD
))
)
def test_serve_media(self):
"""Hit serve media"""
self.assertEqual(
settings.DEFAULT_FILE_STORAGE,
'storages.backends.overwrite.OverwriteStorage'
)
# tests that the media view is available
resolver = resolve('/media/assets/foo.txt')
self.assertEqual(resolver.view_name, 'media')
# upload a course
self.upload_test_file()
self.assertEqual(len(StaticAsset.objects.all()), 5)
# take the url of a static asset
static_asset = StaticAsset.objects.first().asset
static_asset_url = static_asset.url
# hit the view
resp = self.client.get(static_asset_url)
self.assertEqual(resp.status_code, HTTP_OK)
self.assertEqual(
resp.get('Content-Disposition'),
'attachment; filename={}'.format(
os.path.basename(static_asset_url)
)
)
self.assertEqual(
b"".join(resp.streaming_content),
static_asset.file.read()
)
# only the user with right to see the repo can access the file
self.logout()
self.login(self.user_norepo.username)
resp = self.client.get(static_asset_url)
self.assertEqual(resp.status_code, UNAUTHORIZED)
# login back with the original user
self.logout()
self.login(self.user.username)
# hit the view with a nonexistent file
resp = self.client.get('/media/fsdfs2837hwdudnks/foo.txt')
self.assertEqual(resp.status_code, NOT_FOUND)
# change the default file storage to S3
with self.settings(
DEFAULT_FILE_STORAGE=('storages.backends'
'.s3boto.S3BotoStorage')
):
reload_module(ui.urls)
# the view is not available any more
resp = self.client.get(static_asset_url)
self.assertEqual(resp.status_code, NOT_FOUND)
# force the reload of the urls again to be sure to have everything back
reload_module(ui.urls)
| agpl-3.0 | -8,293,194,561,384,870,000 | 35.174927 | 79 | 0.590103 | false |
foobacca/django-cms | cms/tests/toolbar.py | 1 | 7223 | from __future__ import with_statement
from cms.api import create_page
from cms.toolbar.toolbar import CMSToolbar
from cms.middleware.toolbar import ToolbarMiddleware
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from django.contrib.auth.models import AnonymousUser, User, Permission
from django.test.client import RequestFactory
class ToolbarTestBase(SettingsOverrideTestCase):
def get_page_request(self, page, user, path=None, edit=False):
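# Helper: build a request for the page, attach a session and user, and run the toolbar middleware on it.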
path = page and page.get_absolute_url() or path
if edit:
path += '?edit'
request = RequestFactory().get(path)
request.session = {}
request.user = user
request.LANGUAGE_CODE = "en"
if edit:
request.GET = {'edit': None}
else:
request.GET = {'edit_off': None}
request.current_page = page
mid = ToolbarMiddleware()
mid.process_request(request)
return request
def get_anon(self):
return AnonymousUser()
def get_staff(self):
staff = User(
username='staff',
email='[email protected]',
is_active=True,
is_staff=True,
)
staff.set_password('staff')
staff.save()
staff.user_permissions.add(Permission.objects.get(codename='change_page'))
return staff
def get_nonstaff(self):
nonstaff = User(
username='nonstaff',
email='[email protected]',
is_active=True,
is_staff=False,
)
nonstaff.set_password('nonstaff')
nonstaff.save()
nonstaff.user_permissions.add(Permission.objects.get(codename='change_page'))
return nonstaff
def get_superuser(self):
superuser = User(
username='superuser',
email='[email protected]',
is_active=True,
is_staff=True,
is_superuser=True,
)
superuser.set_password('superuser')
superuser.save()
return superuser
class ToolbarTests(ToolbarTestBase):
settings_overrides = {'CMS_PERMISSION': False}
def test_toolbar_no_page_anon(self):
request = self.get_page_request(None, self.get_anon(), '/')
toolbar = CMSToolbar(request)
items = toolbar.get_items()
self.assertEqual(len(items), 0)
def test_toolbar_no_page_staff(self):
request = self.get_page_request(None, self.get_staff(), '/')
toolbar = CMSToolbar(request)
items = toolbar.get_items()
# Logo + edit-mode + admin-menu + logout
self.assertEqual(len(items), 2)
self.assertEqual(len(items[0].get_context()['items']), 6)
def test_toolbar_no_page_superuser(self):
request = self.get_page_request(None, self.get_superuser(), '/')
toolbar = CMSToolbar(request)
items = toolbar.get_items()
# Logo + edit-mode + admin-menu + logout
self.assertEqual(len(items), 2)
self.assertEqual(len(items[0].get_context()['items']), 7)
def test_toolbar_anon(self):
page = create_page('test', 'nav_playground.html', 'en')
request = self.get_page_request(page, self.get_anon())
toolbar = CMSToolbar(request)
items = toolbar.get_items()
self.assertEqual(len(items), 0)
def test_toolbar_nonstaff(self):
page = create_page('test', 'nav_playground.html', 'en', published=True)
request = self.get_page_request(page, self.get_nonstaff())
toolbar = CMSToolbar(request)
items = toolbar.get_items()
# Logo + edit-mode + logout
self.assertEqual(len(items), 0)
def test_toolbar_template_change_permission(self):
with SettingsOverride(CMS_PERMISSION=True):
page = create_page('test', 'nav_playground.html', 'en', published=True)
request = self.get_page_request(page, self.get_nonstaff())
toolbar = CMSToolbar(request)
items = toolbar.get_items()
self.assertEqual([item for item in items if item.css_class_suffix == 'templates'], [])
def test_toolbar_markup(self):
create_page("toolbar-page", "nav_playground.html", "en", published=True)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get('/en/?edit')
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'nav_playground.html')
self.assertContains(response, '<div id="cms_toolbar"')
self.assertContains(response, 'cms.placeholders.js')
self.assertContains(response, 'cms.placeholders.css')
def test_show_toolbar_to_staff(self):
page = create_page("toolbar-page", "nav_playground.html", "en",
published=True)
request = self.get_page_request(page, self.get_staff(), '/')
toolbar = CMSToolbar(request)
self.assertTrue(toolbar.show_toolbar)
def test_show_toolbar_with_edit(self):
page = create_page("toolbar-page", "nav_playground.html", "en",
published=True)
request = self.get_page_request(page, AnonymousUser(), edit=True)
toolbar = CMSToolbar(request)
self.assertTrue(toolbar.show_toolbar)
def test_show_toolbar_without_edit(self):
page = create_page("toolbar-page", "nav_playground.html", "en",
published=True)
request = self.get_page_request(page, AnonymousUser(), edit=False)
toolbar = CMSToolbar(request)
self.assertFalse(toolbar.show_toolbar)
def test_toolbar_publish_button(self):
page = create_page('test', 'nav_playground.html', 'en', published=True)
request = self.get_page_request(page, self.get_superuser(), edit=True)
toolbar = CMSToolbar(request)
self.assertTrue(toolbar.edit_mode)
items = toolbar.get_items()
self.assertEqual(len(items), 6)
def test_toolbar_no_publish_button(self):
page = create_page('test', 'nav_playground.html', 'en', published=True)
request = self.get_page_request(page, self.get_staff(), edit=True)
toolbar = CMSToolbar(request)
self.assertTrue(page.has_change_permission(request))
self.assertFalse(page.has_publish_permission(request))
self.assertTrue(toolbar.edit_mode)
items = toolbar.get_items()
# Logo + edit-mode + templates + page-menu + admin-menu + logout
self.assertEqual(len(items), 5)
def test_toolbar_no_change_button(self):
page = create_page('test', 'nav_playground.html', 'en', published=True)
user = self.get_staff()
user.user_permissions.all().delete()
request = self.get_page_request(page, user, edit=True)
toolbar = CMSToolbar(request)
self.assertFalse(page.has_change_permission(request))
self.assertFalse(page.has_publish_permission(request))
items = toolbar.get_items()
# Logo + page-menu + admin-menu + logout
self.assertEqual(len(items), 2)
self.assertEqual(len(items[0].get_context()['items']), 6)
| bsd-3-clause | 860,474,157,216,884,000 | 37.625668 | 98 | 0.622456 | false |
googlegenomics/gcp-variant-transforms | gcp_variant_transforms/options/variant_transform_options_test.py | 1 | 10813 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for derivatives variant_transform_options."""
import unittest
import argparse
from typing import List # pylint: disable=unused-import
import mock
from apache_beam.io.gcp.internal.clients import bigquery
from apitools.base.py import exceptions
from gcp_variant_transforms.options import variant_transform_options
from gcp_variant_transforms.testing import temp_dir
def make_args(options, args):
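# Helper: build an argument parser with the options under test and parse the given argument list.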
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
options.add_arguments(parser)
namespace, remaining_args = parser.parse_known_args(args)
assert not remaining_args
return namespace
class VcfReadOptionsTest(unittest.TestCase):
"""Tests cases for the VcfReadOptions class."""
def setUp(self):
self._options = variant_transform_options.VcfReadOptions()
def _make_args(self, args):
# type: (List[str]) -> argparse.Namespace
return make_args(self._options, args)
def test_no_inputs(self):
args = self._make_args([])
self.assertRaises(ValueError, self._options.validate, args)
def test_failure_for_conflicting_flags_inputs(self):
args = self._make_args(['--input_pattern', '*',
'--input_file', 'asd'])
self.assertRaises(ValueError, self._options.validate, args)
def test_failure_for_conflicting_flags_headers(self):
args = self._make_args(['--input_pattern', '*',
'--infer_headers',
'--representative_header_file', 'gs://some_file'])
self.assertRaises(ValueError, self._options.validate, args)
def test_failure_for_conflicting_flags_no_errors_with_pattern_input(self):
args = self._make_args(['--input_pattern', '*',
'--representative_header_file', 'gs://some_file'])
self._options.validate(args)
def test_failure_for_conflicting_flags_no_errors_with_file_input(self):
lines = ['./gcp_variant_transforms/testing/data/vcf/valid-4.0.vcf\n',
'./gcp_variant_transforms/testing/data/vcf/valid-4.0.vcf\n',
'./gcp_variant_transforms/testing/data/vcf/valid-4.0.vcf\n']
with temp_dir.TempDir() as tempdir:
filename = tempdir.create_temp_file(lines=lines)
args = self._make_args([
'--input_file',
filename,
'--representative_header_file', 'gs://some_file'])
self._options.validate(args)
class BigQueryWriteOptionsTest(unittest.TestCase):
"""Tests cases for the BigQueryWriteOptions class."""
def setUp(self):
self._options = variant_transform_options.BigQueryWriteOptions()
def _make_args(self, args):
# type: (List[str]) -> argparse.Namespace
return make_args(self._options, args)
def test_valid_table_path(self):
args = self._make_args(['--append',
'--output_table', 'project:dataset.table'])
client = mock.Mock()
client.datasets.Get.return_value = bigquery.Dataset(
datasetReference=bigquery.DatasetReference(
projectId='project', datasetId='dataset'))
self._options.validate(args, client)
def test_existing_sample_table(self):
args = self._make_args(
['--append', 'False', '--output_table', 'project:dataset.table',
'--sharding_config_path',
'gcp_variant_transforms/testing/data/sharding_configs/'
'residual_at_end.yaml'])
client = mock.Mock()
client.tables.Get.return_value = bigquery.Table(
tableReference=bigquery.TableReference(projectId='project',
datasetId='dataset',
tableId='table__sample_info'))
with self.assertRaisesRegex(
ValueError,
'project:dataset.table__sample_info already exists'):
self._options.validate(args, client)
def test_existing_main_table(self):
def side_effect(request):
if (request == bigquery.BigqueryTablesGetRequest(
projectId='project',
datasetId='dataset',
tableId='table__sample_info')):
raise exceptions.HttpError(response={'status': '404'},
url='', content='')
return bigquery.Table(tableReference=bigquery.TableReference(
projectId='project',
datasetId='dataset',
tableId='table__chr1_part1'))
args = self._make_args(
['--append', 'False', '--output_table', 'project:dataset.table',
'--sharding_config_path',
'gcp_variant_transforms/testing/data/sharding_configs/'
'residual_at_end.yaml'])
client = mock.Mock()
client.tables.Get.side_effect = side_effect
with self.assertRaisesRegex(
ValueError,
'project:dataset.table__chr01_part1 already exists'):
self._options.validate(args, client)
def test_missing_sample_table(self):
args = self._make_args(
['--append', 'True', '--output_table', 'project:dataset.table',
'--sharding_config_path',
'gcp_variant_transforms/testing/data/sharding_configs/'
'residual_at_end.yaml'])
client = mock.Mock()
client.tables.Get.side_effect = exceptions.HttpError(
response={'status': '404'}, url='', content='')
with self.assertRaisesRegex(
ValueError,
'project:dataset.table__sample_info does not exist'):
self._options.validate(args, client)
def test_missing_main_table(self):
def side_effect(request):
if (request == bigquery.BigqueryTablesGetRequest(
projectId='project',
datasetId='dataset',
tableId='table__sample_info')):
return bigquery.Table(tableReference=bigquery.TableReference(
projectId='project',
datasetId='dataset',
tableId='table__sample_info'))
else:
raise exceptions.HttpError(response={'status': '404'},
url='', content='')
args = self._make_args(
['--append', 'True', '--output_table', 'project:dataset.table',
'--sharding_config_path',
'gcp_variant_transforms/testing/data/sharding_configs/'
'residual_at_end.yaml'])
client = mock.Mock()
client.tables.Get.side_effect = side_effect
with self.assertRaisesRegex(
ValueError,
'project:dataset.table__chr01_part1 does not exist'):
self._options.validate(args, client)
def test_no_project(self):
args = self._make_args(['--output_table', 'dataset.table'])
client = mock.Mock()
self.assertRaises(ValueError, self._options.validate, args, client)
def test_invalid_table_path(self):
no_table = self._make_args(['--output_table', 'project:dataset'])
incorrect_sep1 = self._make_args(['--output_table',
'project.dataset.table'])
incorrect_sep2 = self._make_args(['--output_table',
'project:dataset:table'])
client = mock.Mock()
self.assertRaises(
ValueError, self._options.validate, no_table, client)
self.assertRaises(
ValueError, self._options.validate, incorrect_sep1, client)
self.assertRaises(
ValueError, self._options.validate, incorrect_sep2, client)
def test_dataset_does_not_exists(self):
args = self._make_args(['--output_table', 'project:dataset.table'])
client = mock.Mock()
client.datasets.Get.side_effect = exceptions.HttpError(
response={'status': '404'}, url='', content='')
self.assertRaises(ValueError, self._options.validate, args, client)
class AnnotationOptionsTest(unittest.TestCase):
def setUp(self):
self._options = variant_transform_options.AnnotationOptions()
def _make_args(self, args):
# type: (List[str]) -> argparse.Namespace
return make_args(self._options, args)
def test_validate_okay(self):
"""Tests that no exceptions are raised for valid arguments."""
args = self._make_args(['--run_annotation_pipeline',
'--annotation_output_dir', 'gs://GOOD_DIR',
'--vep_image_uri', 'AN_IMAGE',
'--vep_cache_path', 'gs://VEP_CACHE'])
self._options.validate(args)
def test_invalid_output_dir(self):
args = self._make_args(['--run_annotation_pipeline',
'--annotation_output_dir', 'BAD_DIR',
'--vep_image_uri', 'AN_IMAGE',
'--vep_cache_path', 'gs://VEP_CACHE'])
self.assertRaises(ValueError, self._options.validate, args)
def test_failure_for_no_image(self):
args = self._make_args(['--run_annotation_pipeline',
'--annotation_output_dir', 'BAD_DIR',
'--vep_cache_path', 'gs://VEP_CACHE'])
self.assertRaises(ValueError, self._options.validate, args)
def test_failure_for_invalid_vep_cache(self):
args = self._make_args(['--run_annotation_pipeline',
'--annotation_output_dir', 'gs://GOOD_DIR',
'--vep_image_uri', 'AN_IMAGE',
'--vep_cache_path', 'VEP_CACHE'])
self.assertRaises(ValueError, self._options.validate, args)
class PreprocessOptionsTest(unittest.TestCase):
"""Tests cases for the PreprocessOptions class."""
def setUp(self):
self._options = variant_transform_options.PreprocessOptions()
def _make_args(self, args):
# type: (List[str]) -> argparse.Namespace
return make_args(self._options, args)
def test_failure_for_conflicting_flags_inputs(self):
args = self._make_args(['--input_pattern', '*',
'--report_path', 'some_path',
'--input_file', 'asd'])
self.assertRaises(ValueError, self._options.validate, args)
def test_failure_for_conflicting_flags_no_errors(self):
args = self._make_args(['--input_pattern', '*',
'--report_path', 'some_path'])
self._options.validate(args)
def test_failure_for_conflicting_flags_no_errors_with_pattern_input(self):
args = self._make_args(['--input_pattern', '*',
'--report_path', 'some_path'])
self._options.validate(args)
| apache-2.0 | -1,461,147,277,719,678,000 | 38.036101 | 78 | 0.622491 | false |
edx/course-discovery | course_discovery/apps/course_metadata/migrations/0068_auto_20171108_1614.py | 1 | 1536 | # Generated by Django 1.11.3 on 2017-11-08 16:14
import django.db.models.deletion
import django_extensions.db.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20171004_1133'),
('course_metadata', '0067_auto_20171108_1432'),
]
operations = [
migrations.CreateModel(
name='CourseEntitlement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('price', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
('sku', models.CharField(blank=True, max_length=128, null=True)),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entitlements', to='course_metadata.Course')),
('currency', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Currency')),
('mode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course_metadata.SeatType')),
],
),
migrations.AlterUniqueTogether(
name='courseentitlement',
unique_together={('course', 'mode')},
),
]
| agpl-3.0 | 411,203,014,936,770,100 | 44.176471 | 149 | 0.63151 | false |
teodesson/Bookie | bookie/tests/test_auth/test_signup.py | 1 | 6228 | """Test the limited signup process
"""
import logging
from urllib.parse import (
quote,
urlencode,
)
import transaction
from bookie.models import DBSession
from bookie.models.auth import Activation
from bookie.models.auth import User
from bookie.models.auth import UserMgr
from bookie.tests import gen_random_word
from bookie.tests import TestDBBase
from bookie.tests import TestViewBase
LOG = logging.getLogger(__name__)
class TestInviteSetup(TestDBBase):
"""Verify we have/can work with the invite numbers"""
def testHasNoInvites(self):
"""Verify that if the user has no invites, they can't invite"""
u = User()
u.invite_ct = 0
self.assertFalse(u.has_invites(), 'User should have no invites')
self.assertFalse(
u.invite('[email protected]'), 'Should not be able to invite a user')
def testInviteCreatesUser(self):
"""We should get a new user when inviting something"""
me = User()
me.username = 'me'
me.email = 'me.com'
me.invite_ct = 2
you = me.invite('you.com')
self.assertEqual(
'you.com',
you.username,
'The email should be the username')
self.assertEqual(
'you.com',
you.email,
'The email should be the email')
self.assertTrue(
len(you.api_key),
'The api key should be generated for the user')
self.assertFalse(
you.activated,
'The new user should not be activated')
self.assertEqual(
1,
me.invite_ct,
'My invite count should be decremented')
class TestSigningUpUser(TestDBBase):
"""Start out by verifying a user starts out in the right state"""
def testInitialUserInactivated(self):
"""A new user signup should be a deactivated user"""
u = User()
u.email = gen_random_word(10)
DBSession.add(u)
self.assertEqual(
False,
u.activated,
'A new signup should start out deactivated by default')
self.assertTrue(
u.activation.code is not None,
'A new signup should start out as deactivated')
self.assertEqual(
'signup',
u.activation.created_by,
'This is a new signup, so mark is as thus')
class TestOpenSignup(TestViewBase):
"""New users can request a signup for an account."""
def tearDown(self):
super(TestOpenSignup, self).tearDown()
User.query.filter(User.email == '[email protected]').delete()
def testSignupRenders(self):
"""A signup form is kind of required."""
res = self.app.get('/signup')
self.assertIn('Sign up for Bookie', res.unicode_body)
self.assertNotIn('class="error"', res.unicode_body)
def testEmailRequired(self):
"""Signup requires an email entry."""
res = self.app.post('/signup_process')
self.assertIn('Please supply', res.unicode_body)
def testEmailAlreadyThere(self):
"""Signup requires an email entry."""
res = self.app.post(
'/signup_process',
params={
'email': '[email protected]'
}
)
self.assertIn('already signed up', res.unicode_body)
def testEmailIsLowercase(self):
"""Signup saves email as all lowercase"""
res = self.app.post(
'/signup_process',
params={
'email': '[email protected]'
}
)
self.assertIn('[email protected]', res.unicode_body)
def testUsernameAlreadyThere(self):
"""Signup requires an unique username entry."""
email = '[email protected]'
new_user = UserMgr.signup_user(email, 'invite')
DBSession.add(new_user)
transaction.commit()
user = DBSession.query(User).filter(User.username == email).one()
url = quote('/{0}/reset/{1}'.format(
user.email,
user.activation.code
))
res = self.app.post(
url,
params={
'password': 'testing',
'username': user.username,
'code': user.activation.code,
'new_username': 'admin',
})
self.assertIn('Username already', res.unicode_body)
def testResetFormDisplay(self):
"""Make sure you can GET the reset form."""
email = '[email protected]'
new_user = UserMgr.signup_user(email, 'invite')
DBSession.add(new_user)
transaction.commit()
user = DBSession.query(User).filter(User.username == email).one()
url = quote('/{0}/reset/{1}'.format(
user.email,
user.activation.code
))
res = self.app.get(url)
self.assertIn('Activate', res.unicode_body)
def testUsernameIsLowercase(self):
"""Signup saves username as all lowercase"""
email = '[email protected]'
new_user = UserMgr.signup_user(email, 'testcase')
DBSession.add(new_user)
transaction.commit()
user = DBSession.query(User).filter(
User.username == email.lower()).one()
params = {
'password': 'testing',
'username': user.username,
'code': user.activation.code,
'new_username': 'TESTLowercase'
}
url = '/api/v1/suspend?' + urlencode(params, True)
# Activate the user, setting their new username which we want to
# verify does get lower cased during this process.
self.app.delete(url)
user = DBSession.query(User).filter(User.email == email.lower()).one()
self.assertIn('testlowercase', user.username)
def testSignupWorks(self):
"""Signing up stores an activation."""
email = '[email protected]'
UserMgr.signup_user(email, 'testcase')
activations = Activation.query.all()
self.assertTrue(len(activations) == 1)
act = activations[0]
self.assertEqual(
email,
act.user.email,
"The activation email is the correct one.")
| agpl-3.0 | 2,664,278,221,227,237,000 | 29.23301 | 78 | 0.580925 | false |
AfricaChess/lichesshub | grandprix/migrations/0002_auto_20171110_0640.py | 1 | 1439 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-10 06:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('grandprix', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Season',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('start_date', models.DateField()),
('end_date', models.DateField()),
],
),
migrations.CreateModel(
name='TournamentType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='tournament',
name='kind',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='grandprix.TournamentType'),
),
migrations.AddField(
model_name='tournament',
name='season',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='grandprix.Season'),
),
]
| mit | 9,022,895,200,339,313,000 | 33.261905 | 123 | 0.565671 | false |
satyrius/cmsplugin-articles | cmsplugin_articles/migrations/0001_initial.py | 1 | 1615 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0003_auto_20140926_2347'),
]
operations = [
migrations.CreateModel(
name='ArticlesPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('limit', models.PositiveIntegerField(verbose_name='Articles per page')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='TeaserExtension',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255, null=True, verbose_name='Title', blank=True)),
('image', models.ImageField(upload_to=b'teaser', null=True, verbose_name='Image', blank=True)),
('description', models.TextField(null=True, verbose_name='Description', blank=True)),
('extended_object', models.OneToOneField(editable=False, to='cms.Page')),
('public_extension', models.OneToOneField(related_name='draft_extension', null=True, editable=False, to='cmsplugin_articles.TeaserExtension')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| mit | -328,105,361,709,691,460 | 39.375 | 159 | 0.567802 | false |
minlexx/pyevemon | esi_client/models/get_characters_character_id_killmails_recent_200_ok.py | 1 | 4307 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetCharactersCharacterIdKillmailsRecent200Ok(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, killmail_hash=None, killmail_id=None):
"""
GetCharactersCharacterIdKillmailsRecent200Ok - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'killmail_hash': 'str',
'killmail_id': 'int'
}
self.attribute_map = {
'killmail_hash': 'killmail_hash',
'killmail_id': 'killmail_id'
}
self._killmail_hash = killmail_hash
self._killmail_id = killmail_id
@property
def killmail_hash(self):
"""
Gets the killmail_hash of this GetCharactersCharacterIdKillmailsRecent200Ok.
A hash of this killmail
:return: The killmail_hash of this GetCharactersCharacterIdKillmailsRecent200Ok.
:rtype: str
"""
return self._killmail_hash
@killmail_hash.setter
def killmail_hash(self, killmail_hash):
"""
Sets the killmail_hash of this GetCharactersCharacterIdKillmailsRecent200Ok.
A hash of this killmail
:param killmail_hash: The killmail_hash of this GetCharactersCharacterIdKillmailsRecent200Ok.
:type: str
"""
if killmail_hash is None:
raise ValueError("Invalid value for `killmail_hash`, must not be `None`")
self._killmail_hash = killmail_hash
@property
def killmail_id(self):
"""
Gets the killmail_id of this GetCharactersCharacterIdKillmailsRecent200Ok.
ID of this killmail
:return: The killmail_id of this GetCharactersCharacterIdKillmailsRecent200Ok.
:rtype: int
"""
return self._killmail_id
@killmail_id.setter
def killmail_id(self, killmail_id):
"""
Sets the killmail_id of this GetCharactersCharacterIdKillmailsRecent200Ok.
ID of this killmail
:param killmail_id: The killmail_id of this GetCharactersCharacterIdKillmailsRecent200Ok.
:type: int
"""
if killmail_id is None:
raise ValueError("Invalid value for `killmail_id`, must not be `None`")
self._killmail_id = killmail_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, GetCharactersCharacterIdKillmailsRecent200Ok):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
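# Illustrative usage sketch; this helper is not part of the swagger-generated
# client, and the killmail hash/id values below are made-up placeholders.
def _example_recent_killmail_usage():
    example = GetCharactersCharacterIdKillmailsRecent200Ok(
        killmail_hash='abcdef0123456789abcdef0123456789abcdef01', killmail_id=12345)
    # to_dict() walks swagger_types, so both attributes come back keyed by name
    as_dict = example.to_dict()
    assert as_dict == {'killmail_hash': 'abcdef0123456789abcdef0123456789abcdef01',
                       'killmail_id': 12345}
    return example.to_str()  # pformat() of the same data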
| gpl-3.0 | -4,276,356,936,466,362,400 | 28.29932 | 101 | 0.572788 | false |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Dropbox/OAuth/FinalizeOAuth.py | 1 | 5847 | # -*- coding: utf-8 -*-
###############################################################################
#
# FinalizeOAuth
# Completes the OAuth process by retrieving a Dropbox access token and access token secret for a user, after they have visited the authorization URL returned by the InitializeOAuth choreo and clicked "allow."
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FinalizeOAuth(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the FinalizeOAuth Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(FinalizeOAuth, self).__init__(temboo_session, '/Library/Dropbox/OAuth/FinalizeOAuth')
def new_input_set(self):
return FinalizeOAuthInputSet()
def _make_result_set(self, result, path):
return FinalizeOAuthResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return FinalizeOAuthChoreographyExecution(session, exec_id, path)
class FinalizeOAuthInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the FinalizeOAuth
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountName(self, value):
"""
Set the value of the AccountName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(FinalizeOAuthInputSet, self)._set_input('AccountName', value)
def set_AppKeyName(self, value):
"""
Set the value of the AppKeyName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(FinalizeOAuthInputSet, self)._set_input('AppKeyName', value)
def set_AppKeyValue(self, value):
"""
Set the value of the AppKeyValue input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(FinalizeOAuthInputSet, self)._set_input('AppKeyValue', value)
def set_CallbackID(self, value):
"""
Set the value of the CallbackID input for this Choreo. ((required, string) The callback token returned by the InitializeOAuth Choreo. Used to retrieve the callback data after the user authorizes.)
"""
super(FinalizeOAuthInputSet, self)._set_input('CallbackID', value)
def set_DropboxAppKey(self, value):
"""
Set the value of the DropboxAppKey input for this Choreo. ((required, string) The APP Key provided by Dropbox (AKA the OAuth Consumer Key).)
"""
super(FinalizeOAuthInputSet, self)._set_input('DropboxAppKey', value)
def set_DropboxAppSecret(self, value):
"""
Set the value of the DropboxAppSecret input for this Choreo. ((required, string) The App Secret provided by Dropbox (AKA the OAuth Consumer Secret).)
"""
super(FinalizeOAuthInputSet, self)._set_input('DropboxAppSecret', value)
def set_OAuthTokenSecret(self, value):
"""
Set the value of the OAuthTokenSecret input for this Choreo. ((required, string) The OAuthTokenSecret returned by the InitializeOAuth Choreo.)
"""
super(FinalizeOAuthInputSet, self)._set_input('OAuthTokenSecret', value)
def set_Timeout(self, value):
"""
Set the value of the Timeout input for this Choreo. ((optional, integer) The amount of time (in seconds) to poll your Temboo callback URL to see if your app's user has allowed or denied the request for access. Defaults to 20. Max is 60.)
"""
super(FinalizeOAuthInputSet, self)._set_input('Timeout', value)
class FinalizeOAuthResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the FinalizeOAuth Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_AccessTokenSecret(self):
"""
Retrieve the value for the "AccessTokenSecret" output from this Choreo execution. ((string) The Access Token Secret retrieved during the OAuth process.)
"""
return self._output.get('AccessTokenSecret', None)
def get_AccessToken(self):
"""
Retrieve the value for the "AccessToken" output from this Choreo execution. ((string) The Access Token retrieved during the OAuth process.)
"""
return self._output.get('AccessToken', None)
def get_UserID(self):
"""
Retrieve the value for the "UserID" output from this Choreo execution. ((integer) The Dropbox user ID associated with the access token and secret returned.)
"""
return self._output.get('UserID', None)
class FinalizeOAuthChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return FinalizeOAuthResultSet(response, path)
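# Illustrative usage sketch; not part of the generated Temboo binding. It assumes
# the standard Temboo SDK entry points (TembooSession from temboo.core.session and
# the execute_with_results() method inherited from Choreography); the account name,
# app key and choreo input values below are placeholders.
def _example_finalize_oauth_call():
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    choreo = FinalizeOAuth(session)
    inputs = choreo.new_input_set()
    inputs.set_DropboxAppKey('DROPBOX_APP_KEY')
    inputs.set_DropboxAppSecret('DROPBOX_APP_SECRET')
    inputs.set_CallbackID('CALLBACK_ID_FROM_INITIALIZE_OAUTH')
    inputs.set_OAuthTokenSecret('TOKEN_SECRET_FROM_INITIALIZE_OAUTH')
    results = choreo.execute_with_results(inputs)
    return results.get_AccessToken(), results.get_AccessTokenSecret()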
| gpl-2.0 | -6,594,500,560,399,092,000 | 45.03937 | 245 | 0.683085 | false |
ychaim/explorer | addresses/views.py | 1 | 33766 | from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.clickjacking import xframe_options_exempt
from django.shortcuts import get_object_or_404
from annoying.decorators import render_to
from annoying.functions import get_object_or_None
from blockexplorer.decorators import assert_valid_coin_symbol
from blockexplorer.settings import BLOCKCYPHER_PUBLIC_KEY, BLOCKCYPHER_API_KEY, WEBHOOK_SECRET_KEY, BASE_URL
from blockcypher.api import get_address_details, get_address_overview, subscribe_to_address_webhook, get_forwarding_address_details
from blockcypher.constants import COIN_SYMBOL_MAPPINGS
from users.models import AuthUser, LoggedLogin
from addresses.models import AddressSubscription, AddressForwarding
from transactions.models import OnChainTransaction
from services.models import WebHook
from emails.models import SentEmail
from addresses.forms import KnownUserAddressSubscriptionForm, NewUserAddressSubscriptionForm, AddressSearchForm, KnownUserAddressForwardingForm, NewUserAddressForwardingForm
from utils import get_max_pages, get_user_agent, get_client_ip, uri_to_url, simple_pw_generator
import json
from urllib.parse import urlencode
SMALL_PAYMENTS_MSG = '''
Please note that for very small payments of 100 bits or less,
the payment will not forward as the amount to forward is lower than the mining fee.
'''
@assert_valid_coin_symbol
@render_to('address_overview.html')
def address_overview(request, coin_symbol, address, wallet_name=None):
TXNS_PER_PAGE = 100
# 1 indexed page
current_page = request.GET.get('page')
if current_page:
current_page = int(current_page)
else:
current_page = 1
try:
address_details = get_address_details(
address=address,
coin_symbol=coin_symbol,
txn_limit=TXNS_PER_PAGE,
api_key=BLOCKCYPHER_API_KEY,
)
except AssertionError:
msg = _('Invalid Address')
messages.warning(request, msg)
redir_url = reverse('coin_overview', kwargs={'coin_symbol': coin_symbol})
return HttpResponseRedirect(redir_url)
#import pprint; pprint.pprint(address_details, width=1)
if 'error' in address_details:
msg = _('Sorry, that address was not found')
messages.warning(request, msg)
return HttpResponseRedirect(reverse('home'))
if request.user.is_authenticated():
# notify user on page of any forwarding or subscriptions they may have
for address_subscription in AddressSubscription.objects.filter(
auth_user=request.user,
b58_address=address,
coin_symbol=coin_symbol,
unsubscribed_at=None,
):
if address_subscription.auth_user.email_verified:
msg = _('Private Message: you are subscribed to this address and will receive email notifications at <b>%(user_email)s</b> (<a href="%(unsub_url)s">unsubscribe</a>)' % {
'user_email': request.user.email,
'unsub_url': reverse('user_unsubscribe_address', kwargs={
'address_subscription_id': address_subscription.id,
}),
})
messages.info(request, msg, extra_tags='safe')
else:
msg = _('Private Message: you are not subscribed to this address because you have not clicked the link sent to <b>%(user_email)s</b>' % {
'user_email': request.user.email,
})
messages.error(request, msg, extra_tags='safe')
print('ERROR')
# there can be only one
af_initial = get_object_or_None(AddressForwarding,
auth_user=request.user,
initial_address=address,
coin_symbol=coin_symbol,
)
if af_initial:
msg = _('''
Private Message: this address will automatically forward to <a href="%(destination_addr_uri)s">%(destination_address)s</a>
any time a payment is received.
<br /><br /> <i>%(small_payments_msg)s</i>
''' % {
'destination_address': af_initial.destination_address,
'destination_addr_uri': reverse('address_overview', kwargs={
'address': af_initial.destination_address,
'coin_symbol': coin_symbol,
}),
'small_payments_msg': SMALL_PAYMENTS_MSG,
})
messages.info(request, msg, extra_tags='safe')
# There could be many
for af_destination in AddressForwarding.objects.filter(
auth_user=request.user,
destination_address=address,
coin_symbol=coin_symbol,
):
msg = _('''
Private Message: this address will automatically receive forwarded transactions from
<a href="%(initial_addr_uri)s">%(initial_address)s</a>.
<br /><br /> <i>%(small_payments_msg)s</i>
''' % {
'initial_address': af_destination.initial_address,
'initial_addr_uri': reverse('address_overview', kwargs={
'address': af_destination.initial_address,
'coin_symbol': coin_symbol,
}),
'small_payments_msg': SMALL_PAYMENTS_MSG,
})
messages.info(request, msg, extra_tags='safe')
all_transactions = address_details.get('unconfirmed_txrefs', []) + address_details.get('txrefs', [])
# transaction pagination: 0-indexed and inclusive
tx_start_num = (current_page - 1) * TXNS_PER_PAGE
tx_end_num = current_page * TXNS_PER_PAGE - 1
# filter address details for pagination. HACK!
    all_transactions = all_transactions[tx_start_num:tx_end_num + 1]
api_url = 'https://api.blockcypher.com/v1/%s/%s/addrs/%s' % (
COIN_SYMBOL_MAPPINGS[coin_symbol]['blockcypher_code'],
COIN_SYMBOL_MAPPINGS[coin_symbol]['blockcypher_network'],
address)
return {
'coin_symbol': coin_symbol,
'address': address,
'api_url': api_url,
'wallet_name': wallet_name,
'current_page': current_page,
'max_pages': get_max_pages(num_items=address_details['final_n_tx'], items_per_page=TXNS_PER_PAGE),
'total_sent_satoshis': address_details['total_sent'],
'total_received_satoshis': address_details['total_received'],
'unconfirmed_balance_satoshis': address_details['unconfirmed_balance'],
'confirmed_balance_satoshis': address_details['balance'],
'total_balance_satoshis': address_details['final_balance'],
'all_transactions': all_transactions,
'num_confirmed_txns': address_details['n_tx'],
'num_unconfirmed_txns': address_details['unconfirmed_n_tx'],
'num_all_txns': address_details['final_n_tx'],
'BLOCKCYPHER_PUBLIC_KEY': BLOCKCYPHER_PUBLIC_KEY,
}
def subscribe_forwarding(request):
kwargs = {'coin_symbol': 'btc'}
redir_url = reverse('subscribe_address', kwargs=kwargs)
return HttpResponseRedirect(redir_url)
@assert_valid_coin_symbol
@render_to('subscribe_address.html')
def subscribe_address(request, coin_symbol):
already_authenticated = request.user.is_authenticated()
# kind of tricky because we have to deal with both logged in and new users
initial = {'coin_symbol': coin_symbol}
if already_authenticated:
form = KnownUserAddressSubscriptionForm(initial=initial)
else:
form = NewUserAddressSubscriptionForm(initial=initial)
if request.method == 'POST':
if already_authenticated:
form = KnownUserAddressSubscriptionForm(data=request.POST)
else:
form = NewUserAddressSubscriptionForm(data=request.POST)
if form.is_valid():
coin_symbol = form.cleaned_data['coin_symbol']
coin_address = form.cleaned_data['coin_address']
if already_authenticated:
auth_user = request.user
else:
user_email = form.cleaned_data['email']
# Check for existing user with that email
existing_user = get_object_or_None(AuthUser, email=user_email)
if existing_user:
msg = _('Please first login to this account to create a notification')
messages.info(request, msg)
return HttpResponseRedirect(existing_user.get_login_uri())
else:
# Create user with unknown (random) password
auth_user = AuthUser.objects.create_user(
email=user_email,
password=None, # it will create a random pw
creation_ip=get_client_ip(request),
creation_user_agent=get_user_agent(request),
)
# Login the user
# http://stackoverflow.com/a/3807891/1754586
auth_user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, auth_user)
# Log the login
LoggedLogin.record_login(request)
existing_subscription_cnt = AddressSubscription.objects.filter(
auth_user=auth_user,
b58_address=coin_address).count()
if existing_subscription_cnt:
msg = _("You're already subscribed to that address. Please choose another address.")
messages.warning(request, msg)
else:
# TODO: this is inefficiently happening before email verification
# Hit blockcypher and return subscription id
callback_uri = reverse('address_webhook', kwargs={
'secret_key': WEBHOOK_SECRET_KEY,
# hack for rare case of two webhooks requested on same address:
'ignored_key': simple_pw_generator(num_chars=10),
})
callback_url = uri_to_url(callback_uri)
bcy_id = subscribe_to_address_webhook(
subscription_address=coin_address,
callback_url=callback_url,
coin_symbol=coin_symbol,
api_key=BLOCKCYPHER_API_KEY,
)
address_subscription = AddressSubscription.objects.create(
coin_symbol=coin_symbol,
b58_address=coin_address,
auth_user=auth_user,
blockcypher_id=bcy_id,
)
address_uri = reverse('address_overview', kwargs={
'coin_symbol': coin_symbol,
'address': coin_address,
})
if already_authenticated and auth_user.email_verified:
msg = _('You will now be emailed notifications for <a href="%(address_uri)s">%(coin_address)s</a>' % {
'coin_address': coin_address,
'address_uri': address_uri,
})
messages.success(request, msg, extra_tags='safe')
return HttpResponseRedirect(reverse('dashboard'))
else:
address_subscription.send_notifications_welcome_email()
return HttpResponseRedirect(reverse('unconfirmed_email'))
elif request.method == 'GET':
coin_address = request.GET.get('a')
subscriber_email = request.GET.get('e')
if coin_address:
initial['coin_address'] = coin_address
if subscriber_email and not already_authenticated:
initial['email'] = subscriber_email
if coin_address or subscriber_email:
if already_authenticated:
form = KnownUserAddressSubscriptionForm(initial=initial)
else:
form = NewUserAddressSubscriptionForm(initial=initial)
return {
'form': form,
'coin_symbol': coin_symbol,
}
@login_required
def user_unsubscribe_address(request, address_subscription_id):
'''
For logged-in users to unsubscribe an address
'''
address_subscription = get_object_or_404(AddressSubscription, id=address_subscription_id)
assert address_subscription.auth_user == request.user
if address_subscription.unsubscribed_at:
msg = _("You've already unsubscribed from this alert")
messages.info(request, msg)
else:
address_subscription.unsubscribed_at = now()
address_subscription.save()
address_uri = reverse('address_overview', kwargs={
'coin_symbol': address_subscription.coin_symbol,
'address': address_subscription.b58_address,
})
msg = _('You have been unsubscribed from notifications on <a href="%(address_uri)s">%(b58_address)s</a>' % {
'b58_address': address_subscription.b58_address,
'address_uri': address_uri,
})
messages.success(request, msg, extra_tags='safe')
return HttpResponseRedirect(reverse('dashboard'))
@login_required
def user_archive_forwarding_address(request, address_forwarding_id):
'''
For logged-in users to archive a forwarding address
For security, the address forwarding is never disabled and can't be changed.
We just stop displaying it in the UI.
For now we don't automatically stop sending email notices, though we may want to do that in the future.
'''
address_forwarding = get_object_or_404(AddressForwarding, id=address_forwarding_id)
assert address_forwarding.auth_user == request.user
if address_forwarding.archived_at:
msg = _("You've already archived this address")
messages.info(request, msg)
else:
address_forwarding.archived_at = now()
address_forwarding.save()
initial_addr_uri = reverse('address_overview', kwargs={
'coin_symbol': address_forwarding.coin_symbol,
'address': address_forwarding.initial_address,
})
destination_addr_uri = reverse('address_overview', kwargs={
'coin_symbol': address_forwarding.coin_symbol,
'address': address_forwarding.destination_address,
})
msg = _('''
You have archived the forwarding address <a href="%(initial_addr_uri)s">%(initial_address)s</a>.
For security, payments sent to <a href="%(destination_addr_uri)s">%(destination_address)s</a>
may continue to forward to <a href="%(initial_addr_uri)s">%(initial_address)s</a>.
''' % {
'initial_address': address_forwarding.initial_address,
'destination_address': address_forwarding.destination_address,
'initial_addr_uri': initial_addr_uri,
'destination_addr_uri': destination_addr_uri,
})
messages.success(request, msg, extra_tags='safe')
return HttpResponseRedirect(reverse('dashboard'))
def unsubscribe_address(request, unsub_code):
'''
1-click unsubscribe an address via email
'''
sent_email = get_object_or_404(SentEmail, unsub_code=unsub_code)
auth_user = sent_email.auth_user
# Login the user
# http://stackoverflow.com/a/3807891/1754586
auth_user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, auth_user)
# Log the login
LoggedLogin.record_login(request)
if sent_email.unsubscribed_at:
msg = _("You've already unsubscribed from this alert")
messages.info(request, msg)
else:
address_subscription = sent_email.address_subscription
assert address_subscription
address_subscription.unsubscribed_at = now()
address_subscription.save()
addr_uri = reverse('address_overview', kwargs={
'coin_symbol': address_subscription.coin_symbol,
'address': address_subscription.b58_address,
})
msg = _('You have been unsubscribed from notifications on <a href="%(addr_uri)s">%(b58_address)s</a>' % {
'b58_address': address_subscription.b58_address,
'addr_uri': addr_uri,
})
messages.info(request, msg, extra_tags='safe')
return HttpResponseRedirect(reverse('dashboard'))
@csrf_exempt
def address_webhook(request, secret_key, ignored_key):
'''
Process an inbound webhook from blockcypher
'''
# Log webhook
webhook = WebHook.log_webhook(request, WebHook.BLOCKCYPHER_ADDRESS_NOTIFICATION)
assert secret_key == WEBHOOK_SECRET_KEY
assert request.method == 'POST', 'Request has no post'
blockcypher_id = request.META.get('HTTP_X_EVENTID')
assert 'tx-confirmation' == request.META.get('HTTP_X_EVENTTYPE')
payload = json.loads(request.body.decode())
address_subscription = AddressSubscription.objects.get(blockcypher_id=blockcypher_id)
tx_hash = payload['hash']
num_confs = payload['confirmations']
double_spend = payload['double_spend']
satoshis_sent = payload['total']
fee_in_satoshis = payload['fees']
tx_event = get_object_or_None(
OnChainTransaction,
tx_hash=tx_hash,
address_subscription=address_subscription,
)
if tx_event:
tx_is_new = False
tx_event.num_confs = num_confs
tx_event.double_spend = double_spend
tx_event.save()
else:
tx_is_new = True
input_addresses = set()
for input_entry in payload['inputs']:
for address in input_entry.get('addresses', []):
input_addresses.add(address)
if address_subscription.b58_address in input_addresses:
is_withdrawal = True
else:
is_withdrawal = False
output_addresses = set()
for output_entry in payload.get('outputs', []):
for address in output_entry['addresses']:
output_addresses.add(address)
if address_subscription.b58_address in output_addresses:
is_deposit = True
else:
is_deposit = False
tx_event = OnChainTransaction.objects.create(
tx_hash=tx_hash,
address_subscription=address_subscription,
num_confs=num_confs,
double_spend=double_spend,
satoshis_sent=satoshis_sent,
fee_in_satoshis=fee_in_satoshis,
is_deposit=is_deposit,
is_withdrawal=is_withdrawal,
)
# email sending logic
# TODO: add logic for notify on deposit vs withdrawal
# TODO: add safety check to prevent duplicate email sending
if tx_event.is_subscribed():
if double_spend and (tx_is_new or not tx_event.double_spend):
# We have the first reporting of a double-spend
tx_event.send_double_spend_tx_notification()
elif num_confs == 0 and tx_is_new:
# First broadcast
if tx_event.address_subscription.notify_on_broadcast:
if tx_event.is_deposit and tx_event.address_subscription.notify_on_deposit:
tx_event.send_unconfirmed_tx_email()
elif tx_event.is_withdrawal and tx_event.address_subscription.notify_on_withdrawal:
tx_event.send_unconfirmed_tx_email()
elif num_confs == 6 and (tx_is_new or not tx_event.num_confs == num_confs):
# Sixth confirm
if tx_event.address_subscription.notify_on_sixth_confirm:
if tx_event.is_deposit and tx_event.address_subscription.notify_on_deposit:
tx_event.send_confirmed_tx_email()
elif tx_event.is_withdrawal and tx_event.address_subscription.notify_on_withdrawal:
tx_event.send_confirmed_tx_email()
# Update logging
webhook.address_subscription = address_subscription
webhook.succeeded = True
webhook.save()
# Return something
return HttpResponse("*ok*")
@xframe_options_exempt
@render_to('balance_widget.html')
def render_balance_widget(request, coin_symbol, address):
address_overview = get_address_overview(address=address,
coin_symbol=coin_symbol, api_key=BLOCKCYPHER_API_KEY)
return {
'address_overview': address_overview,
'coin_symbol': coin_symbol,
'b58_address': address,
'BASE_URL': BASE_URL,
}
@xframe_options_exempt
@render_to('received_widget.html')
def render_received_widget(request, coin_symbol, address):
address_overview = get_address_overview(address=address,
coin_symbol=coin_symbol, api_key=BLOCKCYPHER_API_KEY)
return {
'address_overview': address_overview,
'coin_symbol': coin_symbol,
'b58_address': address,
'BASE_URL': BASE_URL,
}
@render_to('search_widgets.html')
def search_widgets(request, coin_symbol):
form = AddressSearchForm()
if request.method == 'POST':
form = AddressSearchForm(data=request.POST)
if form.is_valid():
kwargs = {
'coin_symbol': form.cleaned_data['coin_symbol'],
'address': form.cleaned_data['coin_address'],
}
redir_url = reverse('widgets_overview', kwargs=kwargs)
return HttpResponseRedirect(redir_url)
elif request.method == 'GET':
new_coin_symbol = request.GET.get('c')
if new_coin_symbol:
initial = {'coin_symbol': new_coin_symbol}
form = AddressSearchForm(initial=initial)
return {
'form': form,
'coin_symbol': coin_symbol,
}
@render_to('widgets.html')
def widgets_overview(request, coin_symbol, address):
return {
'coin_symbol': coin_symbol,
'b58_address': address,
'BASE_URL': BASE_URL,
}
def widget_forwarding(request):
kwargs = {'coin_symbol': 'btc'}
redir_url = reverse('search_widgets', kwargs=kwargs)
return HttpResponseRedirect(redir_url)
@assert_valid_coin_symbol
@render_to('setup_address_forwarding.html')
def setup_address_forwarding(request, coin_symbol):
# kind of tricky because we have to deal with both logged in and new users
already_authenticated = request.user.is_authenticated()
initial = {'coin_symbol': coin_symbol}
if already_authenticated:
form = KnownUserAddressForwardingForm(initial=initial)
else:
form = NewUserAddressForwardingForm(initial=initial)
if request.method == 'POST':
if already_authenticated:
form = KnownUserAddressForwardingForm(data=request.POST)
else:
form = NewUserAddressForwardingForm(data=request.POST)
if form.is_valid():
coin_symbol = form.cleaned_data['coin_symbol']
destination_address = form.cleaned_data['coin_address']
user_email = form.cleaned_data.get('email')
# optional. null in case of KnownUserAddressForwardingForm
if already_authenticated:
auth_user = request.user
else:
auth_user = None
if user_email:
# Check for existing user with that email
existing_user = get_object_or_None(AuthUser, email=user_email)
if existing_user:
msg = _('Please first login to this account to create a notification')
messages.info(request, msg)
return HttpResponseRedirect(existing_user.get_login_uri())
else:
# Create user with unknown (random) password
auth_user = AuthUser.objects.create_user(
email=user_email,
password=None, # it will create a random pw
creation_ip=get_client_ip(request),
creation_user_agent=get_user_agent(request),
)
# Login the user
# http://stackoverflow.com/a/3807891/1754586
auth_user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, auth_user)
# Log the login
LoggedLogin.record_login(request)
else:
# No user email given, proceed anonymously
# FIXME: confirm this
pass
# Setup Payment Forwarding
forwarding_address_details = get_forwarding_address_details(
destination_address=destination_address,
api_key=BLOCKCYPHER_API_KEY,
callback_url=None, # notifications happen separately (and not always)
coin_symbol=coin_symbol,
)
if 'error' in forwarding_address_details:
# Display error message back to user
messages.warning(request, forwarding_address_details['error'], extra_tags='safe')
else:
initial_address = forwarding_address_details['input_address']
# create forwarding object
address_forwarding_obj = AddressForwarding.objects.create(
coin_symbol=coin_symbol,
initial_address=initial_address,
destination_address=destination_address,
auth_user=auth_user,
blockcypher_id=forwarding_address_details['id'],
)
subscribe_uri = reverse('subscribe_address', kwargs={'coin_symbol': coin_symbol})
uri_qs = {'a': initial_address}
if user_email:
uri_qs['e'] = user_email
if already_authenticated:
uri_qs['e'] = auth_user.email
subscribe_uri = '%s?%s' % (subscribe_uri, urlencode(uri_qs))
initial_addr_uri = reverse('address_overview', kwargs={
'coin_symbol': coin_symbol,
'address': initial_address,
})
destination_addr_uri = reverse('address_overview', kwargs={
'coin_symbol': coin_symbol,
'address': destination_address,
})
msg_merge_dict = {
'initial_address': initial_address,
'initial_addr_uri': initial_addr_uri,
'destination_address': destination_address,
'destination_addr_uri': destination_addr_uri,
'subscribe_uri': subscribe_uri,
'small_payments_msg': SMALL_PAYMENTS_MSG,
}
if auth_user:
msg_merge_dict['user_email'] = auth_user.email
if user_email or (already_authenticated and form.cleaned_data['wants_email_notification']):
# Create an address subscription for all of these cases
# Hit blockcypher and return subscription id
callback_uri = reverse('address_webhook', kwargs={
'secret_key': WEBHOOK_SECRET_KEY,
# hack for rare case of two webhooks requested on same address:
'ignored_key': simple_pw_generator(num_chars=10),
})
callback_url = uri_to_url(callback_uri)
bcy_id = subscribe_to_address_webhook(
subscription_address=initial_address,
callback_url=callback_url,
coin_symbol=coin_symbol,
api_key=BLOCKCYPHER_API_KEY,
)
# only notify for deposits
AddressSubscription.objects.create(
coin_symbol=coin_symbol,
b58_address=initial_address,
auth_user=auth_user,
blockcypher_id=bcy_id,
notify_on_deposit=True,
notify_on_withdrawal=False,
address_forwarding_obj=address_forwarding_obj,
)
if user_email:
# New signup
msg = _('''
Transactions sent to <a href="%(initial_addr_uri)s">%(initial_address)s</a>
will now be automatically forwarded to <a href="%(destination_addr_uri)s">%(destination_address)s</a>,
but you must confirm your email to receive notifications.
<br /><br /> <i>%(small_payments_msg)s</i>
''' % msg_merge_dict)
messages.success(request, msg, extra_tags='safe')
address_forwarding_obj.send_forwarding_welcome_email()
return HttpResponseRedirect(reverse('unconfirmed_email'))
else:
if auth_user.email_verified:
msg = _('''
Transactions sent to <a href="%(initial_addr_uri)s">%(initial_address)s</a>
will now be automatically forwarded to <a href="%(destination_addr_uri)s">%(destination_address)s</a>,
and you will immediately receive an email notification at <b>%(user_email)s</b>.
<br /><br /> <i>%(small_payments_msg)s</i>
''' % msg_merge_dict)
messages.success(request, msg, extra_tags='safe')
return HttpResponseRedirect(reverse('dashboard'))
else:
# existing unconfirmed user
msg = _('''
Transactions sent to <a href="%(initial_addr_uri)s">%(initial_address)s</a>
will now be automatically forwarded to <a href="%(destination_addr_uri)s">%(destination_address)s</a>,
but you must confirm your email to receive notifications.
<br /><br /> <i>%(small_payments_msg)s</i>
''' % msg_merge_dict)
messages.success(request, msg, extra_tags='safe')
address_forwarding_obj.send_forwarding_welcome_email()
return HttpResponseRedirect(reverse('unconfirmed_email'))
elif already_authenticated:
# already authenticated and doesn't want subscriptions
msg = _('''
Transactions sent to <a href="%(initial_addr_uri)s">%(initial_address)s</a>
will now be automatically forwarded to <a href="%(destination_addr_uri)s">%(destination_address)s</a>.
You will not receive email notifications (<a href="%(subscribe_uri)s">subscribe</a>).
<br /><br /> <i>%(small_payments_msg)s</i>
''' % msg_merge_dict)
messages.success(request, msg, extra_tags='safe')
return HttpResponseRedirect(reverse('dashboard'))
else:
# New signup sans email
msg = _('''
Transactions sent to <a href="%(initial_addr_uri)s">%(initial_address)s</a>
will now be automatically forwarded to <a href="%(destination_addr_uri)s">%(destination_address)s</a>.
You will not receive email notifications (<a href="%(subscribe_uri)s">subscribe</a>).
<br /><br /> <i>%(small_payments_msg)s</i>
''' % msg_merge_dict)
messages.success(request, msg, extra_tags='safe')
return HttpResponseRedirect(destination_addr_uri)
elif request.method == 'GET':
coin_address = request.GET.get('a')
subscriber_email = request.GET.get('e')
if coin_address:
initial['coin_address'] = coin_address
if subscriber_email and not already_authenticated:
initial['email'] = subscriber_email
if coin_address or subscriber_email:
if already_authenticated:
form = KnownUserAddressForwardingForm(initial=initial)
else:
form = NewUserAddressForwardingForm(initial=initial)
return {
'form': form,
'coin_symbol': coin_symbol,
}
def forward_forwarding(request):
kwargs = {'coin_symbol': 'btc'}
redir_url = reverse('setup_address_forwarding', kwargs=kwargs)
return HttpResponseRedirect(redir_url)
| apache-2.0 | 4,320,673,146,313,630,000 | 41.313283 | 185 | 0.572825 | false |
CalebBell/ht | ht/conv_free_immersed.py | 1 | 58245 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from math import exp, log
__all__ = ['Nu_vertical_plate_Churchill',
'Nu_free_vertical_plate',
'Nu_free_vertical_plate_methods',
'Nu_horizontal_plate_McAdams',
'Nu_horizontal_plate_VDI',
'Nu_horizontal_plate_Rohsenow',
'Nu_free_horizontal_plate',
'Nu_free_horizontal_plate_methods',
'Nu_sphere_Churchill',
'Nu_vertical_cylinder_Griffiths_Davis_Morgan',
'Nu_vertical_cylinder_Jakob_Linke_Morgan',
'Nu_vertical_cylinder_Carne_Morgan',
'Nu_vertical_cylinder_Eigenson_Morgan',
'Nu_vertical_cylinder_Touloukian_Morgan',
'Nu_vertical_cylinder_McAdams_Weiss_Saunders',
'Nu_vertical_cylinder_Kreith_Eckert',
'Nu_vertical_cylinder_Hanesian_Kalish_Morgan',
'Nu_vertical_cylinder_Al_Arabi_Khamis',
'Nu_vertical_cylinder_Popiel_Churchill',
'Nu_vertical_cylinder',
'Nu_vertical_cylinder_methods',
'Nu_horizontal_cylinder_Churchill_Chu',
'Nu_horizontal_cylinder_Kuehn_Goldstein',
'Nu_horizontal_cylinder_Morgan',
'Nu_horizontal_cylinder',
'Nu_horizontal_cylinder_methods',
'Nu_coil_Xin_Ebadian']
def Nu_vertical_plate_Churchill(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a vertical
plate according to the Churchill-Chu [1]_ correlation, also presented in
[2]_. Plate must be isothermal; an alternate expression exists for constant
heat flux.
.. math::
Nu_{L}=\left[0.825+\frac{0.387Ra_{L}^{1/6}}
{[1+(0.492/Pr)^{9/16}]^{8/27}}\right]^2
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
Returns
-------
Nu : float
Nusselt number with respect to height, [-]
Notes
-----
Although transition from laminar to turbulent is discrete in reality, this
equation provides a smooth transition in value from laminar to turbulent.
Checked with the original source.
    Can be applied to vertical cylinders as well, subject to the criterion below:
.. math::
\frac{D}{L}\ge \frac{35}{Gr_L^{1/4}}
Examples
--------
From [2]_, Example 9.2, matches:
>>> Nu_vertical_plate_Churchill(0.69, 2.63E9)
147.16185223770603
References
----------
.. [1] Churchill, Stuart W., and Humbert H. S. Chu. "Correlating Equations
for Laminar and Turbulent Free Convection from a Vertical Plate."
International Journal of Heat and Mass Transfer 18, no. 11
(November 1, 1975): 1323-29. doi:10.1016/0017-9310(75)90243-4.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
'''
Ra = Pr*Gr
term = (0.825 + (0.387*Ra**(1/6.)*(1.0 + (Pr/0.492)**(-0.5625))**(-8.0/27.0)))
return term*term
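# Illustrative sketch (not part of the public API): applying the D/L >= 35/Gr**0.25
# applicability criterion quoted in the Notes above before reusing the flat-plate
# correlation for a vertical cylinder. The diameter, height, Pr and Gr values are
# arbitrary example numbers.
def _example_plate_correlation_for_cylinder(D=0.2, L=1.0, Pr=0.69, Gr=2.63E9):
    if D/L >= 35.0/Gr**0.25:
        return Nu_vertical_plate_Churchill(Pr, Gr)
    raise ValueError('Curvature effects matter; use a vertical cylinder correlation')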
Nu_free_vertical_plate_all_methods = ["Churchill"]
def Nu_free_vertical_plate_methods(Pr, Gr, H=None, W=None, check_ranges=True):
r'''This function returns a list of methods for calculating heat transfer
    coefficient for external free convection from a vertical plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid (which require T and P to obtain).
    `H` and `W` are not used by any correlations presently, but are included
for future support.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
H : float, optional
Height of vertical plate, [m]
W : float, optional
Width of the vertical plate, [m]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs, [-]
Examples
--------
>>> Nu_free_vertical_plate_methods(0.69, 2.63E9)
['Churchill']
'''
return Nu_free_vertical_plate_all_methods
def Nu_free_vertical_plate(Pr, Gr, buoyancy=None, H=None, W=None, Method=None):
r'''This function calculates the heat transfer coefficient for external
    free convection from a vertical plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid (which require T and P to obtain).
    `H` and `W` are not used by any correlations presently, but are included
for future support.
If no correlation's name is provided as `Method`, the 'Churchill'
correlation is selected.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
H : float, optional
Height of vertical plate, [m]
W : float, optional
Width of the vertical plate, [m]
Returns
-------
Nu : float
Nusselt number with respect to plate height, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use;
one of ('Churchill', ).
Examples
--------
Turbulent example
>>> Nu_free_vertical_plate(0.69, 2.63E9, False)
147.16185223770603
'''
if Method is None:
Method2 = 'Churchill'
else:
Method2 = Method
if Method2 == 'Churchill':
return Nu_vertical_plate_Churchill(Pr, Gr)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
def Nu_horizontal_plate_McAdams(Pr, Gr, buoyancy=True):
r'''Calculates the Nusselt number for natural convection above a horizontal
plate according to the McAdams [1]_ correlations. The plate must be
isothermal. Four different equations are used, two each for laminar and
turbulent; the two sets of correlations are required because if the plate
is hot, buoyancy lifts the fluid off the plate and enhances free convection
whereas if the plate is cold, the cold fluid above it settles on it and
decreases the free convection.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
Returns
-------
Nu : float
Nusselt number with respect to length, [-]
Notes
-----
Examples
--------
>>> Nu_horizontal_plate_McAdams(5.54, 3.21e8, buoyancy=True)
181.73121274384457
>>> Nu_horizontal_plate_McAdams(5.54, 3.21e8, buoyancy=False)
55.44564799362829
>>> Nu_horizontal_plate_McAdams(.01, 3.21e8, buoyancy=True)
22.857041558492334
>>> Nu_horizontal_plate_McAdams(.01, 3.21e8, buoyancy=False)
11.428520779246167
References
----------
.. [1] McAdams, William Henry. Heat Transmission. 3E. Malabar, Fla:
Krieger Pub Co, 1985.
'''
Ra = Pr*Gr
if buoyancy:
if Ra <= 1E7:
Nu = .54*Ra**0.25
else:
Nu = 0.15*Ra**(1.0/3.0)
else:
if Ra <= 1E10:
Nu = .27*Ra**0.25
else:
Nu = .15*Ra**(1.0/3.0)
return Nu
def Nu_horizontal_plate_VDI(Pr, Gr, buoyancy=True):
r'''Calculates the Nusselt number for natural convection above a horizontal
plate according to the VDI [1]_ correlations. The plate must be
isothermal. Three different equations are used, one each for laminar and
turbulent for the heat transfer happening at upper surface case and one for
the case of heat transfer happening at the lower surface. The lower surface
    correlation is recommended for the laminar flow regime.
The two different sets of correlations are required because if the plate
is hot, buoyancy lifts the fluid off the plate and enhances free convection
whereas if the plate is cold, the cold fluid above it settles on it and
decreases the free convection.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
Returns
-------
Nu : float
Nusselt number with respect to length, [-]
Notes
-----
The characteristic length suggested for use is as follows, with `a` and
`b` being the length and width of the plate.
.. math::
L = \frac{ab}{2(a+b)}
The buoyancy enhanced cases are from [2]_; the other is said to be from
    [3]_, although the equations there are not quite the same and do not include
the Prandtl number correction.
Examples
--------
>>> Nu_horizontal_plate_VDI(5.54, 3.21e8, buoyancy=True)
203.89681224927565
>>> Nu_horizontal_plate_VDI(5.54, 3.21e8, buoyancy=False)
39.16864971535617
References
----------
.. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd ed. 2010 edition.
Berlin ; New York: Springer, 2010.
.. [2] Stewartson, Keith. "On the Free Convection from a Horizontal Plate."
Zeitschrift Für Angewandte Mathematik Und Physik ZAMP 9, no. 3
(September 1, 1958): 276-82. https://doi.org/10.1007/BF02033031.
.. [3] Schlunder, Ernst U, and International Center for Heat and Mass
Transfer. Heat Exchanger Design Handbook. Washington:
Hemisphere Pub. Corp., 1987.
'''
Ra = Pr*Gr
if buoyancy:
f2 = (1.0 + (0.322/Pr)**(0.55))**(20.0/11.0)
if Ra*f2 < 7e4:
return 0.766*(Ra*f2)**0.2
else:
return 0.15*(Ra*f2)**(1.0/3.0)
else:
f1 = (1.0 + (0.492/Pr)**(9.0/16.0))**(-16.0/9.0)
return 0.6*(Ra*f1)**0.2
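# Illustrative sketch only: the characteristic length recommended in the docstrings
# above for horizontal plates, L = a*b/(2*(a + b)), with `a` and `b` the plate length
# and width. The 0.5 m by 0.3 m plate is an arbitrary example; Gr must be evaluated
# with this length before calling the plate correlations.
def _example_horizontal_plate_characteristic_length(a=0.5, b=0.3):
    return a*b/(2.0*(a + b))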
def Nu_horizontal_plate_Rohsenow(Pr, Gr, buoyancy=True):
r'''Calculates the Nusselt number for natural convection above a horizontal
plate according to the Rohsenow, Hartnett, and Cho (1998) [1]_ correlations.
The plate must be isothermal. Three different equations are used, one each
for laminar and turbulent for the heat transfer happening at upper surface
case and one for the case of heat transfer happening at the lower surface.
    The lower surface correlation is recommended for the laminar flow regime.
The two different sets of correlations are required because if the plate
is hot, buoyancy lifts the fluid off the plate and enhances free convection
whereas if the plate is cold, the cold fluid above it settles on it and
decreases the free convection.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
Returns
-------
Nu : float
Nusselt number with respect to length, [-]
Notes
-----
The characteristic length suggested for use is as follows, with `a` and
`b` being the length and width of the plate.
.. math::
L = \frac{ab}{2(a+b)}
Examples
--------
>>> Nu_horizontal_plate_Rohsenow(5.54, 3.21e8, buoyancy=True)
175.91054716322836
>>> Nu_horizontal_plate_Rohsenow(5.54, 3.21e8, buoyancy=False)
35.95799244863986
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
'''
Ra = Pr*Gr
if buoyancy:
C_tU = 0.14*((1.0 + 0.01707*Pr)/(1.0 + 0.01*Pr))
C_tV = 0.13*Pr**0.22/(1.0 + 0.61*Pr**0.81)**0.42
t1 = 1.0 # Ah/A # Heated to non heated area ratio
t2 = 0.0 # Lf*P/A # Lf vertical distance between lowest and highest point in body
        # P is perimeter, A is area
Cl = (0.0972 - (0.0157 + 0.462*C_tV)*t1
+ (0.615*C_tV - 0.0548 - 6e-6*Pr)*t2)
Nu_T = 0.835*Cl*Ra**0.25 # average Cl
Nu_l = 1.4/(log(1.0 + 1.4/Nu_T))
Nu_t = C_tU*Ra**(1.0/3.0)
m = 10.0
Nu = ((Nu_l)**m + Nu_t**m)**(1.0/m)
return Nu
else:
# No friction/C term
Nu_T = 0.527*Ra**0.2/(1.0 + (1.9/Pr)**0.9)**(2.0/9.0)
Nu_l = 2.5/(log(1.0 + 2.5/Nu_T))
return Nu_l
conv_free_horizontal_plate_all_methods = {
'McAdams': (Nu_horizontal_plate_McAdams, ('Pr', 'Gr', 'buoyancy')),
'VDI': (Nu_horizontal_plate_VDI, ('Pr', 'Gr', 'buoyancy')),
'Rohsenow': (Nu_horizontal_plate_Rohsenow, ('Pr', 'Gr', 'buoyancy')),
}
Nu_free_horizontal_plate_all_methods = ["VDI", "McAdams", "Rohsenow"]
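# Illustrative sketch of how the registry above could drive generic dispatch; the
# public Nu_free_horizontal_plate() below branches explicitly instead, so this
# helper exists only as a demonstration.
def _example_dispatch_horizontal_plate(method, Pr, Gr, buoyancy):
    func, arg_names = conv_free_horizontal_plate_all_methods[method]
    available = {'Pr': Pr, 'Gr': Gr, 'buoyancy': buoyancy}
    return func(**{name: available[name] for name in arg_names})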
def Nu_free_horizontal_plate_methods(Pr, Gr, buoyancy, L=None, W=None,
check_ranges=True):
r'''This function returns a list of methods for calculating heat transfer
    coefficient for external free convection from a horizontal plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid, temperatures, and geometry.
`L` and `W` are not used by any correlations presently, but are included
for future support.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
L : float, optional
Length of horizontal plate, [m]
W : float, optional
Width of the horizontal plate, [m]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs, [-]
Examples
--------
>>> Nu_free_horizontal_plate_methods(0.69, 2.63E9, True)
['VDI', 'McAdams', 'Rohsenow']
'''
return Nu_free_horizontal_plate_all_methods
def Nu_free_horizontal_plate(Pr, Gr, buoyancy, L=None, W=None,
Method=None):
r'''This function calculates the heat transfer coefficient for external
free convection from a horizontal plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid, temperatures, and geometry.
`L` and `W` are not used by any correlations presently, but are included
for future support.
If no correlation's name is provided as `Method`, the 'VDI' correlation is
selected.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
L : float, optional
Length of horizontal plate, [m]
W : float, optional
Width of the horizontal plate, [m]
Returns
-------
Nu : float
Nusselt number with respect to plate length, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
        conv_free_horizontal_plate_all_methods
Examples
--------
Turbulent example
>>> Nu_free_horizontal_plate(5.54, 3.21e8, buoyancy=True)
203.89681224927565
>>> Nu_free_horizontal_plate(5.54, 3.21e8, buoyancy=True, Method='McAdams')
181.73121274384457
'''
if Method is None:
Method2 = "VDI"
else:
Method2 = Method
if Method2 == 'VDI':
return Nu_horizontal_plate_VDI(Pr=Pr, Gr=Gr, buoyancy=buoyancy)
if Method2 == 'McAdams':
return Nu_horizontal_plate_McAdams(Pr=Pr, Gr=Gr, buoyancy=buoyancy)
if Method2 == 'Rohsenow':
return Nu_horizontal_plate_Rohsenow(Pr=Pr, Gr=Gr, buoyancy=buoyancy)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
def Nu_sphere_Churchill(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a sphere
according to the Churchill [1]_ correlation. Sphere must be isothermal.
.. math::
Nu_D=2+\frac{0.589Ra_D^{1/4}} {\left[1+(0.469/Pr)^{9/16}\right]^{4/9}}
\cdot\left\{1 + \frac{7.44\times 10^{-8}Ra}
{[1+(0.469/Pr)^{9/16}]^{16/9}}\right\}^{1/12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Although transition from laminar to turbulent is discrete in reality, this
equation provides a smooth transition in value from laminar to turbulent.
Checked with the original source.
Good for Ra < 1E13. Limit of Nu is 2 at low Grashof numbers.
Examples
--------
>>> Nu_sphere_Churchill(.7, 1E7)
25.670869440317578
References
----------
.. [1] Schlunder, Ernst U, and International Center for Heat and Mass
Transfer. Heat Exchanger Design Handbook. Washington:
Hemisphere Pub. Corp., 1987.
'''
Ra = Pr*Gr
Nu = 2 + (0.589*Ra**0.25/(1 + (0.469/Pr)**(9/16.))**(4/9.)*(
1 + 7.44E-8*Ra/(1 + (0.469/Pr)**(9/16.))**(16/9.))**(1/12.))
return Nu
### Vertical cylinders
def Nu_vertical_cylinder_Griffiths_Davis_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 0.67 Ra_H^{0.25},\; 10^{7} < Ra < 10^{9}
.. math::
Nu_H = 0.0782 Ra_H^{0.357}, \; 10^{9} < Ra < 10^{11}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameter 17.43 cm, length from 4.65 to 263.5 cm. Air as fluid.
Transition between ranges is not smooth.
If outside of range, no warning is given.
Examples
--------
>>> Nu_vertical_cylinder_Griffiths_Davis_Morgan(.7, 2E10)
327.6230596100138
References
----------
.. [1] Griffiths, Ezer, A. H. Davis, and Great Britain. The Transmission of
Heat by Radiation and Convection. London: H. M. Stationery off., 1922.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E9 and turbulent is None):
Nu = 0.0782*Ra**0.357
else:
Nu = 0.67*Ra**0.25
return Nu
def Nu_vertical_cylinder_Jakob_Linke_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 0.555 Ra_H^{0.25},\; 10^{4} < Ra < 10^{8}
.. math::
Nu_H = 0.129 Ra_H^{1/3},\; 10^{8} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameter 3.5 cm, length from L/D = 4.3. Air as fluid.
Transition between ranges is not smooth.
If outside of range, no warning is given. Results are presented rounded in
[4]_, and the second range is not shown in [3]_.
Examples
--------
>>> Nu_vertical_cylinder_Jakob_Linke_Morgan(.7, 2E10)
310.90835207860454
References
----------
.. [1] Jakob, M., and Linke, W., Warmeubergang beim Verdampfen von
Flussigkeiten an senkrechten und waagerechten Flaschen, Phys. Z.,
vol. 36, pp. 267-280, 1935.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E8 and turbulent is None):
Nu = 0.129*Ra**(1/3.)
else:
Nu = 0.555*Ra**0.25
return Nu
def Nu_vertical_cylinder_Carne_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 1.07 Ra_H^{0.28},\; 2\times 10^{6} < Ra < 2\times 10^{8}
.. math::
Nu_H = 0.152 Ra_H^{0.38},\; 2\times 10^{8} < Ra < 2\times 10^{11}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameters 0.475 cm to 7.62 cm, L/D from 8 to 127. Isothermal
boundary condition was assumed, but not verified. Transition between ranges
is not smooth. If outside of range, no warning is given. The higher range
of [1]_ is not shown in [3]_, and the formula for the first is actually for
the second in [3]_.
Examples
--------
>>> Nu_vertical_cylinder_Carne_Morgan(.7, 2E8)
204.31470629065677
References
----------
.. [1] J. B. Carne. "LIX. Heat Loss by Natural Convection from Vertical
Cylinders." The London, Edinburgh, and Dublin Philosophical Magazine and
Journal of Science 24, no. 162 (October 1, 1937): 634-53.
doi:10.1080/14786443708565140.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 2E8 and turbulent is None):
return 0.152*Ra**0.38
else:
return 1.07*Ra**0.28
def Nu_vertical_cylinder_Eigenson_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_,
presented in [3]_ and in more detail in [4]_.
.. math::
        Nu_H = 0.48 Ra_H^{0.25},\; Ra < 10^{9}
.. math::
Nu_H = 51.5 + 0.0000726 Ra_H^{0.63},\; 10^{9} < Ra < 1.69 \times 10^{10}
.. math::
Nu_H = 0.148 Ra_H^{1/3} - 127.6 ,\; 1.69 \times 10^{10} < Ra
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Author presents results as appropriate for both flat plates and cylinders.
Height of 2.5 m with diameters of 2.4, 7.55, 15, 35, and 50 mm. Another
experiment of diameter 58 mm and length of 6.5 m was considered.
    Cylinder of diameters 0.475 cm to 7.62 cm, L/D from 8 to 127. Transition
between ranges is not smooth. If outside of range, no warning is given.
Formulas are presented similarly in [3]_ and [4]_, but only [4]_ shows
the transition formula.
Examples
--------
>>> Nu_vertical_cylinder_Eigenson_Morgan(0.7, 2E10)
230.55946525499715
References
----------
.. [1] Eigenson L (1940). Les lois gouvernant la transmission de la chaleur
aux gaz biatomiques par les parois des cylindres verticaux dans le cas
de convection naturelle. Dokl Akad Nauk SSSR 26:440-444
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1.69E10 and turbulent is None):
return 0.148*Ra**(1/3.) - 127.6
elif 1E9 < Ra < 1.69E10 and turbulent is not False:
return 51.5 + 0.0000726*Ra**0.63
else:
return 0.48*Ra**0.25
def Nu_vertical_cylinder_Touloukian_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 0.726 Ra_H^{0.25},\; 2\times 10^{8} < Ra < 4\times 10^{10}
.. math::
Nu_H = 0.0674 (Gr_H Pr^{1.29})^{1/3},\; 4\times 10^{10} < Ra < 9\times 10^{11}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameters 2.75 inch, with heights of 6, 18, and 36.25 inch.
Temperature was controlled via multiple separately controlled heating
sections. Fluids were water and ethylene-glycol. Transition between ranges
is not smooth. If outside of range, no warning is given. [2]_, [3]_, and
[4]_ are in complete agreement about this formulation.
Examples
--------
>>> Nu_vertical_cylinder_Touloukian_Morgan(.7, 2E10)
249.72879961097854
References
----------
.. [1] Touloukian, Y. S, George A Hawkins, and Max Jakob. Heat Transfer by
Free Convection from Heated Vertical Surfaces to Liquids.
Trans. ASME 70, 13-18 (1948).
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 4E10 and turbulent is None):
return 0.0674*(Gr*Pr**1.29)**(1/3.)
else:
return 0.726*Ra**0.25
def Nu_vertical_cylinder_McAdams_Weiss_Saunders(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ and [2]_ correlated by
[3]_, as presented in [4]_, [5]_, and [6]_.
.. math::
Nu_H = 0.59 Ra_H^{0.25},\; 10^{4} < Ra < 10^{9}
.. math::
Nu_H = 0.13 Ra_H^{1/3.},\; 10^{9} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Transition between ranges is not smooth. If outside of range, no warning is
given. For ranges under 10^4, a graph is provided, not included here.
Examples
--------
>>> Nu_vertical_cylinder_McAdams_Weiss_Saunders(.7, 2E10)
313.31849434277973
References
----------
.. [1] Weise, Rudolf. "Warmeubergang durch freie Konvektion an
quadratischen Platten." Forschung auf dem Gebiet des Ingenieurwesens
A 6, no. 6 (November 1935): 281-92. doi:10.1007/BF02592565.
.. [2] Saunders, O. A. "The Effect of Pressure Upon Natural Convection in
Air." Proceedings of the Royal Society of London A: Mathematical,
Physical and Engineering Sciences 157, no. 891 (November 2, 1936):
278-91. doi:10.1098/rspa.1936.0194.
.. [3] McAdams, William Henry. Heat Transmission. 3E. Malabar, Fla:
Krieger Pub Co, 1985.
.. [4] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [5] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [6] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E9 and turbulent is None):
return 0.13*Ra**(1/3.)
else:
return 0.59*Ra**0.25
def Nu_vertical_cylinder_Kreith_Eckert(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by
[2]_, also as presented in [3]_, [4]_, and [5]_.
.. math::
Nu_H = 0.555 Ra_H^{0.25},\; 10^{5} < Ra < 10^{9}
.. math::
Nu_H = 0.021 Ra_H^{0.4},\; 10^{9} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Transition between ranges is not smooth. If outside of range, no warning is
given.
Examples
--------
>>> Nu_vertical_cylinder_Kreith_Eckert(.7, 2E10)
240.25393473033196
References
----------
.. [1] Eckert, E. R. G., Thomas W. Jackson, and United States. Analysis of
Turbulent Free-Convection Boundary Layer on Flat Plate. National
Advisory Committee for Aeronautics, no. 2207. Washington, D.C.: National
       Advisory Committee for Aeronautics, 1950.
.. [2] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat
Transfer. Cengage, 2010.
.. [3] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [4] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [5] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E9 and turbulent is None):
return 0.021*Ra**0.4
else:
return 0.555*Ra**0.25
def Nu_vertical_cylinder_Hanesian_Kalish_Morgan(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by
[2]_, also as presented in [3]_ and [4]_.
.. math::
Nu_H = 0.48 Ra_H^{0.23},\; 10^{6} < Ra < 10^{8}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
For air and fluoro-carbons. If outside of range, no warning is given.
Laminar range only!
Examples
--------
>>> Nu_vertical_cylinder_Hanesian_Kalish_Morgan(.7, 1E7)
18.014150492696604
References
----------
.. [1] Hanesian, D. and Kalish, R. "Heat Transfer by Natural Convection
with Fluorocarbon Gases." IEEE Transactions on Parts, Materials and
Packaging 6, no. 4 (December 1970): 147-148.
doi:10.1109/TPMP.1970.1136270.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
return 0.48*Ra**0.23
### Vertical cylinders, more complex correlations
def Nu_vertical_cylinder_Al_Arabi_Khamis(Pr, Gr, L, D, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to [1]_, also as presented in [2]_ and [3]_.
.. math::
Nu_H = 2.9Ra_H^{0.25}/Gr_D^{1/12},\; 9.88 \times 10^7 \le Ra_H \le 2.7\times10^{9}
.. math::
Nu_H = 0.47 Ra_H^{0.333}/Gr_D^{1/12},\; 2.7 \times 10^9 \le Ra_H \le 2.95\times10^{10}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float
Length of vertical cylinder, [m]
D : float
Diameter of cylinder, [m]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection, [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
For air. Local Nusselt number results also given in [1]_. D from 12.75 to
51 mm; H from 300 to 2000 mm. Temperature kept constant by steam condensing.
If outside of range, no warning is given. Applies for range of:
.. math::
1.08 \times 10^4 \le Gr_D \le 6.9 \times 10^5
Examples
--------
>>> Nu_vertical_cylinder_Al_Arabi_Khamis(.71, 2E10, 10, 1)
280.39793209114765
References
----------
.. [1] Al-Arabi, M., and M. Khamis. "Natural Convection Heat Transfer from
Inclined Cylinders." International Journal of Heat and Mass Transfer 25,
no. 1 (January 1982): 3-15. doi:10.1016/0017-9310(82)90229-0.
.. [2] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [3] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
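    # Gr is based on cylinder height; rescale by (D/L)^3 to obtain the
    # diameter-based Grashof number Gr_D used in the correlation denominator.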
Gr_D = Gr/L**3*D**3
Ra = Pr*Gr
if turbulent or (Ra > 2.6E9 and turbulent is None):
return 0.47*Ra**(1/3.)*Gr_D**(-1/12.)
else:
return 2.9*Ra**0.25*Gr_D**(-1/12.)
def Nu_vertical_cylinder_Popiel_Churchill(Pr, Gr, L, D):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to [1]_, also presented in [2]_.
.. math::
\frac{Nu}{Nu_{L,fp}} = 1 + B\left[32^{0.5}Gr_L^{-0.25}\frac{L}{D}\right]^C
.. math::
B = 0.0571322 + 0.20305 Pr^{-0.43}
.. math::
C = 0.9165 - 0.0043Pr^{0.5} + 0.01333\ln Pr + 0.0004809/Pr
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float
Length of vertical cylinder, [m]
D : float
Diameter of cylinder, [m]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
For 0.01 < Pr < 100. Requires a vertical flat plate correlation.
    Both [2]_ and [3]_ present a power of 2 instead of 0.5 on the 32 in the equation,
but the original has the correct form.
Examples
--------
>>> Nu_vertical_cylinder_Popiel_Churchill(0.7, 1E10, 2.5, 1)
228.89790055149896
References
----------
.. [1] Popiel, C. O., J. Wojtkowiak, and K. Bober. "Laminar Free Convective
Heat Transfer from Isothermal Vertical Slender Cylinder." Experimental
Thermal and Fluid Science 32, no. 2 (November 2007): 607-613.
doi:10.1016/j.expthermflusci.2007.07.003.
.. [2] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [3] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
B = 0.0571322 + 0.20305*Pr**-0.43
C = 0.9165 - 0.0043*Pr**0.5 + 0.01333*log(Pr) + 0.0004809/Pr
Nu_fp = Nu_vertical_plate_Churchill(Pr, Gr)
return Nu_fp*(1 + B*(32**0.5*Gr**-0.25*L/D)**C)
# Nice Name : (function_call, does_turbulent, does_laminar, transition_Ra, is_only_Pr_Gr)
vertical_cylinder_correlations = {
'Churchill Vertical Plate': (Nu_vertical_plate_Churchill, True, True, None, True),
'Griffiths, Davis, & Morgan': (Nu_vertical_cylinder_Griffiths_Davis_Morgan, True, True, 1.00E+009, True),
'Jakob, Linke, & Morgan': (Nu_vertical_cylinder_Jakob_Linke_Morgan, True, True, 1.00E+008, True),
'Carne & Morgan': (Nu_vertical_cylinder_Carne_Morgan, True, True, 2.00E+008, True),
'Eigenson & Morgan': (Nu_vertical_cylinder_Eigenson_Morgan, True, True, 6.90E+011, True),
'Touloukian & Morgan': (Nu_vertical_cylinder_Touloukian_Morgan, True, True, 4.00E+010, True),
'McAdams, Weiss & Saunders': (Nu_vertical_cylinder_McAdams_Weiss_Saunders, True, True, 1.00E+009, True),
'Kreith & Eckert': (Nu_vertical_cylinder_Kreith_Eckert, True, True, 1.00E+009, True),
'Hanesian, Kalish & Morgan': (Nu_vertical_cylinder_Hanesian_Kalish_Morgan, False, True, 1.00E+008, True),
'Al-Arabi & Khamis': (Nu_vertical_cylinder_Al_Arabi_Khamis, True, True, 2.60E+009, False),
'Popiel & Churchill': (Nu_vertical_cylinder_Popiel_Churchill, False, True, 1.00E+009, False),
}
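# Illustrative sketch, not part of the original module: the tuple metadata above
# can drive generic dispatch, e.g. selecting correlations that cover the
# turbulent regime and need only Pr and Gr. Uncomment to try:
#
#def _turbulent_Pr_Gr_corrs(correlations=vertical_cylinder_correlations):
#    return [name for name, (f, does_turb, does_lam, Ra_trans, only_Pr_Gr)
#            in correlations.items() if does_turb and only_Pr_Gr]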
def Nu_vertical_cylinder_methods(Pr, Gr, L=None, D=None, check_ranges=True):
    r'''This function returns a list of correlation names for free convection
    to a vertical cylinder.
    The preferred correlation is returned first: 'Popiel & Churchill' for fully
    defined geometries, and 'McAdams, Weiss & Saunders' otherwise.
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float, optional
Length of vertical cylinder, [m]
D : float, optional
Diameter of cylinder, [m]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs
Examples
--------
>>> Nu_vertical_cylinder_methods(0.72, 1E7)[0]
'McAdams, Weiss & Saunders'
'''
if L is None or D is None:
return ['McAdams, Weiss & Saunders', 'Churchill Vertical Plate',
'Griffiths, Davis, & Morgan', 'Jakob, Linke, & Morgan', 'Carne & Morgan',
'Eigenson & Morgan', 'Touloukian & Morgan', 'Kreith & Eckert', 'Hanesian, Kalish & Morgan']
else:
return ['Popiel & Churchill', 'Churchill Vertical Plate', 'Griffiths, Davis, & Morgan',
'Jakob, Linke, & Morgan', 'Carne & Morgan', 'Eigenson & Morgan', 'Touloukian & Morgan',
'McAdams, Weiss & Saunders', 'Kreith & Eckert', 'Hanesian, Kalish & Morgan',
'Al-Arabi & Khamis']
def Nu_vertical_cylinder(Pr, Gr, L=None, D=None, Method=None):
r'''This function handles choosing which vertical cylinder free convection
correlation is used. Generally this is used by a helper class, but can be
used directly. Will automatically select the correlation to use if none is
provided; returns None if insufficient information is provided.
Preferred functions are 'Popiel & Churchill' for fully defined geometries,
and 'McAdams, Weiss & Saunders' otherwise.
Examples
--------
>>> Nu_vertical_cylinder(0.72, 1E7)
30.562236756513943
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float, optional
Length of vertical cylinder, [m]
D : float, optional
Diameter of cylinder, [m]
Returns
-------
Nu : float
Nusselt number, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
vertical_cylinder_correlations
'''
if Method is None:
if L is None or D is None:
Method2 = 'McAdams, Weiss & Saunders'
else:
Method2 = 'Popiel & Churchill'
else:
Method2 = Method
if Method2 == 'Churchill Vertical Plate':
return Nu_vertical_plate_Churchill(Pr=Pr, Gr=Gr)
elif Method2 == 'Griffiths, Davis, & Morgan':
return Nu_vertical_cylinder_Griffiths_Davis_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Jakob, Linke, & Morgan':
return Nu_vertical_cylinder_Jakob_Linke_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Carne & Morgan':
return Nu_vertical_cylinder_Carne_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Eigenson & Morgan':
return Nu_vertical_cylinder_Eigenson_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Touloukian & Morgan':
return Nu_vertical_cylinder_Touloukian_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'McAdams, Weiss & Saunders':
return Nu_vertical_cylinder_McAdams_Weiss_Saunders(Pr=Pr, Gr=Gr)
elif Method2 == 'Kreith & Eckert':
return Nu_vertical_cylinder_Kreith_Eckert(Pr=Pr, Gr=Gr)
elif Method2 == 'Hanesian, Kalish & Morgan':
return Nu_vertical_cylinder_Hanesian_Kalish_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Al-Arabi & Khamis':
return Nu_vertical_cylinder_Al_Arabi_Khamis(Pr=Pr, Gr=Gr, L=L, D=D)
elif Method2 == 'Popiel & Churchill':
return Nu_vertical_cylinder_Popiel_Churchill(Pr=Pr, Gr=Gr, L=L, D=D)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
#import matplotlib.pyplot as plt
#import numpy as np
##L, D = 1.5, 0.1
#Pr, Gr = 0.72, 1E8
#methods = Nu_vertical_cylinder_methods(Pr, Gr)
#Grs = np.logspace(2, 12, 10000)
#
#for method in methods:
# Nus = [Nu_vertical_cylinder(Pr=Pr, Gr=i, Method=method) for i in Grs]
# plt.loglog(Grs, Nus, label=method)
#plt.legend()
#plt.show()
### Horizontal Cylinders
def Nu_horizontal_cylinder_Churchill_Chu(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a horizontal
cylinder according to the Churchill-Chu [1]_ correlation, also presented in
[2]_. Cylinder must be isothermal; an alternate expression exists for
constant heat flux.
.. math::
Nu_{D}=\left[0.60+\frac{0.387Ra_{D}^{1/6}}
{[1+(0.559/Pr)^{9/16}]^{8/27}}\right]^2
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
Although transition from laminar to turbulent is discrete in reality, this
equation provides a smooth transition in value from laminar to turbulent.
Checked with the original source, which has its powers unsimplified but
is equivalent.
[1]_ recommends 1E-5 as the lower limit for Ra, but no upper limit. [2]_
suggests an upper limit of 1E12.
Examples
--------
From [2]_, Example 9.2, matches:
>>> Nu_horizontal_cylinder_Churchill_Chu(0.69, 2.63E9)
139.13493970073597
References
----------
.. [1] Churchill, Stuart W., and Humbert H. S. Chu. "Correlating Equations
for Laminar and Turbulent Free Convection from a Horizontal Cylinder."
International Journal of Heat and Mass Transfer 18, no. 9
(September 1975): 1049-53. doi:10.1016/0017-9310(75)90222-7.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
'''
Ra = Pr*Gr
return (0.6 + 0.387*Ra**(1/6.)/(1. + (0.559/Pr)**(9/16.))**(8/27.))**2
def Nu_horizontal_cylinder_Kuehn_Goldstein(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a horizontal
cylinder according to the Kuehn-Goldstein [1]_ correlation, also shown in
[2]_. Cylinder must be isothermal.
.. math::
\frac{2}{Nu_D} = \ln\left[1 + \frac{2}{\left[\left\{0.518Ra_D^{0.25}
\left[1 + \left(\frac{0.559}{Pr}\right)^{3/5}\right]^{-5/12}
\right\}^{15} + (0.1Ra_D^{1/3})^{15}\right]^{1/15}}\right]
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
[1]_ suggests this expression is valid for all cases except low-Pr fluids.
[2]_ suggests no restrictions.
Examples
--------
>>> Nu_horizontal_cylinder_Kuehn_Goldstein(0.69, 2.63E9)
122.99323525628186
References
----------
.. [1] Kuehn, T. H., and R. J. Goldstein. "Correlating Equations for
Natural Convection Heat Transfer between Horizontal Circular Cylinders."
International Journal of Heat and Mass Transfer 19, no. 10
(October 1976): 1127-34. doi:10.1016/0017-9310(76)90145-9
.. [2] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
return 2./log(1 + 2./((0.518*Ra**0.25*(1. + (0.559/Pr)**0.6)**(-5/12.))**15
+ (0.1*Ra**(1/3.))**15)**(1/15.))
def Nu_horizontal_cylinder_Morgan(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a horizontal
cylinder according to the Morgan [1]_ correlations, a product of a very
large review of the literature. Sufficiently common as to be shown in [2]_.
Cylinder must be isothermal.
.. math::
Nu_D = C Ra_D^n
+----------+----------+-------+-------+
| Gr min | Gr max | C | n |
+==========+==========+=======+=======+
| 10E-10 | 10E-2 | 0.675 | 0.058 |
+----------+----------+-------+-------+
| 10E-2 | 10E2 | 1.02 | 0.148 |
+----------+----------+-------+-------+
| 10E2 | 10E4 | 0.850 | 0.188 |
+----------+----------+-------+-------+
| 10E4 | 10E7 | 0.480 | 0.250 |
+----------+----------+-------+-------+
| 10E7 | 10E12 | 0.125 | 0.333 |
+----------+----------+-------+-------+
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
Most comprehensive review with a new proposed equation to date.
    Discontinuous at the jumps between ranges. Blindly extrapolates outside of
    the upper and lower limits without warning.
Examples
--------
>>> Nu_horizontal_cylinder_Morgan(0.69, 2.63E9)
151.3881997228419
References
----------
.. [1] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [2] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if Ra < 1E-2:
C, n = 0.675, 0.058
elif Ra < 1E2:
C, n = 1.02, 0.148
elif Ra < 1E4:
C, n = 0.850, 0.188
elif Ra < 1E7:
C, n = 0.480, 0.250
else:
# up to 1E12
C, n = 0.125, 0.333
return C*Ra**n
horizontal_cylinder_correlations = {
'Churchill-Chu': (Nu_horizontal_cylinder_Churchill_Chu),
'Kuehn & Goldstein': (Nu_horizontal_cylinder_Kuehn_Goldstein),
'Morgan': (Nu_horizontal_cylinder_Morgan)
}
def Nu_horizontal_cylinder_methods(Pr, Gr, check_ranges=True):
    r'''This function returns a list of correlation names for free convection
to a horizontal cylinder.
Preferred functions are 'Morgan' when discontinuous results are acceptable
and 'Churchill-Chu' otherwise.
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs
Examples
--------
>>> Nu_horizontal_cylinder_methods(0.72, 1E7)[0]
'Morgan'
'''
return ['Morgan', 'Churchill-Chu', 'Kuehn & Goldstein']
def Nu_horizontal_cylinder(Pr, Gr, Method=None):
r'''This function handles choosing which horizontal cylinder free convection
correlation is used. Generally this is used by a helper class, but can be
used directly. Will automatically select the correlation to use if none is
provided; returns None if insufficient information is provided.
Preferred functions are 'Morgan' when discontinuous results are acceptable
and 'Churchill-Chu' otherwise.
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
horizontal_cylinder_correlations
Notes
-----
All fluid properties should be evaluated at the film temperature, the
average between the outer surface temperature of the solid, and the fluid
temperature far away from the heat transfer interface - normally the same
as the temperature before any cooling or heating occurs.
.. math::
T_f = (T_{\text{surface}} + T_\infty)/2
Examples
--------
>>> Nu_horizontal_cylinder(0.72, 1E7)
24.864192615468973
'''
if Method is None:
Method2 = 'Morgan'
else:
Method2 = Method
if Method2 == 'Churchill-Chu':
return Nu_horizontal_cylinder_Churchill_Chu(Pr=Pr, Gr=Gr)
elif Method2 == 'Kuehn & Goldstein':
return Nu_horizontal_cylinder_Kuehn_Goldstein(Pr=Pr, Gr=Gr)
elif Method2 == 'Morgan':
return Nu_horizontal_cylinder_Morgan(Pr=Pr, Gr=Gr)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
#import matplotlib.pyplot as plt
#import numpy as np
#Pr, Gr = 0.72, 1E8
#methods = Nu_horizontal_cylinder_methods(Pr, Gr)
#Grs = np.logspace(-2, 2.5, 10000)
#
#for method in methods:
# Nus = [Nu_horizontal_cylinder(Pr=Pr, Gr=i, Method=method) for i in Grs]
# plt.semilogx(Grs, Nus, label=method)
#plt.legend()
#plt.show()
def Nu_coil_Xin_Ebadian(Pr, Gr, horizontal=False):
r'''Calculates Nusselt number for natural convection around a vertical
or horizontal helical coil suspended in a fluid without
forced convection.
For horizontal cases:
.. math::
Nu_D = 0.318 Ra_D^{0.293},\; 5 \times {10}^{3} < Ra < 1 \times {10}^5
For vertical cases:
.. math::
Nu_D = 0.290 Ra_D^{0.293},\; 5 \times {10}^{3} < Ra < 1 \times {10}^5
Parameters
----------
Pr : float
        Prandtl number calculated at the film temperature (the average of the
        wall temperature and the fluid temperature far from the coil), [-]
Gr : float
        Grashof number calculated at the film temperature (the average of the
        wall temperature and the fluid temperature far from the coil), using
        the outer diameter of the coil [-]
horizontal : bool, optional
Whether the coil is horizontal or vertical, [-]
Returns
-------
Nu : float
Nusselt number using the outer diameter of the coil
and the film temperature, [-]
Notes
-----
This correlation is also reviewed in [2]_.
Examples
--------
>>> Nu_coil_Xin_Ebadian(0.7, 2E4, horizontal=False)
4.755689726250451
>>> Nu_coil_Xin_Ebadian(0.7, 2E4, horizontal=True)
5.2148597687849785
References
----------
.. [1] Xin, R. C., and M. A. Ebadian. "Natural Convection Heat Transfer
from Helicoidal Pipes." Journal of Thermophysics and Heat Transfer 10,
no. 2 (1996): 297-302.
.. [2] Prabhanjan, Devanahalli G., Timothy J. Rennie, and G. S. Vijaya
Raghavan. "Natural Convection Heat Transfer from Helical Coiled Tubes."
International Journal of Thermal Sciences 43, no. 4 (April 1, 2004):
359-65.
'''
Ra = Pr*Gr
if horizontal:
return 0.318*Ra**0.293
else:
return 0.290*Ra**0.293 | mit | 6,765,372,046,632,319,000 | 33.813509 | 107 | 0.621733 | false |
thijsmie/imp_flask | imp_flask/extensions.py | 1 | 1038 | """Flask and other extensions instantiated here.
To avoid circular imports with views and create_app(), extensions are instantiated here. They will be initialized
(calling init_app()) in application.py.
"""
from logging import getLogger
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_wtf.csrf import CsrfProtect
from sqlalchemy.event import listens_for
from sqlalchemy.pool import Pool
LOG = getLogger(__name__)
@listens_for(Pool, 'connect', named=True)
def _on_connect(dbapi_connection, **_):
"""Set MySQL mode to TRADITIONAL on databases that don't set this automatically.
Without this, MySQL will silently insert invalid values in the database, causing very long debugging sessions in the
long run.
http://www.enricozini.org/2012/tips/sa-sqlmode-traditional/
"""
LOG.debug('Setting SQL Mode to TRADITIONAL.')
dbapi_connection.cursor().execute("SET SESSION sql_mode='TRADITIONAL'")
db = SQLAlchemy()
mail = Mail()
csrf = CsrfProtect()
| mit | 1,401,830,091,470,726,700 | 30.4375 | 120 | 0.728324 | false |
pyinvoke/invocations | invocations/autodoc.py | 1 | 3987 | """
Sphinx autodoc hooks for documenting Invoke-level objects such as tasks.
Unlike most of the rest of Invocations, this module isn't for reuse in the
"import and call functions" sense, but instead acts as a Sphinx extension which
allows Sphinx's `autodoc`_ functionality to see and document
Invoke tasks and similar Invoke objects.
.. note::
This functionality is mostly useful for redistributable/reusable tasks
which have been defined as importable members of some Python package or
module, as opposed to "local-only" tasks that live in a single project's
``tasks.py``.
However, it will work for any tasks that Sphinx autodoc can import, so in a
pinch you could for example tweak ``sys.path`` in your Sphinx ``conf.py``
to get it loading up a "local" tasks file for import.
To use:
- Add ``"sphinx.ext.autodoc"`` and ``"invocations.autodoc"`` to your Sphinx
``conf.py``'s ``extensions`` list.
- Use Sphinx autodoc's ``automodule`` directive normally, aiming it at your
tasks module(s), e.g. ``.. automodule:: myproject.tasks`` in some ``.rst``
document of your choosing.
- As noted above, this only works for modules that are importable, like any
other Sphinx autodoc use case.
- Unless you want to opt-in which module members get documented, use
``:members:`` or add ``"members"`` to your ``conf.py``'s
``autodoc_default_flags``.
- By default, only tasks with docstrings will be picked up, unless you also
give the ``:undoc-members:`` flag or add ``:undoc-members:`` / add
``"undoc-members"`` to ``autodoc_default_flags``.
- Please see the `autodoc`_ docs for details on these settings and more!
- Build your docs, and you should see your tasks showing up as documented
functions in the result.
.. _autodoc: http://www.sphinx-doc.org/en/master/ext/autodoc.html
"""
from invoke import Task
from sphinx.util.inspect import getargspec # Improved over raw stdlib
# For sane mock patching. Meh.
from sphinx.ext import autodoc
class TaskDocumenter(
autodoc.DocstringSignatureMixin, autodoc.ModuleLevelDocumenter
):
objtype = "task"
directivetype = "function"
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return isinstance(member, Task)
def format_args(self):
function = self.object.body
# TODO: consider extending (or adding a sibling to) Task.argspec so it
# preserves more of the full argspec tuple.
# TODO: whether to preserve the initial context argument is an open
# question. For now, it will appear, but only pending invoke#170 -
# after which point "call tasks as raw functions" may be less common.
# TODO: also, it may become moot-ish if we turn this all into emission
# of custom domain objects and/or make the CLI arguments the focus
return autodoc.formatargspec(function, *getargspec(function))
def document_members(self, all_members=False):
# Neuter this so superclass bits don't introspect & spit out autodoc
# directives for task attributes. Most of that's not useful.
pass
def setup(app):
# NOTE: the "correct", forward compatible call to make here is
# app.add_autodocumenter() - because as of Sphinx 1.7, the inner API we are
# manipulating here got changed around a bunch (but the outer
# API of add_autodocumenter() remained the same, on purpose).
# Unfortunately, in both cases add_autodocumenter() both registers the
# documenter AND adds an `auto<type>` directive - meaning it's not possible
# to register a "acts kinda like another" Documenter or you double-define
# e.g. autofunction, which Sphinx warns about and also presumably kills
# real function documenting.
# NOTE: sooo for now, since a bunch of our other shit breaks on Sphinx 1.7,
# we are just explicitly calling autodoc's add_documenter. Sadface.
autodoc.add_documenter(TaskDocumenter)
| bsd-2-clause | -2,718,473,051,826,308,000 | 43.3 | 79 | 0.709054 | false |
leonid-shevtsov/ClickableUrls_SublimeText | clickable_urls.py | 1 | 5787 | import sublime
import sublime_plugin
import webbrowser
import threading
class UrlHighlighter(sublime_plugin.EventListener):
# Thanks Jeff Atwood http://www.codinghorror.com/blog/2008/10/the-problem-with-urls.html
# ^ that up here is a URL that should be matched
URL_REGEX = "\\bhttps?://[-A-Za-z0-9+&@#/%?=~_()|!:,.;']*[-A-Za-z0-9+&@#/%=~_(|]"
DEFAULT_MAX_URLS = 200
SETTINGS_FILENAME = 'ClickableUrls.sublime-settings'
urls_for_view = {}
scopes_for_view = {}
ignored_views = []
browser = None
highlight_semaphore = threading.Semaphore()
def on_activated(self, view):
self.update_url_highlights(view)
# Blocking handlers for ST2
def on_load(self, view):
if sublime.version() < '3000':
self.update_url_highlights(view)
def on_modified(self, view):
if sublime.version() < '3000':
self.update_url_highlights(view)
# Async listeners for ST3
def on_load_async(self, view):
self.update_url_highlights_async(view)
def on_modified_async(self, view):
self.update_url_highlights_async(view)
def on_close(self, view):
for map in [self.urls_for_view, self.scopes_for_view, self.ignored_views]:
if view.id() in map:
del map[view.id()]
"""The logic entry point. Find all URLs in view, store and highlight them"""
def update_url_highlights(self, view):
settings = sublime.load_settings(UrlHighlighter.SETTINGS_FILENAME)
should_highlight_urls = settings.get('highlight_urls', True)
max_url_limit = settings.get('max_url_limit', UrlHighlighter.DEFAULT_MAX_URLS)
if view.id() in UrlHighlighter.ignored_views:
return
urls = view.find_all(UrlHighlighter.URL_REGEX)
# Avoid slowdowns for views with too much URLs
if len(urls) > max_url_limit:
print("UrlHighlighter: ignoring view with %u URLs" % len(urls))
UrlHighlighter.ignored_views.append(view.id())
return
UrlHighlighter.urls_for_view[view.id()] = urls
should_highlight_urls = sublime.load_settings(UrlHighlighter.SETTINGS_FILENAME).get('highlight_urls', True)
if (should_highlight_urls):
self.highlight_urls(view, urls)
"""Same as update_url_highlights, but avoids race conditions with a
semaphore."""
def update_url_highlights_async(self, view):
UrlHighlighter.highlight_semaphore.acquire()
try:
self.update_url_highlights(view)
finally:
UrlHighlighter.highlight_semaphore.release()
"""Creates a set of regions from the intersection of urls and scopes,
underlines all of them."""
def highlight_urls(self, view, urls):
# We need separate regions for each lexical scope for ST to use a proper color for the underline
scope_map = {}
for url in urls:
scope_name = view.scope_name(url.a)
scope_map.setdefault(scope_name, []).append(url)
for scope_name in scope_map:
self.underline_regions(view, scope_name, scope_map[scope_name])
self.update_view_scopes(view, scope_map.keys())
"""Apply underlining with provided scope name to provided regions.
Uses the empty region underline hack for Sublime Text 2 and native
underlining for Sublime Text 3."""
def underline_regions(self, view, scope_name, regions):
if sublime.version() >= '3019':
# in Sublime Text 3, the regions are just underlined
view.add_regions(
u'clickable-urls ' + scope_name,
regions,
scope_name,
flags=sublime.DRAW_NO_FILL|sublime.DRAW_NO_OUTLINE|sublime.DRAW_SOLID_UNDERLINE)
else:
# in Sublime Text 2, the 'empty region underline' hack is used
char_regions = [sublime.Region(pos, pos) for region in regions for pos in range(region.a, region.b)]
view.add_regions(
u'clickable-urls ' + scope_name,
char_regions,
scope_name,
sublime.DRAW_EMPTY_AS_OVERWRITE)
"""Store new set of underlined scopes for view. Erase underlining from
scopes that were used but are not anymore."""
def update_view_scopes(self, view, new_scopes):
old_scopes = UrlHighlighter.scopes_for_view.get(view.id(), None)
if old_scopes:
unused_scopes = set(old_scopes) - set(new_scopes)
for unused_scope_name in unused_scopes:
view.erase_regions(u'clickable-urls ' + unused_scope_name)
UrlHighlighter.scopes_for_view[view.id()] = new_scopes
def open_url(url):
browser = sublime.load_settings(UrlHighlighter.SETTINGS_FILENAME).get('clickable_urls_browser')
try:
webbrowser.get(browser).open(url, autoraise=True)
except(webbrowser.Error):
sublime.error_message('Failed to open browser. See "Customizing the browser" in the README.')
class OpenUrlUnderCursorCommand(sublime_plugin.TextCommand):
def run(self, edit):
if self.view.id() in UrlHighlighter.urls_for_view:
selection = self.view.sel()[0]
if selection.empty():
selection = next((url for url in UrlHighlighter.urls_for_view[self.view.id()] if url.contains(selection)), None)
if not selection:
return
url = self.view.substr(selection)
open_url(url)
class OpenAllUrlsCommand(sublime_plugin.TextCommand):
def run(self, edit):
if self.view.id() in UrlHighlighter.urls_for_view:
for url in set([self.view.substr(url_region) for url_region in UrlHighlighter.urls_for_view[self.view.id()]]):
open_url(url)
| mit | -7,902,812,575,243,236,000 | 38.636986 | 128 | 0.632625 | false |
iniweb/deployCD | app/views.py | 1 | 4840 | from flask import render_template, flash, url_for, request, redirect, session
from flask_login import login_user, logout_user, current_user, login_required
from app import app, db, gitlab, login_manager
from forms import ProjectForm
from models import User, Project, ROLE_USER, ROLE_ADMIN
import copy
import ansible.runner
import ansible.inventory
import ansible.callbacks
import ansible.utils
@app.route('/')
@app.route('/projects')
@app.route('/projects/<int:page>')
def index(page=1):
if current_user.is_authenticated():
projects = Project.query.order_by(Project.deploy_at.desc(), Project.updated_at.desc()).paginate(page, 10, False)
return render_template('index.html', projects=projects)
return redirect(url_for('login'))
@app.route('/project/create', methods=["GET", "POST"])
@login_required
def project_create():
form = ProjectForm()
if form.validate_on_submit():
new_project = Project(
title=form.title.data,
branch=form.branch.data,
user_id=current_user.get_id(),
repo_url=form.repo_url.data
)
db.session.add(new_project)
db.session.commit()
flash('Project has been created successfully.', 'success')
return redirect(url_for('project', project_id=new_project.id))
return render_template('project/form.html', form=form, action_url=url_for('project_create'))
@app.route('/project/<int:project_id>/edit', methods=["GET", "POST"])
@login_required
def project_edit(project_id):
project = Project.query.filter_by(id=project_id).first_or_404()
form = ProjectForm(obj=project)
if request.method == 'POST' and form.validate():
form.populate_obj(project)
db.session.commit()
flash('Project has been updated successfully.', 'success')
return redirect(url_for('project', project_id=project.id))
return render_template('project/form.html', form=form, action_url=url_for('project_edit', project_id=project.id))
@app.route('/project/<int:project_id>')
@login_required
def project(project_id):
project = Project.query.filter_by(id=project_id).first_or_404()
return render_template('project/show.html', project=project)
@app.route('/project/<int:project_id>/servers')
@login_required
def project_servers(project_id):
project = Project.query.filter_by(id=project_id).first_or_404()
return render_template('servers/list.html', project=project)
@app.route('/project/<int:project_id>/deploy')
@login_required
def project_deploy(project_id):
project = Project.query.filter_by(id=project_id).first_or_404()
hosts = ["localhost"]
ansible.utils.VERBOSITY = 1
inventory = ansible.inventory.Inventory(hosts)
base_runner = ansible.runner.Runner(
pattern='all',
transport='local',
inventory=inventory,
# callbacks=runner_cb,
check=False,
background=1
)
runner = copy.copy(base_runner)
runner.module_name = 'git'
runner.module_args = '[email protected]:iniweb/ansible-vagrant-sf2.git'
result = runner.run()
print result
return render_template('project/deploy.html', project=project)
@app.route('/login')
def login():
return render_template('login.html')
@app.route('/login/gitlab')
def login_gitlab():
if current_user.is_authenticated():
return redirect(url_for('index'))
return gitlab.authorize(callback=url_for('authorized', _external=True))
@app.route('/logout')
def logout():
logout_user()
session.pop('gitlab_token', None)
return redirect(url_for('index'))
@app.route('/oauth-authorized')
def authorized():
if current_user.is_authenticated():
return redirect(url_for('index'))
resp = gitlab.authorized_response()
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error'],
request.args['error_description']
)
session['gitlab_token'] = (resp['access_token'], '')
me = gitlab.get('user')
user = User.query.filter_by(email=me.data['email']).first()
if not user:
role = ROLE_ADMIN if me.data['is_admin'] else ROLE_USER
user = User(
role=role,
email=me.data['email'],
avatar_url=me.data['avatar_url'],
enabled=True
)
db.session.add(user)
db.session.commit()
login_user(user, True)
return redirect(url_for('index'))
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error():
db.session.rollback()
return render_template('500.html'), 500
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
@gitlab.tokengetter
def get_gitlab_token():
return session.get('gitlab_token')
| mit | 3,487,567,261,366,403,000 | 26.816092 | 120 | 0.66157 | false |
394954369/horizon | openstack_dashboard/dashboards/project/images/images/forms.py | 1 | 11525 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing images.
"""
from django.conf import settings
from django.forms import ValidationError # noqa
from django.forms.widgets import HiddenInput # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard import policy
IMAGE_BACKEND_SETTINGS = getattr(settings, 'OPENSTACK_IMAGE_BACKEND', {})
IMAGE_FORMAT_CHOICES = IMAGE_BACKEND_SETTINGS.get('image_formats', [])
class CreateImageForm(forms.SelfHandlingForm):
name = forms.CharField(max_length="255", label=_("Name"))
description = forms.CharField(widget=forms.widgets.Textarea(
attrs={'class': 'modal-body-fixed-width'}),
label=_("Description"),
required=False)
source_type = forms.ChoiceField(
label=_('Image Source'),
required=False,
choices=[('url', _('Image Location')),
('file', _('Image File'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'source'}))
copy_from = forms.CharField(max_length="255",
label=_("Image Location"),
help_text=_("An external (HTTP) URL to load "
"the image from."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-url': _('Image Location'),
'ng-model': 'copyFrom',
'ng-change':
'selectImageFormat(copyFrom)'}),
required=False)
image_file = forms.FileField(label=_("Image File"),
help_text=_("A local image to upload."),
widget=forms.FileInput(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-file': _('Image File'),
'ng-model': 'imageFile',
'ng-change':
'selectImageFormat(imageFile.name)',
'image-file-on-change': None}),
required=False)
disk_format = forms.ChoiceField(label=_('Format'),
choices=[],
widget=forms.Select(attrs={
'class': 'switchable',
'ng-model': 'diskFormat'}))
architecture = forms.CharField(max_length="255", label=_("Architecture"),
required=False)
minimum_disk = forms.IntegerField(label=_("Minimum Disk (GB)"),
help_text=_('The minimum disk size'
' required to boot the'
' image. If unspecified, this'
' value defaults to 0'
' (no minimum).'),
required=False)
minimum_ram = forms.IntegerField(label=_("Minimum RAM (MB)"),
help_text=_('The minimum memory size'
' required to boot the'
' image. If unspecified, this'
' value defaults to 0 (no'
' minimum).'),
required=False)
is_public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def __init__(self, request, *args, **kwargs):
super(CreateImageForm, self).__init__(request, *args, **kwargs)
if (not settings.HORIZON_IMAGES_ALLOW_UPLOAD or
not policy.check((("image", "upload_image"),), request)):
self._hide_file_source_type()
if not policy.check((("image", "set_image_location"),), request):
self._hide_url_source_type()
if not policy.check((("image", "publicize_image"),), request):
self._hide_is_public()
self.fields['disk_format'].choices = IMAGE_FORMAT_CHOICES
def _hide_file_source_type(self):
self.fields['image_file'].widget = HiddenInput()
source_type = self.fields['source_type']
source_type.choices = [choice for choice in source_type.choices
if choice[0] != 'file']
if len(source_type.choices) == 1:
source_type.widget = HiddenInput()
def _hide_url_source_type(self):
self.fields['copy_from'].widget = HiddenInput()
source_type = self.fields['source_type']
source_type.choices = [choice for choice in source_type.choices
if choice[0] != 'url']
if len(source_type.choices) == 1:
source_type.widget = HiddenInput()
def _hide_is_public(self):
self.fields['is_public'].widget = HiddenInput()
self.fields['is_public'].initial = False
def clean(self):
data = super(CreateImageForm, self).clean()
# The image_file key can be missing based on particular upload
# conditions. Code defensively for it here...
image_file = data.get('image_file', None)
image_url = data.get('copy_from', None)
if not image_url and not image_file:
raise ValidationError(
_("A image or external image location must be specified."))
elif image_url and image_file:
raise ValidationError(
_("Can not specify both image and external image location."))
else:
return data
def handle(self, request, data):
# Glance does not really do anything with container_format at the
# moment. It requires it is set to the same disk_format for the three
# Amazon image types, otherwise it just treats them as 'bare.' As such
# we will just set that to be that here instead of bothering the user
# with asking them for information we can already determine.
if data['disk_format'] in ('ami', 'aki', 'ari',):
container_format = data['disk_format']
else:
container_format = 'bare'
meta = {'is_public': data['is_public'],
'protected': data['protected'],
'disk_format': data['disk_format'],
'container_format': container_format,
'min_disk': (data['minimum_disk'] or 0),
'min_ram': (data['minimum_ram'] or 0),
'name': data['name'],
'properties': {}}
if data['description']:
meta['properties']['description'] = data['description']
if data['architecture']:
meta['properties']['architecture'] = data['architecture']
if (settings.HORIZON_IMAGES_ALLOW_UPLOAD and
policy.check((("image", "upload_image"),), request) and
data.get('image_file', None)):
meta['data'] = self.files['image_file']
else:
meta['copy_from'] = data['copy_from']
try:
image = api.glance.image_create(request, **meta)
messages.success(request,
_('Your image %s has been queued for creation.') %
data['name'])
return image
except Exception:
exceptions.handle(request, _('Unable to create new image.'))
class UpdateImageForm(forms.SelfHandlingForm):
image_id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(max_length="255", label=_("Name"))
description = forms.CharField(
widget=forms.widgets.Textarea(),
label=_("Description"),
required=False,
)
kernel = forms.CharField(
max_length="36",
label=_("Kernel ID"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
ramdisk = forms.CharField(
max_length="36",
label=_("Ramdisk ID"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
architecture = forms.CharField(
label=_("Architecture"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
disk_format = forms.ChoiceField(
label=_("Format"),
)
public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def __init__(self, request, *args, **kwargs):
super(UpdateImageForm, self).__init__(request, *args, **kwargs)
self.fields['disk_format'].choices = [(value, name) for value,
name in IMAGE_FORMAT_CHOICES
if value]
if not policy.check((("image", "publicize_image"),), request):
self.fields['public'].widget = forms.CheckboxInput(
attrs={'readonly': 'readonly'})
def handle(self, request, data):
image_id = data['image_id']
error_updating = _('Unable to update image "%s".')
if data['disk_format'] in ['aki', 'ari', 'ami']:
container_format = data['disk_format']
else:
container_format = 'bare'
meta = {'is_public': data['public'],
'protected': data['protected'],
'disk_format': data['disk_format'],
'container_format': container_format,
'name': data['name'],
'properties': {'description': data['description']}}
if data['kernel']:
meta['properties']['kernel_id'] = data['kernel']
if data['ramdisk']:
meta['properties']['ramdisk_id'] = data['ramdisk']
if data['architecture']:
meta['properties']['architecture'] = data['architecture']
# Ensure we do not delete properties that have already been
# set on an image.
meta['purge_props'] = False
try:
image = api.glance.image_update(request, image_id, **meta)
messages.success(request, _('Image was successfully updated.'))
return image
except Exception:
exceptions.handle(request, error_updating % image_id)
| apache-2.0 | 7,588,015,271,839,535,000 | 43.157088 | 78 | 0.531453 | false |
alexpeattie/wethepeopletoolkit | wethepeopletoolkit/clusterer.py | 1 | 2828 | import pandas
import numpy as np
import click
from bitstring import BitArray
from base58 import b58encode_int, b58decode_int
class Clusterer:
def __init__(self):
pass
def cluster(self, n, state_processor, pca = False, model_type = 'kmeans', z_score_exclude = 0.0, seed = None, quiet = False):
from sklearn.cluster import FeatureAgglomeration, KMeans, SpectralClustering
from scipy import stats
model_types = {
'feature-agglomeration': FeatureAgglomeration,
'kmeans': KMeans,
'spectral': SpectralClustering,
}
states = state_processor.states(two_d = pca)
excluded_states, labels = [], []
if z_score_exclude > 0:
if not model_type == 'kmeans':
raise click.UsageError("--z-score-exclude can only be used when --model-type is 'kmeans'")
states_2d = state_processor.states(two_d = True)
      excluded_states = states[~(np.abs(stats.zscore(states_2d)) < z_score_exclude).all(axis=1)]
states = states[(np.abs(stats.zscore(states_2d)) < z_score_exclude).all(axis=1)]
seed = seed or np.random.randint(0, 10 ** 6)
np.random.seed(seed)
if not quiet:
click.echo("Clustering with seed %d..." % seed)
self.model = model_types[model_type](n_clusters = n)
self.data = states.as_matrix()
self.model.fit(self.data)
labels = self.model.labels_
self.results = pandas.DataFrame([states.index, self.model.labels_]).T.sort_values(by=0)
if any(excluded_states):
excluded_results = pandas.DataFrame([excluded_states.index, self.model.predict(excluded_states)]).T
self.results = pandas.DataFrame(np.concatenate([self.results, excluded_results]))
def cluster_ids(self):
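    # Each id packs a cluster's boolean membership vector (one bit per row of
    # self.results) into a bit string and base58-encodes it as an integer.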
labels = self.results[1]
sorted_labels = sorted(labels.unique())
ids = map(lambda l: b58encode_int(BitArray((labels == l).astype(int).tolist()).uint), sorted_labels)
return zip(sorted_labels, ids)
def cluster_id_to_states(self, cluster_id):
states = np.array(['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY'])
return states[list(BitArray(uint = b58decode_int(cluster_id), length = 50))]
def evaluate(self, metric, distance = None):
from sklearn.metrics import silhouette_score, calinski_harabaz_score
if metric == 'silhouette':
return silhouette_score(self.data, self.model.labels_, metric = distance)
if metric == 'calinski_harabaz':
return calinski_harabaz_score(self.data, self.model.labels_)
if metric == 'inertia':
return self.model.inertia_
def results_dict(self):
return self.results.set_index(0)[1].to_dict() | mit | -3,378,057,671,160,845,000 | 41.223881 | 323 | 0.642857 | false |
cschnei3/forseti-security | tests/enforcer/enforcer_test.py | 1 | 4766 | #!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google.cloud.security.enforcer.enforcer."""
import copy
import json
import unittest
import httplib2
import mock
import testing_constants as constants
from tests.unittest_utils import ForsetiTestCase
from google.protobuf import text_format
from tests.unittest_utils import get_datafile_path
from google.cloud.security.enforcer import enforcer_log_pb2
from google.cloud.security.enforcer import enforcer
# Used anywhere a real timestamp could be generated to ensure consistent
# comparisons in tests
MOCK_TIMESTAMP = 1234567890
class EnforcerTest(ForsetiTestCase):
"""Extended unit tests for BatchFirewallEnforcer class."""
def setUp(self):
"""Set up."""
self.mock_compute = mock.patch.object(enforcer.batch_enforcer.compute,
'ComputeClient').start()
self.gce_service = self.mock_compute().service
self.gce_service.networks().list().execute.return_value = (
constants.SAMPLE_TEST_NETWORK_SELFLINK)
self.project = constants.TEST_PROJECT
self.mock_time = mock.patch.object(enforcer.batch_enforcer.datelib,
'Timestamp').start()
self.mock_time.now().AsMicroTimestamp.return_value = MOCK_TIMESTAMP
self.mock_time.now().AsSecondsSinceEpoch.return_value = MOCK_TIMESTAMP
self.enforcer = enforcer.initialize_batch_enforcer(
{},
concurrent_threads=1,
max_write_threads=1,
max_running_operations=0,
dry_run=True)
self.expected_summary = (
enforcer_log_pb2.BatchResult(
batch_id=MOCK_TIMESTAMP,
timestamp_start_msec=MOCK_TIMESTAMP,
timestamp_end_msec=MOCK_TIMESTAMP))
self.addCleanup(mock.patch.stopall)
def test_enforce_single_project(self):
"""Verifies enforce_single_project returns the correct results.
Setup:
* Set API calls to return the different firewall rules from the new
policy on the first call, and the expected new firewall rules on the
second call.
* Load a mock policy file.
* Create a temporary directory for writing the dremel recordio table out
to.
* Send the policy and project to EnforceSingleProject.
Expected Results:
* The results proto returned matches the expected results.
"""
self.gce_service.firewalls().list().execute.side_effect = [
constants.DEFAULT_FIREWALL_API_RESPONSE,
constants.EXPECTED_FIREWALL_API_RESPONSE]
policy_filename = get_datafile_path(__file__, 'sample_policy.json')
results = enforcer.enforce_single_project(self.enforcer, self.project,
policy_filename)
self.expected_summary.projects_total = 1
self.expected_summary.projects_success = 1
self.expected_summary.projects_changed = 1
self.expected_summary.projects_unchanged = 0
self.assertEqual(self.expected_summary, results.summary)
expected_results = enforcer_log_pb2.ProjectResult()
text_format.Merge(constants.SAMPLE_ENFORCER_PROJECTRESULTS_ASCIIPB,
expected_results)
expected_results.run_context = enforcer_log_pb2.ENFORCER_ONE_PROJECT
expected_results.gce_firewall_enforcement.policy_path = policy_filename
project_result = results.results[0]
self.assertEqual(expected_results, project_result)
def test_enforcer_raises_exception_with_invalid_json_policy(self):
"""Verifies json parsed correct as a list of dictionaries.
Setup:
* Load an invalid json file (no list).
* Give it to enforcer to parse and load
Expected Results:
* Enforcer should raise InvalidParsedPolicyFileError
"""
policy_filename = get_datafile_path(__file__, 'invalid_sample_policy.json')
with self.assertRaises(enforcer.InvalidParsedPolicyFileError) as r:
enforcer.enforce_single_project(
self.enforcer, self.project, policy_filename)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,825,034,318,722,512,400 | 35.945736 | 83 | 0.671842 | false |
bkidwell/wshare | wshare.py | 1 | 4231 | # See https://github.com/bkidwell/wshare
import argparse
import subprocess
import re
import sys
from wshare_config import config
def getConnDict():
txt = subprocess.getoutput('net use')
if re.search(r'There are no entries in the list\.', txt):
return dict()
match = re.search(r'--------.*\n([\w\W]*?)The command completed', txt)
if match is None:
print("Can't parse 'net use' output.")
sys.exit()
data = match.group(1).split('\n')
data = [row for row in data if not re.match('^ ', row)]
data = [re.split(r' +', row) for row in data]
result = dict()
for row in data:
if len(row) < 2: continue
if re.match(r'\w:', row[1]):
result[(row[1] + row[2]).lower()] = {
'drive_letter': row[1],
'path': row[2],
'username': None,
'password': None,
'status': row[0],
'in_conf': False,
}
else:
result[row[1].lower()] = {
'drive_letter': None,
'path': row[1],
'username': None,
'password': None,
'status': row[0],
'in_conf': False,
}
return result
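# Illustrative sketch (added; not in the original file): getConnDict() keys each entry
# by the lower-cased drive letter plus path (or just the path when no drive is mapped).
# A `net use` row such as "OK  Z:  \\server\share  Microsoft Windows Network" would
# roughly become:
#
#   {'z:\\server\share': {'drive_letter': 'Z:', 'path': '\\server\share',
#                         'username': None, 'password': None,
#                         'status': 'OK', 'in_conf': False}}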
def getAll():
conns = getConnDict()
for key, value in config.items():
if value['drive_letter']:
value['drive_letter'] = value['drive_letter'][0].upper() + ':'
path = value['path'].replace('/', '\\')
skey = (value['drive_letter'] + path if value['drive_letter'] else path).lower()
value['username'] = value['username'].replace('/', '\\')
if skey in conns:
conn = conns[skey]
conn['username'] = value['username']
conn['password'] = value['password'] if 'password' in value else ''
conn['drive_letter'] = conn['drive_letter'] or value['drive_letter']
conn['in_conf'] = key
else:
value['path'] = path
value['in_conf'] = key
value['status'] = 'Not connected'
conns[path.lower()] = value
conns = [conns[key] for key in sorted(conns.keys())]
return conns
def printStatus(connList):
i = 0
for conn in connList:
i += 1
if conn['in_conf']:
print(str(i) + ' [' + conn['in_conf'] + ']:: ' + (conn['drive_letter'] or '') + ' ' + conn['path'])
else:
print(':: ' + (conn['drive_letter'] or '') + ' ' + conn['path'] + ' (not in config)')
print(' ' + str(conn['status']))
def main(sel):
conns = getAll()
if sel is None:
print('\nNetwork shares:')
print('')
printStatus(conns)
print('')
num = input('Reconnect which share number or name? (ENTER to quit) ')
print('')
else:
num = sel
if num == '' or num == '0': return False
conn = None
for value in conns:
if value['in_conf'] and value['in_conf'] == num:
conn = value
if conn is None:
try:
num = int(num)
conn = conns[num - 1]
except:
print('Bad number or name.')
if sel: return False
else: return True
if sel:
print('Reconnecting ' + sel + '...')
if conn['drive_letter']:
subprocess.getoutput('net use ' + conn['drive_letter'] + ' /delete')
subprocess.getoutput('net use ' + conn['path'] + ' /delete')
if not 'password' in conn: conn['password'] = ''
p = ' "' + conn['password'] + '"' if conn['password'] else ''
subprocess.call(
'net use ' +
(conn['drive_letter'] if conn['drive_letter'] else '') + ' ' +
conn['path'] + ' ' +
'/user:' + conn['username'] + p
)
if not sel is None:
input('Press ENTER to continue.')
return False
else:
return True
parser = argparse.ArgumentParser(description='List Windows File Sharing shares and reconnect bookmarks.')
parser.add_argument(
'selection', metavar='NAME', type=str, nargs='?',
help='The name of the bookmark from wshare_config.py to reconnect'
)
args = parser.parse_args()
while True:
if not main(args.selection): break
| mit | 739,811,618,023,276,800 | 30.81203 | 111 | 0.509572 | false |
wasim21k/pihome | cron/port_scanner.py | 1 | 1392 | #!/usr/bin/env python
#TheZero
#This code is under Public Domain
#ref: https://gist.github.com/TheZ3ro/7255052
from threading import Thread
import socket
import os, re, time, sys, subprocess
class bc:
HEADER = '\033[0;36;40m'
ENDC = '\033[0m'
SUB = '\033[3;30;45m'
WARN = '\033[0;31;40m'
GREEN = '\033[0;32;40m'
org = '\033[91m'
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('google.com', 0))
ip = s.getsockname()[0]
#Get the Local IP
end = re.search(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', ip)
#Chop down the last IP Digits
create_ip = re.search(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.', ip)
print "PiHome IP Address: "+bc.GREEN+str(end.group(0))+bc.ENDC
host = str(end.group(0))
host = '192.168.99.5'  # NOTE: overrides the detected local IP with a fixed target host
from_port = 5000
to_port = 5005
#host = raw_input('host > ')
#from_port = input('start scan from port > ')
#to_port = input('finish scan to port > ')
counting_open = []
counting_close = []
threads = []
def scan(port):
s = socket.socket()
result = s.connect_ex((host,port))
print('working on port > '+(str(port)))
if result == 0:
counting_open.append(port)
print((str(port))+' -> open')
s.close()
else:
counting_close.append(port)
#print((str(port))+' -> close')
s.close()
for i in range(from_port, to_port+1):
t = Thread(target=scan, args=(i,))
threads.append(t)
t.start()
#[x.join() for x in threads]
print(counting_open)
| gpl-3.0 | -1,480,537,210,011,121,200 | 21.819672 | 63 | 0.623563 | false |
OpenDMM/bitbake | lib/bb/server/xmlrpc.py | 1 | 13971 | #
# BitBake XMLRPC Server
#
# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This module implements an xmlrpc server for BitBake.
Use this by deriving a class from BitBakeXMLRPCServer and then adding
methods which you want to "export" via XMLRPC. If the methods have the
prefix xmlrpc_, then registering those functions will happen automatically,
if not, you need to call register_function.
Use register_idle_function() to add a function which the xmlrpc server
calls from within server_forever when no requests are pending. Make sure
that those functions are non-blocking or else you will introduce latency
in the server's main loop.
"""
import bb
import xmlrpclib, sys
from bb import daemonize
from bb.ui import uievent
import hashlib, time
import socket
import os, signal
import threading
try:
import cPickle as pickle
except ImportError:
import pickle
DEBUG = False
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import inspect, select, httplib
from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer
class BBTransport(xmlrpclib.Transport):
def __init__(self, timeout):
self.timeout = timeout
self.connection_token = None
xmlrpclib.Transport.__init__(self)
# Modified from default to pass timeout to HTTPConnection
def make_connection(self, host):
#return an existing connection if possible. This allows
#HTTP/1.1 keep-alive.
if self._connection and host == self._connection[0]:
return self._connection[1]
# create a HTTP connection object from a host descriptor
chost, self._extra_headers, x509 = self.get_host_info(host)
#store the host argument along with the connection object
self._connection = host, httplib.HTTPConnection(chost, timeout=self.timeout)
return self._connection[1]
def set_connection_token(self, token):
self.connection_token = token
def send_content(self, h, body):
if self.connection_token:
h.putheader("Bitbake-token", self.connection_token)
xmlrpclib.Transport.send_content(self, h, body)
def _create_server(host, port, timeout = 60):
t = BBTransport(timeout)
s = xmlrpclib.Server("http://%s:%d/" % (host, port), transport=t, allow_none=True)
return s, t
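# Note (added): callers keep both return values so the transport can carry the
# connection token later; BitBakeXMLRPCServerConnection below does roughly
#   s, t = _create_server(host, port)  ...  t.set_connection_token(token)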
class BitBakeServerCommands():
def __init__(self, server):
self.server = server
self.has_client = False
def registerEventHandler(self, host, port, featureset = []):
"""
Register a remote UI Event Handler
"""
s, t = _create_server(host, port)
# we don't allow connections if the cooker is running
if (self.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
return None
original_featureset = list(self.cooker.featureset)
for f in featureset:
self.cooker.featureset.setFeature(f)
if (original_featureset != list(self.cooker.featureset)):
self.cooker.reset()
self.event_handle = bb.event.register_UIHhandler(s)
return self.event_handle
def unregisterEventHandler(self, handlerNum):
"""
Unregister a remote UI Event Handler
"""
return bb.event.unregister_UIHhandler(handlerNum)
def runCommand(self, command):
"""
Run a cooker command on the server
"""
return self.cooker.command.runCommand(command, self.server.readonly)
def getEventHandle(self):
return self.event_handle
def terminateServer(self):
"""
Trigger the server to quit
"""
self.server.quit = True
print("Server (cooker) exiting")
return
def addClient(self):
if self.has_client:
return None
token = hashlib.md5(str(time.time())).hexdigest()
self.server.set_connection_token(token)
self.has_client = True
return token
def removeClient(self):
if self.has_client:
self.server.set_connection_token(None)
self.has_client = False
if self.server.single_use:
self.server.quit = True
# This request handler checks if the request has a "Bitbake-token" header
# field (this comes from the client side) and compares it with its internal
# "Bitbake-token" field (this comes from the server). If the two are not
# equal, it is assumed that a client is trying to connect to the server
# while another client is connected to the server. In this case, a 503 error
# ("service unavailable") is returned to the client.
class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
def __init__(self, request, client_address, server):
self.server = server
SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)
def do_POST(self):
try:
remote_token = self.headers["Bitbake-token"]
except:
remote_token = None
if remote_token != self.server.connection_token and remote_token != "observer":
self.report_503()
else:
if remote_token == "observer":
self.server.readonly = True
else:
self.server.readonly = False
SimpleXMLRPCRequestHandler.do_POST(self)
def report_503(self):
self.send_response(503)
response = 'No more client allowed'
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
class XMLRPCProxyServer(BaseImplServer):
""" not a real working server, but a stub for a proxy server connection
"""
def __init__(self, host, port):
self.host = host
self.port = port
class XMLRPCServer(SimpleXMLRPCServer, BaseImplServer):
# remove this when you're done with debugging
# allow_reuse_address = True
def __init__(self, interface):
"""
Constructor
"""
BaseImplServer.__init__(self)
if (interface[1] == 0): # anonymous port, not getting reused
self.single_use = True
# Use auto port configuration
if (interface[1] == -1):
interface = (interface[0], 0)
SimpleXMLRPCServer.__init__(self, interface,
requestHandler=BitBakeXMLRPCRequestHandler,
logRequests=False, allow_none=True)
self.host, self.port = self.socket.getsockname()
self.connection_token = None
#self.register_introspection_functions()
self.commands = BitBakeServerCommands(self)
self.autoregister_all_functions(self.commands, "")
self.interface = interface
self.single_use = False
def addcooker(self, cooker):
BaseImplServer.addcooker(self, cooker)
self.commands.cooker = cooker
def autoregister_all_functions(self, context, prefix):
"""
Convenience method for registering all functions in the scope
of this class that start with a common prefix
"""
methodlist = inspect.getmembers(context, inspect.ismethod)
for name, method in methodlist:
if name.startswith(prefix):
self.register_function(method, name[len(prefix):])
def serve_forever(self):
# Start the actual XMLRPC server
bb.cooker.server_main(self.cooker, self._serve_forever)
def _serve_forever(self):
"""
Serve Requests. Overloaded to honor a quit command
"""
self.quit = False
while not self.quit:
fds = [self]
nextsleep = 0.1
for function, data in self._idlefuns.items():
try:
retval = function(self, data, False)
if retval is False:
del self._idlefuns[function]
elif retval is True:
nextsleep = 0
else:
fds = fds + retval
except SystemExit:
raise
except:
import traceback
traceback.print_exc()
pass
socktimeout = self.socket.gettimeout() or nextsleep
socktimeout = min(socktimeout, nextsleep)
# Mirror what BaseServer handle_request would do
fd_sets = select.select(fds, [], [], socktimeout)
if fd_sets[0] and self in fd_sets[0]:
self._handle_request_noblock()
# Tell idle functions we're exiting
for function, data in self._idlefuns.items():
try:
retval = function(self, data, True)
except:
pass
self.server_close()
return
def set_connection_token(self, token):
self.connection_token = token
class BitBakeXMLRPCServerConnection(BitBakeBaseServerConnection):
def __init__(self, serverImpl, clientinfo=("localhost", 0), observer_only = False, featureset = []):
self.connection, self.transport = _create_server(serverImpl.host, serverImpl.port)
self.clientinfo = clientinfo
self.serverImpl = serverImpl
self.observer_only = observer_only
self.featureset = featureset
def connect(self):
if not self.observer_only:
token = self.connection.addClient()
else:
token = "observer"
if token is None:
return None
self.transport.set_connection_token(token)
self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo, self.featureset)
for event in bb.event.ui_queue:
self.events.queue_event(event)
return self
def removeClient(self):
if not self.observer_only:
self.connection.removeClient()
def terminate(self):
# Don't wait for server indefinitely
import socket
socket.setdefaulttimeout(2)
try:
self.events.system_quit()
except:
pass
try:
self.connection.removeClient()
except:
pass
class BitBakeServer(BitBakeBaseServer):
def initServer(self, interface = ("localhost", 0)):
self.interface = interface
self.serverImpl = XMLRPCServer(interface)
def detach(self):
daemonize.createDaemon(self.serverImpl.serve_forever, "bitbake-cookerdaemon.log")
del self.cooker
def establishConnection(self, featureset):
self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, self.interface, False, featureset)
return self.connection.connect()
def set_connection_token(self, token):
self.connection.transport.set_connection_token(token)
class BitBakeXMLRPCClient(BitBakeBaseServer):
def __init__(self, observer_only = False):
self.observer_only = observer_only
# if we need extra caches, just tell the server to load them all
pass
def saveConnectionDetails(self, remote):
self.remote = remote
def saveConnectionConfigParams(self, configParams):
self.configParams = configParams
def establishConnection(self, featureset):
# The format of "remote" must be "server:port"
try:
[host, port] = self.remote.split(":")
port = int(port)
except Exception as e:
bb.fatal("Failed to read remote definition (%s)" % str(e))
# use automatic port if port set to -1, meaning read it from
# the bitbake.lock file
if port == -1:
lock_location = "%s/bitbake.lock" % self.configParams.environment.get('BUILDDIR')
lock = bb.utils.lockfile(lock_location, False, False)
if lock:
# This means there is no server running which we can
# connect to on the local system.
bb.utils.unlockfile(lock)
return None
try:
lf = open(lock_location, 'r')
remotedef = lf.readline()
[host, port] = remotedef.split(":")
port = int(port)
lf.close()
self.remote = remotedef
except Exception as e:
bb.fatal("Failed to read bitbake.lock (%s)" % str(e))
# We need our IP for the server connection. We get the IP
# by trying to connect with the server
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((host, port))
ip = s.getsockname()[0]
s.close()
except Exception as e:
bb.fatal("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
try:
self.serverImpl = XMLRPCProxyServer(host, port)
self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, (ip, 0), self.observer_only, featureset)
return self.connection.connect()
except Exception as e:
bb.fatal("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
def endSession(self):
self.connection.removeClient()
| gpl-2.0 | 7,607,331,998,244,826,000 | 34.549618 | 117 | 0.620643 | false |
datawire/quark | quarkc/backend.py | 1 | 34267 | # Copyright 2015 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os, types, tempfile, logging, inspect
import json
from collections import OrderedDict
from . import java, python, javascript, ruby, shell
from .ast import (
AST, Method, Class, Function, Package, File, Dependency, Interface, Primitive,
Macro, Field, Type, TypeParam, Import, Local, ExprStmt,
Assign, If, Return, While, Break, Continue, Var, Call, String, Number,
Bool, List, Map, Name, Null, Native, NativeCase, Fixed, Attr, Cast,
Param, Declaration, Super, Expression,
)
from .compiler import texpr, TypeExpr
from .constants import (BUILTIN, BUILTIN_FILE, REFLECT)
from .dispatch import overload
from .helpers import (
is_meta, has_super, compiled_quark, is_newer, namever, mdroot, readme,
base_type, get_defaulted_methods, is_abstract, base_constructors, doc,
get_field, constructors, get_defaulted_statics
)
from quarkc import reflection
class FakeExpr(object): pass
class Backend(object):
PRETTY_INSTALL = "TBD"
ext = None
gen = None
def __init__(self, include_stdlib=False):
self.include_stdlib = include_stdlib
self.files = OrderedDict()
self._imports = OrderedDict()
self.current_file = None
self.current_package = None
self.packages = []
self.definitions = []
self.names = []
self.bindings = None
self.entry = None
self.dist = None
self.root = None
self.roots = None
self.dependencies = OrderedDict()
self.log = logging.getLogger("quark.compile")
def install(self, offline):
cls = self.__class__.__name__
pkg = self.packages[0].name
target = self.install_target()
if os.path.exists(self.root.url):
deps = (compiled_quark(self.root.url),)
else:
deps = ()
modified = getattr(self.root, "_modified", False)
newer = is_newer(target,
__file__, inspect.getsourcefile(self.gen), *deps)
uptodate = not modified and bool(newer)
# F F T T
# F F T F
# T T F T
# F T F F
self.log.debug("Uptodate: %s, Modified %s, Newer: %s",
uptodate, modified, newer)
if uptodate:
self.log.debug("Skipping %s for %s[%s]", cls, pkg, target)
return
self.log.debug("Emitting generated %s for %s", cls, pkg)
dir = tempfile.mkdtemp(suffix="-%s" % cls,
prefix="%s-" % pkg)
self.write(dir)
quark_pkg = "quark"
if offline:
            # XXX: while use of external packages is private...
if str(pkg) == quark_pkg:
offline = False
mode = "online, automatic"
else:
mode = "offline"
else:
mode = "online, selected by user"
self.log.info("Installing %s %s with %s (%s)",
cls, repr(pkg), self.PRETTY_INSTALL, mode)
self.install_command(dir, offline)
self.root._modified = False
def visit_Root(self, r):
self.root = r
def visit_DistUnit(self, du):
self.dist = du
def visit_Dependency(self, dep):
self.dependencies["%s:%s.%s-%s" % (dep.lang, dep.group, dep.artifact, dep.version)] = dep
def visit_Use(self, use):
        # XXX This is *probably* not a bug, but the fact that self.roots starts as None
# isn't great...
entry = self.roots[use.qualified].files[0] # pylint: disable=unsubscriptable-object
name, ver = namever(entry)
self.dependencies[name] = entry
def visit_File(self, file):
if not self.entry and not is_meta(file):
self.entry = file
def visit_Class(self, cls):
self.definitions.append(cls)
def visit_Primitive(self, p):
pass
def visit_Function(self, f):
if not isinstance(f, Method):
self.definitions.append(f)
def visit_Package(self, p):
self.packages.append(p)
self.definitions.append(p)
def add_native_includes(self, code):
if self.entry.name.endswith(BUILTIN_FILE):
return code
du_name, _ = namever(self.entry)
includes = []
for path, content in self.entry.root.included.items():
if path.endswith(self.ext):
includes.append(self.gen.native_include(path, du_name))
code.head += "".join(includes)
return code
def setfile(self, fname, maker):
self.current_file = fname
if fname not in self._imports:
self._imports[fname] = OrderedDict()
if fname not in self.files:
self.files[fname] = self.add_native_includes(maker())
return False
return True
def leave_Root(self, r):
if self.dist:
self.entry = self.dist.file
self.mdpkg, cleanup = reflection.reflect(r, self)
self.main = None
for d in self.definitions:
fname = self.file(d)
if fname is None:
continue
self.current_package = d.package
if self.setfile(fname, lambda _d=d: self.make_file(_d)):
self.files[fname] += "\n"
dfn_code = self.definition(d)
if dfn_code and d.package is None and d.file.name.endswith(BUILTIN_FILE):
self.files[fname] += self.gen.comment("BEGIN_BUILTIN") + "\n"
self.files[fname] += dfn_code
self.files[fname] += "\n" + self.gen.comment("END_BUILTIN")
else:
self.files[fname] += dfn_code
cleanup()
if self.main:
self.genmain()
for name in self.files:
code = self.files[name]
# XXX: this is a hack to avoid circularly dependent
# imports for generated metadata. To fix this properly, we
# really need to change the import model for python and js
# to import classes on demand at the point of use rather
# than into the module/package level scope.
raw_imports = self._imports[name].keys()
refimps = filter(lambda x: x[0] == (BUILTIN, REFLECT), raw_imports)
imports = filter(lambda x: x[0] != (BUILTIN, REFLECT), raw_imports)
mdimps = filter(lambda x: x[0][0].endswith("_md"), imports)
imports = filter(lambda x: not x[0][0].endswith("_md"), imports)
if name.split("/")[0].endswith("_md"):
headimps = self.genimps(refimps)
tailimps = self.genimps(imports + mdimps, lazy=True)
else:
headimps = self.genimps(refimps + imports)
tailimps = self.genimps(mdimps, lazy=True)
if headimps: code.head += headimps + "\n\n"
if tailimps: code.tail = "\n\n" + tailimps + "\n\n" + code.tail
content = str(code)
if content[-1:] != "\n": content += "\n"
self.files[name] = content
def genmain(self):
self.current_package = None
name, ver = namever(self.entry)
fname = self.gen.main_file(self.gen.name(name))
self.setfile(fname, lambda: self.gen.make_main_file(self.gen.name(name)))
path = self.add_import(self.main)
self.files[fname] += self.gen.main(path, self.name(self.main.name))
def genimps(self, imps, lazy=False):
seen = set()
imps = [self.gen.import_(pkg, org, dep, seen=seen, lazy=lazy) for (pkg, org, dep) in imps]
return "\n".join(filter(lambda x: x is not None, imps))
@overload(AST)
def add_import(self, obj):
return self.add_import(tuple(self.package(obj)), obj.root, obj.file)
@overload(list)
def add_import(self, pkg, root, file):
return self.add_import(tuple(pkg), root, file)
@overload(tuple)
def add_import(self, pkg, root, file):
imports = self._imports[self.current_file]
if self.current_package:
org = tuple(self.package(self.current_package))
else:
org = ()
if pkg != org:
if root != self.root:
dep, ver = namever(file or root)
else:
dep = None
imports[(pkg, org, dep)] = True
return list(self.qualify(pkg, org))
@overload(Class)
def file(self, cls):
return self.gen.class_file(self.package(cls), self.name(cls.name), self.fname(cls))
@overload(Function)
def file(self, fun):
return self.gen.function_file(self.package(fun), self.name(fun.name), self.fname(fun))
@overload(Package)
def file(self, pkg):
return self.gen.package_file(self.package(pkg.package), self.name(pkg.name), self.fname(pkg))
def fname(self, obj):
return os.path.splitext(os.path.basename(obj.file.name))[0]
@property
def rtloc(self):
if self.include_stdlib:
rtloc, _ = namever(self.entry)
else:
rtloc = BUILTIN
return rtloc
@overload(Class)
def make_file(self, cls):
return self.gen.make_class_file(self.package(cls), self.name(cls.name), rtloc=self.rtloc)
@overload(Function)
def make_file(self, fun):
return self.gen.make_function_file(self.package(fun), self.name(fun.name), mdroot(self.entry))
@overload(Package)
def make_file(self, pkg):
return self.gen.make_package_file(self.package(pkg.package), self.name(pkg.name), rtloc = self.rtloc)
def write(self, target):
if not os.path.exists(target):
os.makedirs(target)
name, version = namever(self.entry)
packages = OrderedDict()
for pkg in self.packages:
lines = []
readme(pkg, lines)
packages[tuple(self.package(pkg))] = "\n".join(lines)
packages[tuple(self.mdpkg)] = "## Root\n"
files_to_emit = OrderedDict(self.files)
for path, content in self.entry.root.included.items():
if path.endswith(self.ext):
files_to_emit[path] = content
deps = [] # List of (group, artifact, version)
for dep in self.dependencies.values():
if isinstance(dep, File):
dep_name, dep_ver = namever(dep)
deps.append((dep_name, dep_name, dep_ver))
elif isinstance(dep, Dependency):
if dep.lang.text == self.ext:
deps.append((dep.group, dep.artifact, dep.version))
else:
assert False, (dep, type(dep))
files = self.gen.package(name, version, packages, files_to_emit, deps)
for name, content in files.items():
path = os.path.join(target, name)
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
with open(path, "wb") as fd:
fd.write(content)
self.log.debug(" wrote %s", path)
@overload(Package)
def definition(self, pkg):
self.current_package = pkg
for d in pkg.definitions:
if isinstance(d, Package):
self.add_import(d)
return "" # self.doc(pkg)
def is_entry_package(self, pkg):
name, ver = namever(pkg)
return pkg.name.text == name
@overload(Function)
def definition(self, fun):
if fun.body is None: return ""
prolog = ""
if fun.name.text == "main" and len(fun.params) == 1 and \
fun.params[0].resolved.type.name.text == "List":
if fun.file == self.root.files[0] and \
self.is_entry_package(fun.package):
self.main = fun
prolog = self.gen.main_prolog()
return prolog + self.gen.function(self.doc(fun),
self.type(fun.type),
self.name(fun.name),
[self.param(p) for p in fun.params],
self.block(fun.body))
@overload(Class)
def definition(self, cls):
clazz = self.name(cls.name)
parameters = [self.name(p.name) for p in cls.parameters]
base = self.type(base_type(cls))
interfaces = [self.type(t) for t in cls.bases
if isinstance(t.resolved.type, (Interface, Primitive))]
static_fields = []
fields = []
methods = []
constructors = []
defaulted, self.bindings = get_defaulted_methods(cls)
defaulted_statics = get_defaulted_statics(cls)
for d in cls.definitions + [None] + defaulted.values() + defaulted_statics.values():
if isinstance(d, Macro): continue
if d is None:
extra_methods = getattr(cls, "_extra_methods", None)
if extra_methods:
methods.extend(extra_methods())
del cls._extra_methods
extra_statics = getattr(cls, "_extra_statics", None)
if extra_statics:
static_fields.extend(extra_statics())
del cls._extra_statics
continue
doc = self.doc(d)
if isinstance(d, Field):
fun = self.gen.static_field if d.static else self.gen.field
holder = static_fields if d.static else fields
holder.append(fun(doc,
clazz,
self.type(d.type),
self.name(d.name),
self.expr(d.value)))
elif d.type:
if d.body:
fun = self.gen.static_method if d.static else self.gen.method
methods.append(fun(doc,
clazz,
self.type(d.type),
self.name(d.name),
[self.param(p) for p in d.params],
self.block(d.body)))
else:
methods.append(self.gen.abstract_method(doc,
clazz,
self.type(d.type),
self.name(d.name),
[self.param(p) for p in d.params]))
else:
if base and not has_super(d):
header = [self.gen.expr_stmt(self.gen.invoke_super(clazz,
self.name(base_type(cls).resolved.type.name),
[]))]
elif not base:
finit = self.gen.field_init()
if finit:
header = [finit]
else:
header = None
else:
header = None
constructors.append(self.gen.constructor(doc,
clazz,
[self.param(p) for p in d.params],
self.block(d.body, header)))
if not constructors:
constructors = self.default_constructors(cls)
return self.gen.clazz(self.doc(cls), is_abstract(cls), clazz, parameters, base,
interfaces, static_fields, fields, constructors, methods)
@overload(Interface)
def definition(self, iface):
name = self.name(iface.name)
parameters = [self.name(p.name) for p in iface.parameters]
bases = [self.type(t) for t in iface.bases]
methods = []
static_fields = []
for d in iface.definitions + [None]:
if d is None:
extra_methods = getattr(iface, "_extra_methods", None)
if extra_methods:
methods.extend(extra_methods())
del iface._extra_methods
extra_statics = getattr(iface, "_extra_statics", None)
if extra_statics:
static_fields.extend(extra_statics())
del iface._extra_statics
if isinstance(d, Field) and d.static:
static_fields.append(self.gen.static_field(self.doc(d),
name,
self.type(d.type),
self.name(d.name),
self.expr(d.value)))
elif isinstance(d, Method):
methods.append(self.gen.interface_method(self.doc(d),
name,
self.type(d.type),
self.name(d.name),
[self.param(p) for p in d.params],
self.block(d.body)))
return self.gen.interface(self.doc(iface), name, parameters, bases, static_fields,
methods)
def default_constructors(self, cls):
name = self.name(cls.name)
btype = base_type(cls)
base = btype.resolved.type if btype else None
cons = base_constructors(cls)
result = []
for con in cons:
params = [self.param(p) for p in con.params]
args = [self.name(p.name) for p in con.params]
stmt = self.gen.expr_stmt(self.gen.invoke_super(name, self.name(base.name), args))
result.append(self.gen.constructor("", name, params, self.gen.block([stmt])))
if result:
return result
elif base:
body = self.gen.block([self.gen.expr_stmt(self.gen.invoke_super(name, self.name(base.name), []))])
result.append(self.gen.constructor("", name, [], body))
else:
result.append(self.gen.default_constructor(name))
return result
def doc(self, obj):
return self.gen.doc(doc(obj))
def push(self, env):
self.names.append(env)
def pop(self):
self.names.pop()
@overload(Name)
def name(self, n):
return self.name(n.text)
@overload(basestring)
def name(self, n):
if self.names:
env = self.names[-1]
if n in env:
return env[n]
return self.gen.name(n)
@overload(AST)
def package(self, node):
if isinstance(node, Package):
me = self.name(node.name)
if node.package:
return self.package(node.package) + [me]
else:
return [me]
elif node.package:
return self.package(node.package)
else:
assert False
@overload(types.NoneType)
def package(self, node):
return []
@overload(list)
def package(self, path):
return path
@overload(Type)
def type(self, t):
return self.type(t.resolved)
@overload(TypeExpr)
def type(self, texpr):
return self.type(texpr.type, texpr.bindings)
@overload(Class, dict)
def type(self, cls, bindings):
mapping = None
for a in cls.annotations:
if a.name.text == "mapping":
mapping = a
break
if mapping:
path = []
name = self.expr(mapping.arguments[0])
else:
path = self.add_import(cls)
name = self.name(cls.name)
if cls.parameters:
params = [self.type(texpr(bindings[p].type, bindings, bindings[p].bindings)) for p in cls.parameters]
else:
params = []
return self.gen.type(path, name, params)
def qualify(self, package, origin):
return self.gen.qualify(package, origin)
@overload(TypeParam)
def type(self, tparam, bindings):
if tparam in bindings:
return self.type(bindings[tparam])
elif self.bindings and tparam in self.bindings:
return self.type(self.bindings[tparam])
else:
return self.name(tparam.name)
@overload(types.NoneType)
def type(self, n):
return None
def param(self, p):
return self.gen.param(self.type(p.type),
self.name(p.name),
self.expr(p.value))
def block(self, b, header=None):
if b is None:
return header
else:
return self.gen.block((header or []) + [self.statement(s) for s in b.statements])
@overload(Import)
def statement(self, imp):
return self.gen.comment(imp.code())
@overload(Local)
def statement(self, s):
return self.gen.local(self.type(s.declaration.type),
self.name(s.declaration.name),
self.maybe_cast(s.declaration.type, s.declaration.value))
@overload(ExprStmt)
def statement(self, s):
return self.gen.expr_stmt(self.expr(s.expr))
@overload(Assign)
def statement(self, ass):
return self.gen.assign(self.expr(ass.lhs), self.maybe_cast(ass.lhs, ass.rhs))
@overload(Return)
def statement(self, ret):
return self.gen.return_(self.maybe_cast(ret.callable.type, ret.expr))
@overload(If)
def statement(self, iff):
return self.gen.if_(self.expr(iff.predicate),
self.block(iff.consequence),
self.block(iff.alternative))
@overload(While)
def statement(self, wh):
return self.gen.while_(self.expr(wh.condition), self.block(wh.body))
@overload(Break)
def statement(self, brk):
return self.gen.break_()
@overload(Continue)
def statement(self, cnt):
return self.gen.continue_()
@overload(str)
def expr(self, s):
return s
@overload(Var)
def expr(self, v):
return self.var(v.definition, v)
@overload(Call)
def expr(self, c):
type = c.expr.resolved.type
return self.invoke(type, c.expr, [self.coerce(a) for a in c.args])
@overload(String)
def expr(self, s):
return self.gen.string(s)
@overload(Number)
def expr(self, n):
return self.gen.number(n)
@overload(Bool)
def expr(self, b):
return self.gen.bool_(b)
@overload(List)
def expr(self, l):
return self.gen.list_([self.expr(e) for e in l.elements])
@overload(Map)
def expr(self, m):
return self.gen.map([(self.expr(e.key), self.expr(e.value)) for e in m.entries])
@overload(Null)
def expr(self, n):
return self.gen.null()
@overload(Native)
def expr(self, n):
return "".join([self.expr(c) for c in n.cases])
@overload(NativeCase)
def expr(self, nc):
if nc.name in (None, self.ext):
return "".join([self.expr(c) for c in nc.children])
else:
return ""
@overload(Fixed)
def expr(self, f):
return f.text
@overload(Attr)
def expr(self, a):
type = a.expr.resolved.type
return self.get(type, a.resolved.type, a.expr, a.attr)
@overload(Type)
def expr(self, t):
return self.type(t)
@overload(Cast)
def expr(self, c):
return self.maybe_cast(c, c.expr)
@overload(types.NoneType)
def expr(self, n):
return None
@overload(Param)
def var(self, _, v):
return self.gen.local_ref(self.name(v.name))
@overload(Declaration)
def var(self, _, v):
return self.gen.local_ref(self.name(v.name))
@overload(Class)
def var(self, _, v):
return self.gen.class_ref(self.name(v.name))
@overload(Method)
def var(self, _, v):
return self.gen.method_ref(self.name(v.name))
@overload(Field)
def var(self, f, v):
if f.static:
path = self.add_import(f.clazz)
return self.gen.get_static_field(path, self.name(f.clazz.name), self.name(v.name))
else:
return self.gen.field_ref(self.name(v.name))
@overload(Class, Class)
def get(self, cls, type, expr, attr):
f = get_field(cls, attr)
if f.static:
path = self.add_import(f.clazz)
return self.gen.get_static_field(path, self.name(f.clazz.name), self.name(attr))
else:
return self.gen.get_field(self.expr(expr), self.name(attr))
@overload(Class, TypeParam)
def get(self, cls, type, expr, attr):
return self.gen.get_field(self.expr(expr), self.name(attr))
@overload(Class, Method)
def get(self, cls, type, expr, attr):
return self.gen.get_method(self.expr(expr), self.name(attr))
@overload(Package, Package)
def get(self, pkg, type, expr, attr):
return self.gen.get_package(self.expr(expr), self.name(attr))
@overload(Package, Function)
def get(self, pkg, type, expr, attr):
return self.gen.get_function(self.expr(expr), self.name(attr))
@overload(Function)
def invoke(self, func, expr, args):
path = self.add_import(func)
return self.gen.invoke_function(path, self.name(func.name), args)
@overload(Method, Attr)
def invoke(self, method, expr, args):
if isinstance(expr.expr, Super):
return self.gen.invoke_super_method(self.name(expr.clazz.name),
self.name(expr.expr.resolved.type.name),
self.name(method.name),
args)
else:
if method.static:
path = self.add_import(method.clazz)
return self.gen.invoke_static_method(path, self.name(method.clazz.name), self.name(method.name), args)
else:
return self.gen.invoke_method(self.expr(expr.expr), self.name(method.name), args)
@overload(Method, Var)
def invoke(self, method, var, args):
if method.static:
path = self.add_import(method.clazz)
return self.gen.invoke_static_method(path, self.name(method.clazz.name), self.name(method.name), args)
else:
return self.gen.invoke_method_implicit(self.name(method.name), args)
@overload(Class)
def invoke(self, cls, expr, args):
cons = constructors(cls)
con = cons[0] if cons else None
if isinstance(con, Macro):
return self.apply_macro(con, expr, args)
else:
return self.gen.construct(self.type(expr.resolved), args)
@overload(Class, Super)
def invoke(self, cls, sup, args):
return self.gen.invoke_super(self.name(sup.clazz.name), self.name(cls.name), args)
@overload(Macro)
def invoke(self, macro, expr, args):
return self.apply_macro(macro, expr, args)
@overload(Expression)
def coerce(self, expr):
if expr.coersion:
if isinstance(expr.coersion, Macro):
fake = FakeExpr()
fake.expr = expr
fake.resolved = expr.coersion.resolved
return self.apply_macro(expr.coersion, fake, ())
else:
return self.gen.invoke_method(self.expr(expr), self.name(expr.coersion.name), [])
else:
return self.expr(expr)
def apply_macro(self, macro, expr, args):
env = {}
if macro.clazz and macro.type:
bindings = expr.resolved.bindings
for tparam in bindings:
env[tparam.name.text] = self.type(bindings[tparam])
# for method macros we use expr to access self
env["self"] = self.expr(expr.expr)
idx = 0
for p in macro.params:
env[p.name.text] = args[idx]
idx += 1
self.push(env)
try:
result = self.expr(macro.body)
return result
finally:
self.pop()
@overload(AST, object)
def maybe_cast(self, type, expr):
return self.maybe_cast(type.resolved, expr)
@overload(TypeExpr, object)
def maybe_cast(self, texpr, expr):
if expr is None: return None
if expr.coersion:
return self.coerce(expr)
if texpr.assignableFrom(expr.resolved):
return self.expr(expr)
else:
return self.gen.cast(self.type(texpr), self.expr(expr))
def fake(self, type, expr):
fake = FakeExpr()
fake.resolved = type
fake.coersion = None
fake.expr = expr
return fake
@overload(FakeExpr)
def expr(self, fake):
return fake.expr
def is_virtual(python_command):
output = shell.call(python_command, "-c", 'import sys; print(hasattr(sys, "real_prefix"))')
return output.strip() == "True"
def is_root():
return os.geteuid() == 0
def is_user(python_command):
return not is_virtual(python_command) and not is_root()
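# Note (added): is_user() is True for a non-root interpreter outside a virtualenv;
# the pip-based backends below use it to decide whether to pass --user to pip install.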
class Java(Backend):
PRETTY_INSTALL = "Maven"
argswitch = "--java"
ext = "java"
gen = java
def install_target(self):
name, ver = namever(self.entry)
return self._install_target(name, ver)
def _install_target(self, name, ver):
jar = os.path.join(os.environ["HOME"], ".m2/repository", name, name, ver, "%s-%s.jar" % (name, ver))
if os.path.exists(jar):
return jar
return None
def install_command(self, dir, offline):
cmd = ["mvn", "install", "-DskipTests"]
if offline: cmd += ["--offline"]
shell.call(*cmd, cwd=dir, stage="install")
def run(self, name, version, args):
jar = os.path.join(os.environ["HOME"], ".m2", "repository", name, name, version,
"%s-%s.jar" % (name, version))
os.execlp("java", "java", "-jar", jar, name, *args)
class Python(Backend):
PRETTY_INSTALL = "PIP"
argswitch = "--python"
ext = "py"
gen = python
python_command = "python2"
pip_command = "pip2"
def install_target(self):
name, ver = namever(self.entry)
return self._install_target(name, ver)
def _install_target(self, name, ver):
return shell.get_pip_pkg(name, stage="install", command=self.pip_command)
def install_command(self, dir, offline):
shell.call(self.python_command, "setup.py", "-q", "bdist_wheel", cwd=dir, stage="install")
wheels = [name for name in os.listdir(os.path.join(dir, "dist")) if name.endswith(".whl")]
for wheel in wheels:
cmd = [self.pip_command, "install",]
if offline: cmd += ["--no-index"]
if is_user(self.python_command): cmd += ["--user"]
cmd += ["--upgrade", "dist/%s" % wheel]
shell.call(*cmd, cwd=dir, stage="install")
def run(self, name, version, args):
main = self.gen.name(name)
python = shell.user_override((self.python_command,))[0]
os.execlp(python, python, "-c",
"import %s; %s.call_main()" % (main, main), name, *args)
class Python3(Python):
argswitch = "--python3"
python_command = "python3"
pip_command = "pip3"
class JavaScript(Backend):
PRETTY_INSTALL = "NPM"
argswitch = "--javascript"
ext = "js"
gen = javascript
def install_target(self):
name, ver = namever(self.entry)
return self._install_target(name, ver)
def _install_target(self, name, ver):
try:
output = shell.call("npm", "ll", "--depth", "0", "--json", name, errok=True)
return json.loads(output).get("dependencies",{}).get(name,{}).get("path")
except ValueError:
pass
except shell.ShellError:
pass
return None
def install_command(self, dir, offline):
cmd = ["npm", "install"]
if offline: cmd += ["--cache-min", "9999999"]
cmd += [dir]
shell.call(*cmd, stage="install")
def run(self, name, version, args):
main = self.gen.name(name)
os.execlp("node", "node", "-e", 'require("%s").%s.call_main()' % (name, main), name, *args)
class Ruby(Backend):
PRETTY_INSTALL = "GEM"
argswitch = "--ruby"
ext = "rb"
gen = ruby
def install_target(self):
name, ver = namever(self.entry)
return self._install_target(name, ver)
def _install_target(self, name, ver):
try:
output = shell.call("gem", "which", name, stage="install", errok=True)
return output.strip()
except shell.ShellError:
pass
return None
def install_command(self, dir, offline):
name, ver = namever(self.entry)
cmd = ["gem", "build", "-q", "%s.gemspec" % name]
shell.call(*cmd, cwd=dir, stage="install")
cmd = ["gem", "install"]
if offline: cmd += ["--local"]
cmd += ["%s/%s-%s.gem" % (dir, name, ver)]
shell.call(*cmd, stage="install")
def run(self, name, version, args):
main = self.gen.name(name)
os.execlp("ruby", "ruby", "-e", "require('%s'); ::Quark.%s.call_main()" % (name, main), name, *args)
| apache-2.0 | 6,072,187,217,217,607,000 | 34.181725 | 118 | 0.538857 | false |
heromod/migrid | mig/simulation/user.py | 1 | 2151 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# user - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
#
# User class used for simulating users that randomly submit jobs
#
import random
class User:
id = ''
submit_prob = None
logger = None
server = None
maxprice = None
length = 2
jobs = 0
def __init__(
self,
id,
logger,
prob,
price,
server,
vgrid,
):
self.id = id
self.logger = logger
self.submit_prob = prob
self.maxprice = price
self.server = server
self.vgrid = vgrid
def submit_job(self, step):
self.logger.info('%s submitting job with maxprice %s to %s in step %d'
, self.id, self.maxprice, self.server.id, step)
name = '%s' % self.id
self.server.submit(name, self.length, self.maxprice, self.vgrid)
self.jobs += 1
def sleep(self):
self.logger.debug('%s sleeping', self.id)
def simulate(self, timestep):
# Randomly submit a job during given timestep
rand = random.random()
qlen = self.server.job_queue.queue_length()
if rand <= self.submit_prob and qlen < 200:
self.submit_job(timestep)
else:
self.sleep()
| gpl-2.0 | -7,407,505,360,815,817,000 | 24.607143 | 81 | 0.621571 | false |
googleapis/python-pubsublite | google/cloud/pubsublite_v1/services/subscriber_service/transports/grpc_asyncio.py | 1 | 11672 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.pubsublite_v1.types import subscriber
from .base import SubscriberServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import SubscriberServiceGrpcTransport
class SubscriberServiceGrpcAsyncIOTransport(SubscriberServiceTransport):
"""gRPC AsyncIO backend transport for SubscriberService.
The service that a subscriber client application uses to
receive messages from subscriptions.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "pubsublite.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "pubsublite.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def subscribe(
self,
) -> Callable[
[subscriber.SubscribeRequest], Awaitable[subscriber.SubscribeResponse]
]:
r"""Return a callable for the subscribe method over gRPC.
Establishes a stream with the server for receiving
messages.
Returns:
Callable[[~.SubscribeRequest],
Awaitable[~.SubscribeResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "subscribe" not in self._stubs:
self._stubs["subscribe"] = self.grpc_channel.stream_stream(
"/google.cloud.pubsublite.v1.SubscriberService/Subscribe",
request_serializer=subscriber.SubscribeRequest.serialize,
response_deserializer=subscriber.SubscribeResponse.deserialize,
)
return self._stubs["subscribe"]
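    # Usage sketch (illustrative only, not part of the generated transport):
    # the property above yields a stream-stream stub, so a caller would do
    # roughly
    #     stream = transport.subscribe(request_iterator)
    # where request_iterator is an (async) iterable of SubscribeRequest messages.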
__all__ = ("SubscriberServiceGrpcAsyncIOTransport",)
| apache-2.0 | -8,276,490,674,875,871,000 | 43.549618 | 87 | 0.620459 | false |
pbvarga1/qimage2ndarray | qimage2ndarray/__init__.py | 1 | 14973 | import sys as _sys
import numpy as _np
from .dynqt import QtGui as _qt
from .dynqt import qt as _qt_driver
if _qt_driver.name() == 'PythonQt':
from .qimageview import QImage2ndarray as _temp
_qimageview = _temp.qimageview
else:
from .qimageview_python import qimageview as _qimageview
__version__ = "1.5"
if _sys.byteorder == 'little':
_bgra = (0, 1, 2, 3)
else:
_bgra = (3, 2, 1, 0)
_bgra_fields = {'b': (_np.uint8, _bgra[0], 'blue'),
'g': (_np.uint8, _bgra[1], 'green'),
'r': (_np.uint8, _bgra[2], 'red'),
'a': (_np.uint8, _bgra[3], 'alpha')}
bgra_dtype = _np.dtype(_bgra_fields)
"""Complex dtype offering the named fields 'r','g','b', and 'a' and
corresponding long names, conforming to QImage_'s 32-bit memory layout."""
try:
_basestring = basestring
except NameError:
# 'basestring' undefined, must be Python 3
_basestring = str
def _qimage_or_filename_view(qimage):
if isinstance(qimage, _basestring):
qimage = _qt.QImage(qimage)
return _qimageview(qimage)
def raw_view(qimage):
"""Returns raw 2D view of the given QImage_'s memory. The result
will be a 2-dimensional numpy.ndarray with an appropriately sized
    integral dtype. (This function is not intended to be used
directly, but used internally by the other -- more convenient --
view creation functions.)
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_
:rtype: numpy.ndarray_ with shape (height, width)"""
return _qimage_or_filename_view(qimage)
def byte_view(qimage, byteorder = 'little'):
"""Returns raw 3D view of the given QImage_'s memory. This will
always be a 3-dimensional numpy.ndarray with dtype numpy.uint8.
Note that for 32-bit images, the last dimension will be in the
[B,G,R,A] order (if little endian) due to QImage_'s memory layout
(the alpha channel will be present for Format_RGB32 images, too).
For 8-bit (indexed) images, the array will still be 3-dimensional,
i.e. shape will be (height, width, 1).
The order of channels in the last axis depends on the `byteorder`,
which defaults to 'little', i.e. BGRA order. You may set the
argument `byteorder` to 'big' to get ARGB, or use None which means
sys.byteorder here, i.e. return native order for the machine the
code is running on.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_
:param byteorder: specify order of channels in last axis
:rtype: numpy.ndarray_ with shape (height, width, 1 or 4) and dtype uint8"""
raw = _qimage_or_filename_view(qimage)
result = raw.view(_np.uint8).reshape(raw.shape + (-1, ))
if byteorder and byteorder != _sys.byteorder:
result = result[...,::-1]
return result
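# Illustrative sketch (kept as a comment so nothing runs at import time; it
# assumes the dynqt QtGui wrapper exposes QImage and qRgb): for a 32-bit image
# the last axis holds four bytes, in B,G,R,A order with the default
# byteorder='little'.
#     img = _qt.QImage(4, 3, _qt.QImage.Format_ARGB32)
#     img.fill(_qt.qRgb(12, 34, 56))
#     byte_view(img).shape          # -> (3, 4, 4)
#     list(byte_view(img)[0, 0])    # -> [56, 34, 12, 255]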
def rgb_view(qimage, byteorder = 'big'):
"""Returns RGB view of a given 32-bit color QImage_'s memory.
Similarly to byte_view(), the result is a 3D numpy.uint8 array,
but reduced to the rgb dimensions (without alpha), and reordered
(using negative strides in the last dimension) to have the usual
[R,G,B] order. The image must have 32 bit pixel size, i.e. be
RGB32, ARGB32, or ARGB32_Premultiplied. (Note that in the latter
case, the values are of course premultiplied with alpha.)
The order of channels in the last axis depends on the `byteorder`,
which defaults to 'big', i.e. RGB order. You may set the argument
`byteorder` to 'little' to get BGR, or use None which means
sys.byteorder here, i.e. return native order for the machine the
code is running on.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:param byteorder: specify order of channels in last axis
:rtype: numpy.ndarray_ with shape (height, width, 3) and dtype uint8"""
if byteorder is None:
byteorder = _sys.byteorder
bytes = byte_view(qimage, byteorder)
if bytes.shape[2] != 4:
raise ValueError("For rgb_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
if byteorder == 'little':
return bytes[...,:3] # strip A off BGRA
else:
return bytes[...,1:] # strip A off ARGB
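# Illustrative sketch (comment only): with the ARGB32 image from the byte_view
# example above,
#     rgb_view(img)[0, 0]                        # -> [12, 34, 56]  (R, G, B)
#     rgb_view(img, byteorder = 'little')[0, 0]  # -> [56, 34, 12]  (B, G, R)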
def alpha_view(qimage):
"""Returns alpha view of a given 32-bit color QImage_'s memory.
The result is a 2D numpy.uint8 array, equivalent to
byte_view(qimage)[...,3]. The image must have 32 bit pixel size,
i.e. be RGB32, ARGB32, or ARGB32_Premultiplied. Note that it is
not enforced that the given qimage has a format that actually
*uses* the alpha channel -- for Format_RGB32, the alpha channel
usually contains 255 everywhere.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:rtype: numpy.ndarray_ with shape (height, width) and dtype uint8"""
bytes = byte_view(qimage, byteorder = None)
if bytes.shape[2] != 4:
raise ValueError("For alpha_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
return bytes[...,_bgra[3]]
def recarray_view(qimage):
"""Returns recarray_ view of a given 32-bit color QImage_'s
memory.
The result is a 2D array with a complex record dtype, offering the
named fields 'r','g','b', and 'a' and corresponding long names.
    Thus, each color component can be accessed either via string
indexing or via attribute lookup (through numpy.recarray_):
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
>>> from PyQt4.QtGui import QImage, qRgb
>>> qimg = QImage(320, 240, QImage.Format_ARGB32)
>>> qimg.fill(qRgb(12,34,56))
>>>
>>> import qimage2ndarray
>>> v = qimage2ndarray.recarray_view(qimg)
>>>
>>> red = v["r"]
>>> red[10,10]
12
>>> pixel = v[10,10]
>>> pixel["r"]
12
>>> (v.g == v["g"]).all()
True
>>> (v.alpha == 255).all()
True
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:rtype: numpy.ndarray_ with shape (height, width) and dtype :data:`bgra_dtype`"""
raw = _qimage_or_filename_view(qimage)
if raw.itemsize != 4:
raise ValueError("For rgb_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
return raw.view(bgra_dtype, _np.recarray)
def _normalize255(array, normalize, clip = (0, 255)):
if normalize:
if normalize is True:
normalize = array.min(), array.max()
if clip == (0, 255):
clip = None
elif _np.isscalar(normalize):
normalize = (0, normalize)
nmin, nmax = normalize
if nmin:
array = array - nmin
if nmax != nmin:
scale = 255. / (nmax - nmin)
if scale != 1.0:
array = array * scale
if clip:
low, high = clip
_np.clip(array, low, high, array)
return array
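# Illustrative sketch of the internal normalization helper: with
# normalize = (10, 20), value 10 maps to 0 and value 20 maps to 255 (and the
# result is clipped to 0..255); normalize = 50 is shorthand for (0, 50), and
# normalize = True uses the array's own min/max as the range.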
def gray2qimage(gray, normalize = False):
"""Convert the 2D numpy array `gray` into a 8-bit, indexed QImage_
with a gray colormap. The first dimension represents the vertical
image axis.
The parameter `normalize` can be used to normalize an image's
value range to 0..255:
`normalize` = (nmin, nmax):
scale & clip image values from nmin..nmax to 0..255
`normalize` = nmax:
lets nmin default to zero, i.e. scale & clip the range 0..nmax
to 0..255
`normalize` = True:
scale image values to 0..255 (same as passing (gray.min(),
gray.max()))
If the source array `gray` contains masked values, the result will
have only 255 shades of gray, and one color map entry will be used
to make the corresponding pixels transparent.
A full alpha channel cannot be supported with indexed images;
instead, use `array2qimage` to convert into a 32-bit QImage.
:param gray: image data which should be converted (copied) into a QImage_
:type gray: 2D or 3D numpy.ndarray_ or `numpy.ma.array <masked arrays>`_
:param normalize: normalization parameter (see above, default: no value changing)
:type normalize: bool, scalar, or pair
:rtype: QImage_ with RGB32 or ARGB32 format"""
if _np.ndim(gray) != 2:
        raise ValueError("gray2QImage can only convert 2D arrays" +
                         (" (try using array2qimage)" if _np.ndim(gray) == 3 else ""))
h, w = gray.shape
result = _qt.QImage(w, h, _qt.QImage.Format_Indexed8)
if not _np.ma.is_masked(gray):
for i in range(256):
result.setColor(i, _qt.qRgb(i,i,i))
_qimageview(result)[:] = _normalize255(gray, normalize)
else:
# map gray value 1 to gray value 0, in order to make room for
# transparent colormap entry:
result.setColor(0, _qt.qRgb(0,0,0))
for i in range(2, 256):
result.setColor(i-1, _qt.qRgb(i,i,i))
_qimageview(result)[:] = _normalize255(gray, normalize, clip = (1, 255)) - 1
result.setColor(255, 0)
_qimageview(result)[gray.mask] = 255
return result
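# Illustrative sketch (comment only): converting a float image with an explicit
# value range; the array contents here are arbitrary.
#     a = _np.random.rand(240, 320) * 100.0
#     qimg = gray2qimage(a, normalize = (0, 100))  # 8-bit indexed, gray colormap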
def array2qimage(array, normalize = False):
"""Convert a 2D or 3D numpy array into a 32-bit QImage_. The
first dimension represents the vertical image axis; the optional
third dimension is supposed to contain 1-4 channels:
========= ===================
#channels interpretation
========= ===================
1 scalar/gray
2 scalar/gray + alpha
3 RGB
4 RGB + alpha
========= ===================
Scalar data will be converted into corresponding gray RGB triples;
if you want to convert to an (indexed) 8-bit image instead, use
`gray2qimage` (which cannot support an alpha channel though).
The parameter `normalize` can be used to normalize an image's
value range to 0..255:
`normalize` = (nmin, nmax):
scale & clip image values from nmin..nmax to 0..255
`normalize` = nmax:
lets nmin default to zero, i.e. scale & clip the range 0..nmax
to 0..255
`normalize` = True:
scale image values to 0..255 (same as passing (array.min(),
array.max()))
If `array` contains masked values, the corresponding pixels will
be transparent in the result. Thus, the result will be of
QImage.Format_ARGB32 if the input already contains an alpha
channel (i.e. has shape (H,W,4)) or if there are masked pixels,
and QImage.Format_RGB32 otherwise.
:param array: image data which should be converted (copied) into a QImage_
:type array: 2D or 3D numpy.ndarray_ or `numpy.ma.array <masked arrays>`_
:param normalize: normalization parameter (see above, default: no value changing)
:type normalize: bool, scalar, or pair
:rtype: QImage_ with RGB32 or ARGB32 format"""
if _np.ndim(array) == 2:
array = array[...,None]
elif _np.ndim(array) != 3:
raise ValueError("array2qimage can only convert 2D or 3D arrays (got %d dimensions)" % _np.ndim(array))
if array.shape[2] not in (1, 2, 3, 4):
raise ValueError("array2qimage expects the last dimension to contain exactly one (scalar/gray), two (gray+alpha), three (R,G,B), or four (R,G,B,A) channels")
h, w, channels = array.shape
hasAlpha = _np.ma.is_masked(array) or channels in (2, 4)
fmt = _qt.QImage.Format_ARGB32 if hasAlpha else _qt.QImage.Format_RGB32
result = _qt.QImage(w, h, fmt)
array = _normalize255(array, normalize)
if channels >= 3:
rgb_view(result)[:] = array[...,:3]
else:
rgb_view(result)[:] = array[...,:1] # scalar data
alpha = alpha_view(result)
if channels in (2, 4):
alpha[:] = array[...,-1]
else:
alpha[:] = 255
if _np.ma.is_masked(array):
alpha[:] *= _np.logical_not(_np.any(array.mask, axis = -1))
return result
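# Illustrative sketch (comment only): a (H, W, 4) uint8 array yields an
# ARGB32 image, (H, W, 3) an RGB32 one, and scalar data is replicated into
# gray RGB triples.
#     rgba = _np.zeros((240, 320, 4), dtype = _np.uint8)
#     rgba[..., 3] = 255               # fully opaque
#     qimg = array2qimage(rgba)        # QImage.Format_ARGB32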
def imread(filename, masked = False):
"""Convenience function that uses the QImage_ constructor to read an
image from the given file and return an `rgb_view` of the result.
This is intentionally similar to scipy.ndimage.imread (which uses
PIL), scipy.misc.imread, or matplotlib.pyplot.imread (using PIL
for non-PNGs).
    For grayscale images, a 2D array is returned (even if it comes from a
    32-bit representation; this is a consequence of the QImage API).
For images with an alpha channel, the resulting number of channels
will be 2 (grayscale+alpha) or 4 (RGB+alpha). Alternatively, one may
    pass `masked = True` in order to get `numpy.ma.array <masked
arrays>`_ back. Note that only fully transparent pixels are masked
(and that masked arrays only support binary masks). The value of
`masked` is ignored when the loaded image has no alpha channel
(i.e., one would not get a masked array in that case).
This function has been added in version 1.3.
"""
qImage = _qt.QImage(filename)
isGray = qImage.isGrayscale()
if isGray and qImage.depth() == 8:
return byte_view(qImage)[...,0]
hasAlpha = qImage.hasAlphaChannel()
if hasAlpha:
targetFormat = _qt.QImage.Format_ARGB32
else:
targetFormat = _qt.QImage.Format_RGB32
if qImage.format() != targetFormat:
qImage = qImage.convertToFormat(targetFormat)
result = rgb_view(qImage)
if isGray:
result = result[...,0]
if hasAlpha:
if masked:
mask = (alpha_view(qImage) == 0)
if _np.ndim(result) == 3:
mask = _np.repeat(mask[...,None], 3, axis = 2)
result = _np.ma.masked_array(result, mask)
else:
result = _np.dstack((result, alpha_view(qImage)))
return result
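# Illustrative sketch (comment only); 'overlay.png' is a hypothetical RGBA file:
#     rgba = imread('overlay.png')                # (H, W, 4) uint8 array
#     rgb = imread('overlay.png', masked = True)  # masked array, fully
#                                                 # transparent pixels masked out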
def imsave(filename, image, normalize = False, format = None, quality = -1):
"""Convenience function that uses QImage.save to save an image to the
given file. This is intentionally similar to scipy.misc.imsave.
However, it supports different optional arguments:
:param normalize: see :func:`array2qimage` (which is used internally)
:param format: image filetype (e.g. 'PNG'), (default: check filename's suffix)
:param quality: see QImage.save (0 = small .. 100 = uncompressed, -1 = default compression)
:returns: boolean success, see QImage.save
This function has been added in version 1.4.
"""
qImage = array2qimage(image, normalize = normalize)
return qImage.save(filename, format, quality)
| bsd-3-clause | 2,436,135,751,722,920,000 | 36.153846 | 165 | 0.645896 | false |
Backflipz/plugin.video.excubed | addon.py | 1 | 81609 | from whoosh.index import create_in
# from xbmcswift2 import Plugin
from kodiswift import Plugin
import os
import sys
import re
import json
import xbmc
import xbmcaddon
import xbmcplugin
import xbmcgui
import threading
import glob
import shlex
from BeautifulSoup import BeautifulSoup as BS
from whoosh.filedb.filestore import FileStorage
from whoosh.fields import *
from whoosh.qparser import QueryParser
import hurry.filesize as hf
import datetime
# import xbmcswift2_playlists
# import socket
plugin = Plugin()
# lists = xbmcswift2_playlists.Playlists(plugin)
# lib = os.path.join(plugin._addon_id, 'resources', 'lib' )
# print lib
olib = 'special://home' + '/addons/' + plugin._addon_id
lib = xbmc.translatePath(olib)
cache_dir = 'special://home' + '/userdata/addon_data/' \
+ plugin._addon_id
cache_dir += '/cache/'
cache_dir = xbmc.translatePath(cache_dir)
print lib
lib = os.path.join(lib, 'resources', 'lib')
print lib
sys.path.append(lib)
sys.path.append(xbmc.translatePath(os.path.join(os.getcwd(), 'resources'
, 'lib')))
import requests
# from xbmcswift2 import actions
from kodiswift import actions
import cfscrape
from pprint import pformat as pp
# from xdcc import XDCC
import xbot
import dataset
import copy
# from m2g import magnet2torrent as m2t
# from autonomotorrent.BTManager import BTManager
# from autonomotorrent.BTApp import BTApp,BTConfig
# plugin.log.info(cache_dir)
nick = plugin.get_setting('nickname')
db = dataset.connect('sqlite:///' + cache_dir + 'Meta.db')
table = db['meta']
scraper = cfscrape.create_scraper()
# Borrowed from metahandlers
import thetvdbapi
api = thetvdbapi.TheTVDB()
# s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# s.bind((plugin.get_setting('host'),plugin.get_setting('listen_port',int)))
api_key = plugin.get_setting('api_key', str)
api_key = api_key.replace(' ', '')
headers = {'Authorization': api_key}
api_url = 'http://%s:%s/api/1.0/' % (plugin.get_setting('host', str),
plugin.get_setting('port', str))
tmp_path = plugin.get_setting('tmp_path', str)
tmp_path += '*.*'
dl_path = plugin.get_setting('xg_dl_path', str)
dl_path += '*.*'
log = plugin.log.info
whoosh_path = plugin.get_setting('whoosh_path', str)
class SEP(object):
def __init__(self, **entries):
self.__dict__.update(entries)
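# SEP wraps a plain dict as an object with attribute access; it is used below
# to rehydrate show/episode data cached in plugin storage, e.g.
# SEP(**info['shownep'][0]) behaves like a thetvdbapi show object for reads.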
FA_api = 'a9494e131f434a23f1c130ec6cb8a2a3'
@plugin.cached_route('/')
def index():
items = [{'label': 'Search XG...', 'path': plugin.url_for('search',
search_term='first_page', page='1'),
'is_playable': False}, {'label': 'Enter Custom Message',
'path': plugin.url_for('play_local_file')},
{'label': 'Webpage Parsers',
'path': plugin.url_for('parsers')}]
# {'label': 'Enter Magnet Link',
# 'path': plugin.url_for('torrent')}] # ,
# {
# 'label' : 'Enter Custom File Request',
# 'path' : plugin.url_for('enter_custom')}]
return items
# @plugin.route('/torrent/')
# def torrent():
# labs = {'title': 'Test'}
# app = BTApp(save_dir=plugin.get_setting('xg_dl_path'),
# listen_port=plugin.get_setting('listen_port', int),
# enable_DHT=True)
# try:
# labs = get_meta()
# except:
# pass
# mag = plugin.keyboard(heading='Enter Magnet Link')
# try:
# Torrent().stop_all_torrents()
# except:
# pass
# app.save_dir = plugin.get_setting('xg_dl_path')
# config = BTConfig(m2t(mag, plugin.get_setting('tmp_path')))
# biggest = 0
# for f in config.metainfo.files:
# if f['length'] > biggest:
# biggest = f['length']
# path = f['path']
# path = plugin.get_setting('xg_dl_path') + path
# plugin.log.info(path)
# app.add_torrent(config)
# manager = BTManager(app, config)
# dialog = xbmcgui.DialogProgress()
# dialog.create('Preparing File')
# threading.Thread(target=manager.app.start_reactor).start()
# while not os.path.exists(path):
# plugin.log.info(manager.get_speed())
# if dialog.iscanceled():
# break
# dialog.close()
# t.join()
# plugin.finish([{
# 'label': labs['title'],
# 'info': labs,
# 'path': path,
# 'context_menu': [('Stop All Torrents',
# actions.background(app.stop_all_torrents()))],
# 'is_playable': True,
# }])
@plugin.route('/search/<search_term>/<page>/')
def search(
search_term='first_page',
page='1',
id=None,
labs=None,
):
# packs = xdcc_search.get_packs('http://xdcc.horriblesubs.info','naruto')
# plugin.log.info('Packs' + str(packs))
# %s.%s?searchTerm=%s' % (port,type,format,searchTerm)
if search_term == 'first_page':
keyboard = xbmc.Keyboard('', 'Enter Search Term', False)
keyboard.doModal()
if keyboard.isConfirmed():
search_term = keyboard.getText()
search_packets = 'packets.json?searchTerm=%s&maxResults=20&page=%s' \
% (search_term, page)
request = requests.get(api_url + search_packets, headers=headers)
results = request.json()
# results = json.loads(results)
items = []
idx = 0
for option in results['Results']:
guid_url = api_url + 'packets/%s/enable.json' % option['Guid']
item = {
'label': option['Name'] + ' || Size: %s'
% hf.size(option['Size']),
'path': plugin.url_for('play_file', url=guid_url,
name=option['Name']),
'is_playable': True,
'context_menu': [
('Assign Metadata', actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=search_term,
page=page,
from_XG=True,
name=False,
bot=False,
cache=False,
))),
('Reapply Metadata', actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=search_term,
page=page,
from_XG=True,
name=False,
bot=False,
cache=True,
))),
('Just Download',
actions.background(plugin.url_for('just_download',
url=guid_url, data=False))),
('Delete File',
actions.background(plugin.url_for('delete_file',
name=option['Name'], all_files=False))),
('Delete All Files',
actions.background(plugin.url_for('delete_file',
name=option['Name'], all_files=True))),
],
}
try:
if str(idx) == str(id):
item['info'] = labs
item['thumbnail'] = labs['cover_url']
item['properties'] = \
{'Fanart_Image': labs['backdrop_url']}
except:
pass
idx += 1
items.append(item.copy())
items.append({'label': 'Next Page >>',
'path': plugin.url_for('search',
search_term=search_term, page=str(int(page) + 1))})
return plugin.finish(items)
# noinspection PyArgumentList
@plugin.route('/play/<name>/<url>/')
def play_file(name, url, data=None):
if data is None:
data = {}
plugin.log.info('Url is: %s' % url)
# Check to see if file already exists
tmp_files = glob.glob(tmp_path)
tmpName = re.sub(r'[\W_]+', '', name)
tmpName = tmpName.lower()
dl_file = False
local_url = ''
plugin.log.info('Temp Name is' + tmpName)
dl_files = glob.glob(dl_path)
for filename in dl_files:
plugin.log.info('Filepath is ' + re.sub(r'[\W_]+', '',
filename).lower())
if tmpName in re.sub(r'[\W_]+', '', filename).lower():
local_url = filename
dl_file = True
break
if local_url == '':
for filename in tmp_files:
plugin.log.info('Filepath is ' + filename)
if tmpName in filename:
local_url = filename
break
if len(local_url) > 0:
plugin.set_resolved_url(local_url)
else:
# if data:
# headers['Content-Type'] = 'application/json'
# r = requests.put(url,headers = headers, data = json.dumps(data))
# plugin.log.info('Url is %s \n Data is %s \n Status is %s \n Text is %s' % (r.url,data,r.status_code,r.text))
        # else: r = requests.post(url,headers=headers)
if data:
stream(
server=data['server'],
channel=data['channel'],
bot=data['bot'],
packetId=data['packetId'],
filename=data['packetName'],
download=True,
)
# if manual_meta: infoLabels = get_meta()
# else: infoLabels = {'title' : name,'cover_url':''}
tmp_files = glob.glob(tmp_path)
tmpName = re.sub(r'[\W_]+', '', name)
tmpName = tmpName.lower()
local_url = ''
plugin.log.info('Temp Name is' + tmpName)
for filename in tmp_files:
plugin.log.info('Filepath is ' + filename)
if tmpName in filename:
local_url = filename
break
plugin.log.info('Playing url: %s' % local_url)
# item = {'info':infoLabels, 'path' : local_url , 'thumbnail' : infoLabels['cover_url']}
plugin.set_resolved_url(local_url)
@plugin.route('/play_local_file/')
def play_local_file():
# tmp_files = glob.glob(tmp_path)
# keyboard = xbmc.Keyboard('','Enter File Name',False)
# keyboard.doModal()
# if keyboard.isConfirmed(): name = keyboard.getText()
# names = name.strip()
# local_url = ''
# for filename in tmp_files:
# plugin.log.info('Filepath is ' + filename)
# for term in names:
# if term in filename:
# allTerms = True
# break
# else:
# allTerms = False
# break
    # if allTerms: local_url = filename
# if local_url == '':
# dialog = xbmcgui.Dialog()
# dialog.notification(message = 'Could Not find file')
# plugin.log.info('Playing url: %s' % local_url)
# item = {'path':local_url,'label':name}
# plugin.set_resolved_url(local_url)
s = plugin.get_storage('message')
dialog = xbmcgui.Dialog()
options = ['Manual', 'Storage']
storageopt = []
# try:
for i in s:
plugin.log.info(i)
storageopt.append(i)
# except: pass
plugin.log.info(options)
index = dialog.select('Choose', options)
if index == 0:
server = \
plugin.keyboard(heading='Enter server (Ex: irc.server.net)')
channel = plugin.keyboard(heading='Enter channel (Ex: #channel)'
)
s[channel] = {'server': server, 'channel': channel}
else:
index = dialog.select('Stored', storageopt)
server = s[storageopt[index]]['server']
channel = storageopt[index]
plugin.log.info(channel + server)
filename = \
plugin.keyboard(heading='Enter filename (Ex: A.Movie.mkv)')
if '#' not in channel:
channel = '#' + channel
message = \
plugin.keyboard(heading='Enter message (Ex: /msg bot xdcc send #packetid)'
)
parts = shlex.split(message)
bot = parts[1]
id = parts[4].replace('#', '')
labs = get_meta()
return [{
'label': labs['title'],
'info': labs,
'path': plugin.url_for(
'stream',
download=False,
server=server,
channel=channel,
bot=bot,
packetId=id,
filename=filename,
),
'is_playable': True,
}]
@plugin.route('/webpages/')
def parsers():
items = [{'label': 'Add a Channel...',
'path': plugin.url_for('add_server')},
{'label': 'Search ixIRC...',
'path': plugin.url_for('search_ix', query='**just_search**'
, page='0')}, {'label': 'Search Haruhichan...',
'path': plugin.url_for('haruhichan', key='None')},
{'label': 'Search xweasel...', 'path': plugin.url_for('xweasel', query='lala', page='1')},
{'label': 'Ginpachi-Sensei', 'path': plugin.url_for('gin_sensei', search='blah')},
{'label': 'Hi10', 'path': plugin.url_for('cloud10')}]
for storage in plugin.list_storage():
if storage == 'meta_cache' or storage == 'showcache' or storage \
== 'message':
continue
try:
storage = plugin.get_storage(storage)
except:
continue
# plugin.log.info('Storage %s' % storage)
try:
items.append({'label': storage['name'],
'path': plugin.url_for('channel',
name=storage['name']),
'context_menu': [('Refresh Packlist',
actions.background(plugin.url_for('refresh',
name=storage['name']))), ('Refresh Local Packlist',
actions.background(plugin.url_for('refresh',
name=storage['name']+".Local"))),('Refresh AniDB',
actions.background(
plugin.url_for(
'refresh',
name='animetitles')))]})
except:
pass
return items
@plugin.route('/add_server/')
def add_server():
global name, server, url
keyboard = xbmc.Keyboard('',
'Enter Host Server (Ex: irc.server.net)',
False)
keyboard.doModal()
if keyboard.isConfirmed():
server = keyboard.getText()
keyboard = xbmc.Keyboard('', 'Enter Channel Name', False)
keyboard.doModal()
if keyboard.isConfirmed():
name = keyboard.getText()
channel = plugin.get_storage('%s' % name, ttl=60 * 24 * 5)
channel['name'] = name
keyboard = xbmc.Keyboard('',
'Enter Webpage Url (Ex: http://xdcc.channel.com/'
, False)
keyboard.doModal()
if keyboard.isConfirmed():
url = keyboard.getText()
packlist = get_packlist(url)
channel['url'] = url
channel['server'] = server
channel['packlist'] = packlist
channel['bots'] = []
@plugin.cached_route('/webpages/<name>/')
def channel(name):
items = [{'label': 'Search Packlist...',
'path': plugin.url_for('search_channel', name=name,
bot='list_all')}, {'label': 'List All Packlist',
'path': plugin.url_for('list_packlist', name=name,
search_term='list_all', bot='list_all',
page='1')},
{'label': 'List Bots', 'path': plugin.url_for('list_bots',
channel=name)}]
return items
def file_meta(name):
wstorage = FileStorage(whoosh_path)
ix = wstorage.open_index()
google = ix.searcher()
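    # The parsing below assumes one of two release-name shapes (roughly):
    #   "[Group] Show Name - 05 [720p].mkv"     (bracket/space style)
    #   "(Source)_Show_Name_-_05_(720p).mkv"    (underscore style)
    # and extracts (show, episode number) from the filename.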
try:
show, ep = name.split(']')[1].split('[')[0].lstrip().rstrip().replace(' - ', ' ').rpartition(
re.search('\d{1,3}', name).group(0))[:2]
except:
show = name.split('_-_')[0].rpartition(')_')[2].replace('_', ' ')
ep = name.split('_-_')[1].split('_')[0]
plugin.log.info('ShowEp %s %s' % (show, ep))
if int(ep) == 0: return {}
info = plugin.get_storage('%s' % show)
infoLabels = {}
plugin.log.info('SHOW STORAGE %s' % pp([x for x in info.items() if len(repr(x[1])) < 20]))
if len(info.keys()) == 0 or info is None or (
'last' in info.keys() and datetime.datetime.today().toordinal() - info['last'] >= 5):
info['last'] = datetime.datetime.today().toordinal()
query = QueryParser("title", ix.schema).parse(show)
results = google.search(query)
plugin.log.info('SEARCH %s' % pp([(x['title'], x['content']) for x in results[:5]]))
info['noresults'] = 0 if len(results) else 1
v = []
ot = None
if len(results):
aid = results[0]['aid']
info['aid'] = aid
log('REQUESTING ANIDB DATA')
r = requests.get(
'http://api.anidb.net:9001/httpapi?request=anime&client=anidbtvdbmeta&clientver=1&protover=1&aid=%s' % aid)
log("Status %s\n" % r.status_code)
soup = BS(r.text)
v = [x for x in soup.findAll('epno') if x.text == str(int(ep))]
info['aniupdate'] = 0 if len(v) else 1
plugin.log.info('V %s' % v)
'''try:
log('CHANGING SHOW SEARCH FROM %s to %s' %(show,results[0]['content'][0]))
show = results[0]['content'][0]
except:
pass'''
ot = results[0]['content']
ot = [ot[-1]] + ot[:-1]
log('OT %s' % ot)
google.close()
id = None
theaders = {'Content-Type': 'application/json',
'trakt-api-version': '2',
'trakt-api-key': '05bcd2c0baf2685b8c196162d099e539033c21f7aa9fe1f87b234c2d62c2c1e4'}
results = \
requests.get('https://api-v2launch.trakt.tv/search?query=%s&type=show'
% show, headers=theaders)
log('STATUS %s' % results)
results = results.json()
# results = api.get_matching_shows(title)
search_meta = []
for item in results:
option = {
'tvdb_id': item['show']['ids']['tvdb'],
'title': item['show']['title'],
'imdb_id': item['show']['ids']['imdb'],
'trakt_id': item['show']['ids']['trakt'],
}
search_meta.append(option)
log('Search Meta %s' % pp(search_meta))
if len(search_meta):
id = str(search_meta[0]['tvdb_id'])
info['id'] = id
log('ID %s' % id)
else:
shws = api.get_matching_shows(show)
log('Matching Shows %s' % pp(shws))
try:
id = shws[0][0] if show != 'Drifters' else shws[1][0]
except:
if ot is not None:
for x in ot:
try:
id = api.get_matching_shows(x)[0][0] if show != 'Drifters' else \
api.get_matching_shows(x)[1][0]
if len(id) > 0: break
except:
pass
info['noid'] = 0 if id is not None else 1
if id is None: return {}
info['id'] = id
if info['noid'] == 0: info['aniupdate'] = 0
e = api.get_show_and_episodes(id)
info['shownep'] = [e[0].__dict__, [i.__dict__ for i in e[1]]]
log(pp(info))
if len(v):
info['anidb'] = repr(v[0].parent.parent)
try:
info['EP%sairdate' % ep] = v[0].parent.airdate.text
log('AIRDATE %s' % v[0].parent.airdate.text)
airdate = api.convert_date(v[0].parent.airdate.text)
episode = [i for i in e[1] if 2 >= (lambda x: x.days)(
airdate - api.convert_date(
i.first_aired) if i.first_aired else airdate - airdate) >= -2] # Was a -9 after else
except Exception, ed:
#log(e.__dict__)
log('ERROR %s LINE: %s' % (ed, sys.exc_info()[2].tb_lineno))
log('AIRDATE DIDNT WORK ON EPISODE %s' % ep)
try:
episode = [i for i in e[1] if int(i.absolute_number) == int(ep)]
except:
episode = [i for i in e[1] if int(i.episode_number) == int(ep)]
info['tvupdate'] = 0 if len(episode) else 1
try:
infoLabels = transform_ep_object(episode[0])
except Exception, excptn:
log('ERROR %s LINE: %s' % (excptn, sys.exc_info()[2].tb_lineno))
infoLabels['TVShowTitle'] = e[0].name
infoLabels['backdrop_url'] = e[0].fanart_url
info['EP%s' % ep] = infoLabels
plugin.log.info('INFO %s' % info.keys())
return infoLabels
elif id:
episode = [x for x in e[1] if
(lambda i: int(i.absolute_number) if i.absolute_number != '' else int(i.episode_number))(
x) == int(ep)]
info['tvupdate'] = 0 if len(episode) else 1
try:
infoLabels = transform_ep_object(episode[0])
except Exception, excptn:
log('ERROR %s LINE: %s' % (excptn, sys.exc_info()[2].tb_lineno))
infoLabels['TVShowTitle'] = e[0].name
infoLabels['backdrop_url'] = e[0].fanart_url
info['EP%s' % ep] = infoLabels
plugin.log.info('INFO %s' % info.keys())
return infoLabels
else:
if 'EP%s' % ep in info.keys():
infoLabels = info['EP%s' % ep]
return infoLabels
if info['noid']: return {}
if info['aniupdate']:
query = QueryParser("title", ix.schema).parse(show)
results = google.search(query)
aid = results[0]['aid']
google.close()
info['aid'] = aid
r = requests.get(
'http://api.anidb.net:9001/httpapi?request=anime&client=anidbtvdbmeta&clientver=1&protover=1&aid=%s' % aid)
log("Status %s\n" % r.status_code)
log("HTML CODE: %s" % r.text)
soup = BS(r.text)
v = [x for x in soup.findAll('epno') if x.text == str(int(ep))]
info['anidb'] = repr(v[0].parent.parent)
info['EP%sairdate' % ep] = v[0].parent.airdate.text
info['aniupdate'] = 0 if len(v) else 1
if info['tvupdate']:
e = api.get_show_and_episodes(info['id'])
info['shownep'] = [e[0].__dict__, [i.__dict__ for i in e[1]]]
try:
airdate = api.convert_date(info['EP%sairdate' % ep])
episode = [i for i in e[1] if 2 >= (lambda x: x.days)(airdate - api.convert_date(i.first_aired)) >= -2]
except Exception, excptn:
log('ERROR %s LINE: %s' % (excptn, sys.exc_info()[2].tb_lineno))
try:
episode = [i for i in e[1] if int(i.absolute_number) == int(ep)]
except:
episode = [i for i in e[1] if int(i.episode_number) == int(ep)]
info['tvupdate'] = 0 if len(episode) else 1
try:
infoLabels = transform_ep_object(episode[0])
except Exception, excptn:
log('ERROR %s LINE: %s' % (excptn, sys.exc_info()[2].tb_lineno))
infoLabels['TVShowTitle'] = e[0].name
infoLabels['backdrop_url'] = e[0].fanart_url
info['EP%s' % ep] = infoLabels
return infoLabels
if 'EP%s' % ep not in info.keys():
e = [SEP(**info['shownep'][0]), [SEP(**i) for i in info['shownep'][1]]]
try:
soup = BS(info['anidb'])
v = [x for x in soup.findAll('epno') if x.text == str(int(ep))]
info['EP%sairdate' % ep] = v[0].parent.airdate.text
airdate = api.convert_date(v[0].parent.airdate.text)
episode = [i for i in e[1] if 2 >= (lambda x: x.days)(airdate - api.convert_date(i.first_aired)) >= -2]
info['tvupdate'] = 0 if len(episode) else 1
try:
infoLabels = transform_ep_object(episode[0])
except Exception, excptn:
log(excptn)
infoLabels['TVShowTitle'] = e[0].name
infoLabels['backdrop_url'] = e[0].fanart_url
info['EP%s' % ep] = infoLabels
plugin.log.info('INFO %s' % info.keys())
return infoLabels
except Exception, er:
plugin.log.info('EP ERROR %s' % er)
try:
episode = [x for x in e[1] if x.absolute_number != '' and int(x.absolute_number) == int(ep)]
except:
episode = [x for x in e[1] if x.episode_number != '' and int(x.episode_number) == int(ep)]
info['tvupdate'] = 0 if len(episode) else 1
infoLabels = transform_ep_object(episode[0])
infoLabels['TVShowTitle'] = e[0].name
infoLabels['backdrop_url'] = e[0].fanart_url
info['EP%s' % ep] = infoLabels
plugin.log.info('INFO %s' % info.keys())
return infoLabels
else:
return {}
@plugin.route('/webpages/<name>/list_packlist/<bot>/<search_term>/<page>')
def list_packlist(
name,
search_term='',
bot='',
page='1',
labs=None,
id='',
cache='nope'
):
global all_Terms
if labs is None:
labs = {}
page = int(page)
cache = plugin.get_storage('%s' % name)
log(cache.keys())
packlist = copy.copy(cache['packlist'])
items = []
prev = (page - 1) * 20
curr = page * 20
if bot != 'list_all':
bot_packlist = []
for item in packlist:
if bot == item['bot']:
bot_packlist.append(item)
packlist = bot_packlist
if search_term != 'list_all':
search_packlist = []
search_terms = search_term.split()
plugin.log.info('Search Terms %s' % search_terms)
for i in packlist:
for term in search_terms:
if term.lower() in i['filename'].lower():
all_Terms = True
else:
all_Terms = False
break
if all_Terms:
search_packlist.append(i)
packlist = search_packlist
idx = 0
for item in packlist: # [prev:curr]:
flabs = {'title':'','plot':'','season':'','episode':'','premiered':''}
try:
flabs.update(file_meta(item['filename']))
flabs['plot'] = item['filename'] + ' || Size: ' + str(item['size']) + ' MB || Bot : ' + item[
'bot'] + '\n\n' + flabs['plot']
log(flabs['premiered'])
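            # The episode's air date (converted to a day ordinal) is stashed in
            # the 'Size' infolabel so the 'Size' sort method passed to
            # plugin.finish() below effectively orders items by air date.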
try:
flabs['Size'] = api.convert_date(flabs['premiered']).toordinal()
except Exception, e:
log(e)
flabs['Size'] = flabs['premiered']
except Exception, ed:
log('ERROR %s LINE: %s' % (ed, sys.exc_info()[2].tb_lineno))
flabs = {}
log(pp(flabs))
items.append({
'label': item['filename'] + ' || Size: '
+ str(item['size']) + ' MB || Bot : ' + item['bot'
],
'path': plugin.url_for(
'stream',
download=item['size'],
server=cache['server'],
channel=name,
bot=item['bot'],
packetId=item['packetId'],
filename=item['filename'],
),
'is_playable': True,
'context_menu': [('Assign Metadata',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=search_term,
page=page,
name=name,
bot=bot,
from_XG=False,
cache=False,
))), ('Reapply Metadata',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=search_term,
page=page,
name=name,
bot=bot,
from_XG=False,
cache='reapply',
))), ('Next Episode',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=search_term,
page=page,
name=name,
bot=bot,
from_XG=False,
cache='next',
))), ('Previous Episode',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=search_term,
page=page,
name=name,
bot=bot,
from_XG=False,
cache='prev',
))), ('File Metadata',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=search_term,
page=page,
name=name,
bot=bot,
from_XG=False,
cache=item['filename']
))), ('Just Download',
actions.background(plugin.url_for(
'stream',
download=True,
server=cache['server'],
channel=name,
bot=item['bot'],
packetId=item['packetId'],
filename=item['filename'],
))), ('Delete File',
actions.background(plugin.url_for('delete_file'
, name=item[
'filename'],
all_files=False))),
('Delete All Files',
actions.background(plugin.url_for('delete_file'
, name=item['filename'], all_files=True)))],
'info': flabs if flabs else '',
'thumbnail': flabs['cover_url'] if 'cover_url' in flabs.keys() else '',
'properties': {'Fanart_Image': flabs['backdrop_url']} if 'backdrop_url' in flabs.keys() else '',
'info_type': 'video'
})
try:
if str(idx) == str(id):
items[idx]['info'] = labs
items[idx]['thumbnail'] = labs['cover_url']
items[idx]['properties'] = \
{'Fanart_Image': labs['backdrop_url']}
except:
pass
idx += 1
# if curr <= len(packlist):
# items.append({'label': 'Next Page >>',
# 'path': plugin.url_for('list_packlist', name=name,
# search_term=search_term, bot=bot, page=str(page
# + 1))})
# if page > 1:
# items.insert(0, {'label': '<< Previous Page',
# 'path': plugin.url_for('list_packlist', name=name,
# search_term=search_term, bot=bot, page=str(page
# - 1))})
plugin.finish(items=items, sort_methods=['Size'])
@plugin.route('/webpages/<name>/search/<bot>/')
def search_channel(name, bot='all_bots'):
lastsearch = plugin.get_storage('lastsearch')
if 'last' not in lastsearch.keys():
lastsearch['last'] = ''
search_term = plugin.keyboard(default=lastsearch['last'], heading='Enter Search Term')
lastsearch['last'] = search_term
return list_packlist(name=name, search_term=search_term, page='1', bot=bot)
# plugin.finish(items=[{'label': 'Results',
# 'path': plugin.url_for('list_packlist', name=name, search_term=search_term, page='1',
# bot=bot)}])
@plugin.route('/webpages/<channel>/bots/')
def list_bots(channel):
cache = plugin.get_storage(channel)
packlist = cache['packlist']
log(cache.keys())
if not cache['bots']:
for item in packlist:
log('KEYS %s' % item.keys())
if item['bot'] not in str(cache['bots']):
cache['bots'].append({'label': item['bot'],
'path': plugin.url_for('bots', channel=channel,
bot=item['bot'])})
return cache['bots']
@plugin.cached_route('/webpages/<channel>/bots/<bot>/')
def bots(channel, bot):
return [{'label': 'Search Bot Packlist',
'path': plugin.url_for('search_channel', name=channel,
bot=bot)}, {'label': 'List All Packs for %s' % bot,
'path': plugin.url_for('list_packlist', name=channel,
search_term='list_all', bot=bot, page='1')}]
@plugin.route('/update_packlist/<name>/')
def refresh(name):
if name == 'animetitles':
# t = requests.get('http://anidb.net/api/anime-titles.xml.gz')
# log('ANITITLES STATUS %s' % t.status_code)
anilist = xbmc.translatePath(olib) + '\\anime-titles.xml'
import shutil
with open(anilist, 'rb') as ani:
soup = BS(ani)
log('FINISHED PARSING BS ANITITLES')
shutil.rmtree(whoosh_path)
os.mkdir(whoosh_path)
log('REMOVED ORIGINAL WHOOSH PATH')
wstorage = FileStorage(whoosh_path)
# ix = wstorage.open_index()
log('OPENING WHOOSH INDEX')
schema = Schema(title=TEXT(stored=True), aid=NUMERIC(stored=True), content=NGRAMWORDS(stored=True))
ix = create_in(whoosh_path, schema)
writer = ix.writer()
log('BEGINNING WRITING PROCESS')
for x in soup.findAll('title', type='main'):
c = [unicode(i.text) for i in x.parent.findAll('title', attrs={'xml:lang': 'en'})]
c.append(unicode(x.text))
writer.add_document(title=x.text, aid=x.parent['aid'], content=c)
writer.commit()
log('FINISHED WRITING PROCESS')
local = 0
if '.Local' in name:
local = 1
name=name.split('.Local')[0]
storage = plugin.get_storage(name)
if local:
if 'local' not in storage.keys():
storage['local'] = plugin.keyboard(heading='Enter local Packlist location')
if 'packlist' not in storage.keys():
storage['packlist'] = ''
storage['packlist'] = get_packlist(storage['local'],local)
else:
storage['packlist'] = get_packlist(storage['url'],local)
y = len(storage['packlist'])
dlg = xbmcgui.DialogProgress()
x = 0
dlg.create("Refreshing...")
for item in storage['packlist']:
if item['bot'] not in str(storage['bots']):
storage['bots'].append({'label': item['bot'],
'path': plugin.url_for('bots',
channel=name, bot=item['bot'])})
x += 1
        dlg.update(int((float(x) / y) * 100), item['bot'])
def get_packlist(url,local=0):
if local==0:
url += 'search.php'
specific = xbmcgui.Dialog().yesno('Select Specific Bot',"Add a Specific Bot Nickname?")
if specific:
url+= '?nick=' + plugin.keyboard()
try:
r = scraper.get(url)
except:
r = requests.get(url)
plugin.log.info('Packlist Status %s' % r)
if str(r.status_code) != '200':
xbmcgui.Dialog().ok(line1 = "Failed to get Packlist status %s" % r.status_code, heading = '')
text = r.text
else:
text = open(url, 'rb').read()
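    # The packlist page/file is expected to contain JavaScript-style lines,
    # roughly of the form:
    #   p[0] = {b:"Some|Bot", n:1, f:"Some.File.mkv", s:350};
    # The regex grabs the part between '= ' and ';', and the replacements below
    # expand the short keys to 'bot', 'packetId', 'filename' and 'size' before eval().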
m = re.findall('= (.+?);\n', text)
items = []
for item in m:
item = item.replace('b:', "'bot':").replace('n:', "'packetId':"
).replace('f:', "'filename':").replace('s:', "'size':")
try:
dict = eval(item)
items.append(dict.copy())
except:
pass
return items
@plugin.cached(ttl=60 * 24 * 3)
def get_gin():
plugin.log.info('Getting Text')
with open(cache_dir + 'Gin.txt', 'wb') as gtxt:
gtxt.write(scraper.get('https://gin.sadaharu.eu/Gin.txt').text)
with open(cache_dir + 'Gin.txt', 'rb') as gtxt:
items = []
for x in gtxt.readlines():
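            # Each pack line in Gin.txt looks roughly like
            #   "#123  45x [925M] Some.File.mkv"
            # and the size is normalised to (roughly) megabytes below.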
if x[0] == '#' and x[:3] != '#1 ':
num = x.find(' ')
num = x[1:num]
s = x.find('[') + 1
f = x.find(']') - 1
size = x[s:f]
size = int(size) if '.' not in size else float(size)
if size < 100 and x[f] == 'M': size *= 10
if x[f] == 'G': size = int(hf.size(size * 1073741824, [(1048576, '')]))
if x[f] == 'K': size = int(hf.size(size * 1024, [(1048576, '')]))
name = x[f + 3:-1]
items.append({'packetId': num, 'filename': name, 'bot': 'Gintoki', 'size': size})
g = plugin.get_storage('Ginpachi-Sensei')
g.update({'packlist': items, 'server': 'irc.rizon.net'})
@plugin.route('/gin_sensei/<search>')
def gin_sensei(search):
get_gin()
if search != 'none':
lastsearch = plugin.get_storage('lastsearch')
search = plugin.keyboard(default=lastsearch['last'], heading='Enter Search Term')
lastsearch['last'] = search
return [{'label': 'Results',
'path': plugin.url_for(list_packlist, name='Ginpachi-Sensei', search_term=search, page='1',
bot='Gintoki')}]
@plugin.route('/stream/<download>/<server>/<channel>/<bot>/<packetId>/<filename>')
def stream(
server,
channel,
bot,
packetId,
filename,
download=False,
):
if '#' not in channel:
channel = '#' + channel
data = {
'server': server,
'channel': channel,
'bot': bot,
'packetId': int(packetId),
'packetName': filename,
}
# dl_path = plugin.get_setting('xg_dl_path',str)
# plugin.log.info(dl_path)
# from data import Networks
# networks = Networks()
# import socket
    # server = socket.gethostbyname(server)
fstring = plugin.get_setting('xg_dl_path', str) + filename.replace("'","_")
log(fstring)
log('EXISTS %s' % os.path.exists(fstring))
if bot == 'Ginpachi-Sensei': bot = 'Gintoki'
plugin.log.info(channel)
# if str(download) == 'True':
# pass
# else:
# return play_file(filename, url='', data=data)
if download == 'True' or not os.path.exists(fstring):
log('IRC DOWNLOAD')
sc = '#mg-chat' if channel == '#moviegods' else None
        sc = '#zw-chat' if channel == '#Zombie-Warez' else sc
c = xbot.Download(channel=channel, server=server,
numPaquet=int(packetId), nomRobot=bot, secondChannel=channel,
nickname=nick)
if channel == '#moviegods':
c.secondChannel = '#mg-chat'
if channel == '#Zombie-Warez':
c.secondChannel = '#zw-chat'
if channel == '#Ginpachi-Sensei':
c.secondChannel = ''
d = xbot.Grabator(
channel=channel,
secondChannel='',
server=server,
numPaquet=int(packetId),
nomRobot=bot,
nickname=nick,
objetDL=c
)
if channel == '#moviegods':
d.secondChannel = '#mg-chat'
if channel == '#Ginpachi-Sensei':
d.secondChannel = ''
t = threading.Thread(target=d.start)
t.start()
# x.start()
# t = threading.Thread(target=d.start)
# t.start()
# t.join()
streamlink = 'http://localhost:9085/vfs/%s' % fstring
if download.isdigit():
log('Start play process')
dialog = xbmcgui.DialogProgress()
size = float(download)
status = lambda x: (float(x) / size) * 100
dialog.create('Downloading File', 'Checking if it Exists...')
cancel = 0
tsys = copy.copy(hf.traditional)
tsys = [(tsys[-3][0], '')]
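        # hf.traditional[-3] is the megabyte step (1048576, 'M'); dropping the
        # suffix makes hf.size() return a bare MB figure for the progress maths.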
b = plugin.get_setting('bf_time', int)
up = dialog.update
log('Checking existence')
while not os.path.exists(fstring):
up(0)
if dialog.iscanceled():
cancel = 1
break
log('Found')
up(0, 'File Found')
xsize = os.path.getsize(fstring)
import timeit
start = timeit.default_timer()
wait = 0
ysize = 0
from VideoParser import VideoParser as VP
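        # Sample the file's growth for ~5 seconds to estimate the transfer
        # speed in MB/s before deciding how much to buffer.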
while wait <= 5:
up(int(status(hf.size(os.path.getsize(fstring), tsys))),
'Downloading File', '{} of {}'.format(hf.size(os.path.getsize(fstring),
hf.traditional),
size))
ysize = os.path.getsize(fstring) - xsize
wait = timeit.default_timer() - start
spd = (ysize / wait) / float(hf.alternative[3][0])
log('SPEED %.2f M/s' % spd)
# lngth = 0
# from multiprocessing.pool import ThreadPool
# p = ThreadPool(1)
# l = p.apply_async(VP().getVideoLength,(fstring,))
# while lngth == 0:
# lngth = l.get()
# log('VP Length %s' % lngth)
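        # Derive the buffering threshold (percentage of the file to pre-download)
        # from the bf_time setting and the measured speed; values above 100 are
        # clamped to 90 below so playback never waits for the whole file.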
factor = b * (((size / 1420) * 2) / spd) if ysize != 0 else b
log('FACTOR %s' % factor)
factor = factor if factor <= 100 else 90
while status(hf.size(os.path.getsize(fstring),
tsys)) <= factor: # ((float(size)/5)/size)*.6*100:# while status(hf.size(os.path.getsize(fstring), tsys)) <= b:
up(int(status(hf.size(os.path.getsize(fstring), tsys))),
'Downloading File', '{} of {}'.format(hf.size(os.path.getsize(fstring),
hf.traditional),
size))
if dialog.iscanceled():
cancel = 1
break
log('Cancel: %s' % cancel)
if not cancel:
dialog.close()
plugin.set_resolved_url(fstring)
def get_meta():
dialog = xbmcgui.Dialog()
showcache = plugin.get_storage('showcache')
optionlist = ['tvshow', 'movie', 'Storage', 'none']
storagelist = []
try:
for show in showcache:
plugin.log.info(showcache)
storagelist = [x for x in showcache if x != 'last']
plugin.log.info(storagelist)
except Exception, e:
plugin.log.info('ERROR %s' % e)
imdb = ''
tvdb = ''
tmdb = ''
headers = {'Content-Type': 'application/json',
'trakt-api-version': '2',
'trakt-api-key': '05bcd2c0baf2685b8c196162d099e539033c21f7aa9fe1f87b234c2d62c2c1e4'}
index = dialog.select('Choose Video Type', optionlist)
stype = optionlist[index]
search_meta = []
option_list = []
if index == 3: return {}
plugin.log.info('INDEX: %s' % index)
if index == 0 or index == 2:
if stype == 'tvshow':
keyboard = xbmc.Keyboard('', 'Enter a Title', False)
keyboard.doModal()
if keyboard.isConfirmed():
title = keyboard.getText()
results = \
requests.get('https://api-v2launch.trakt.tv/search?query=%s&type=show'
% title, headers=headers).json()
# results = api.get_matching_shows(title)
for item in results:
option = {
'tvdb_id': item['show']['ids']['tvdb'],
'title': item['show']['title'],
'imdb_id': item['show']['ids']['imdb'],
'trakt_id': item['show']['ids']['trakt'],
'year': item['show']['year']
}
search_meta.append(option)
for option in search_meta:
disptitle = option['title'] + ' (' + str(option['year']) + ')'
option_list.append(disptitle)
index = dialog.select('Choose', option_list)
Show = search_meta[index]
shownep = api.get_show_and_episodes(Show['tvdb_id'])
showcache[str(Show['title'])] = {'title': Show['title'],
'data': [shownep[0].__dict__, [x.__dict__ for x in
shownep[1]]],
'day': datetime.datetime.today().toordinal()}
showcache['last'] = showcache[str(Show['title'])]
elif stype == 'Storage':
# xbmc.sleep(200)
# showcache.sync()
today = datetime.datetime.today().toordinal()
index = dialog.select('Stored Meta', storagelist)
sdata = showcache[storagelist[index]]
showcache['last'] = sdata
data = sdata['data']
if today - sdata['day'] <= 5:
shownep = [SEP(**data[0]), [SEP(**x) for x in data[1]]]
else:
shownep = api.get_show_and_episodes(data[0]['id'])
showcache[storagelist[index]]['data'] = [shownep[0].__dict__, [x.__dict__ for x in shownep[1]]]
plugin.log.info('STORAGE FOUND')
stype = 'tvshow'
Show = {'title': shownep[0].name, 'tvdb_id': shownep[0].id,
'imdb_id': shownep[0].imdb_id}
option2 = '-1'
season_list = []
for item in shownep[1]:
if option2 != item.season_number:
option2 = item.season_number
ep_list = []
for item2 in shownep[1]:
if item2.season_number == option2:
ep_list.append(item2)
start_ep = ep_list[0].absolute_number
end_ep = ep_list[-1].absolute_number
season_list.append('Season %s Episodes (%s - %s)'
% (option2, start_ep, end_ep))
index = dialog.select('Choose Season', season_list)
season = re.search('Season (.+?) Episodes',
season_list[index]).group(1)
episode_list = [[], []]
plugin.log.info('SEASON' + season)
for item in shownep[1]:
if item.season_number == season:
disptitle = '%sx%s (%s) %s' % (item.season_number,
item.episode_number, item.absolute_number,
item.name)
episode_list[0].append(disptitle)
episode_list[1].append(item)
index = dialog.select('Choose Episode', episode_list[0])
episode = episode_list[1][index]
showcache['last']['index'] = showcache['last']['data'][1].index(episode.__dict__)
# keyboard = xbmc.Keyboard('','Enter a Season',False)
# keyboard.doModal()
# if keyboard.isConfirmed(): season = keyboard.getText()
# keyboard = xbmc.Keyboard('','Enter an Episode',False)
# keyboard.doModal()
# if keyboard.isConfirmed(): episode = keyboard.getText()
# episode = shownep[1][episode]api.get_episode_by_season_ep(Show['tvdb_id'],season,episode)
try:
infoLabels = transform_ep_object(episode)
except Exception, e:
log(e)
infoLabels['TVShowTitle'] = Show['title']
imdb = Show['imdb_id']
tvdb = Show['tvdb_id']
img = infoLabels['cover_url']
infoLabels['backdrop_url'] = shownep[0].fanart_url
plugin.log.info('INFO Labels \t %s' % infoLabels)
elif stype == 'movie':
title = plugin.keyboard(heading='Enter a Title')
results = \
requests.get('https://api-v2launch.trakt.tv/search?query=%s&type=movie'
% title, headers=headers).json()
plugin.log.info('Results %s' % results)
for option in results:
disptitle = '%s (%s)' % (option['movie']['title'],
option['movie']['year'])
option_list.append(disptitle)
dialog = xbmcgui.Dialog()
index = dialog.select('Choose', option_list)
Movie = results[index]['movie']
plugin.log.info('Movie: %s' % Movie)
infoLabels = {'cover_url': Movie['images']['poster']['medium'], 'plot': Movie['overview'],
'backdrop_url': Movie['images']['fanart']['full'], 'year': Movie['year'], 'title': Movie['title']}
# if stype == 'tvshow':
# api_url = 'https://api-v2launch.trakt.tv/search?id_type=trakt-show&id=%s' % (Show['trakt_id'])
# request = requests.get(api_url, headers=headers)
# plugin.log.info('TRAKT JSON %s' % request.json())
# trakt_meta = request.json()[0]['show']
# plugin.log.info("Trakt_meta %s" % trakt_meta)
# infoLabels['TVShowTitle'] = trakt_meta['title']
# infoLabels['backdrop_url'] = trakt_meta['images']['fanart']['full']
plugin.log.info('infoLabels: %s' % infoLabels)
latest = infoLabels
latest['latest'] = 'latest'
table.delete(latest='latest')
table.upsert(latest, ['latest'])
return infoLabels
def transform_ep_object(episode):
meta = {'episode_id': episode.id, 'plot': api.check(episode.overview)}
if episode.guest_stars:
guest_stars = episode.guest_stars
if guest_stars.startswith('|'):
guest_stars = guest_stars[1:-1]
guest_stars = guest_stars.replace('|', ', ')
meta['plot'] = meta['plot'] + 'Guest Starring: ' \
+ guest_stars
meta['rating'] = float(api.check(episode.rating, 0))
meta['premiered'] = api.check(episode.first_aired)
meta['title'] = api.check(episode.name)
meta['poster'] = api.check(episode.image)
meta['director'] = api.check(episode.director)
meta['writer'] = api.check(episode.writer)
meta['season'] = int(api.check(episode.season_number, 0))
meta['episode'] = int(api.check(episode.episode_number, 0))
meta['cover_url'] = api.check(episode.image)
return meta
@plugin.route('/delete_file/<name>/<all_files>')
def delete_file(name, all_files=False):
plugin.log.info('NAME ' + name)
tmp_files = glob.glob(tmp_path)
dl_files = glob.glob(dl_path)
import shutil
if str(all_files) == 'True':
try:
for file in dl_files:
log('Deleting %s ...' % file)
try:
shutil.rmtree(file)
except Exception, e:
os.remove(file)
log('DELETE ALL FILES ERROR: %s' % e)
continue
except Exception, e:
log('DELETE ALL FILES ERROR: %s' % e)
pass
try:
for file in tmp_files:
shutil.rmtree(file)
except:
pass
tmpName = re.sub(r'[\W_]+', '', name)
tmpName = tmpName.lower()
plugin.log.info('Temp Name is' + tmpName)
try:
for filename in tmp_files:
plugin.log.info('Filepath is ' + filename)
if tmpName in filename.lower():
os.remove(filename)
except:
pass
try:
for filename in dl_files:
if tmpName in re.sub(r'[\W_]+', '', filename.lower()):
os.remove(filename)
except:
pass
@plugin.route('/webpages/search_ix/<query>/<page>')
def search_ix(
query='**just_search**',
page='0',
id=-1,
labs=None,
):
if labs is None:
labs = {}
page = int(page)
items = []
ix_url = 'http://ixirc.com/api/'
if query == '**just_search**':
query = plugin.keyboard()
results = requests.get(ix_url + '?q=%s&pn=%s' % (query,
page)).json()
total_pages = results['pc']
plugin.log.info('RESULTS %s', results)
results = results['results']
idx = 0
tsys = copy.copy(hf.traditional)
tsys = [(tsys[-3][0], '')]
for item in results:
try:
size = item['szf']
rsize = [float(size[:-3]) * x[0] for x in hf.alternative if x[1] == size[-3:]][0]
log('Size %s' % rsize)
items.append({
'label': item['name'] + ' || Size : %s' % item['szf'],
'info': {'title': item['name'],
'plot': 'Size: %s Network: %s Channel: %s Bot: %s' % (
item['szf'], item['nname'], item['cname'], item['uname'])},
'path': plugin.url_for(
'stream',
download=hf.size(rsize, tsys).replace(' MB', ''),
server=item['naddr'],
channel=item['cname'],
bot=item['uname'],
packetId=item['n'],
filename=item['name'],
),
'is_playable': True,
'context_menu': [('Assign Metadata',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=query,
page=page,
from_XG='IX',
name=False,
bot=False,
cache=False,
))), ('Just Download',
actions.background(plugin.url_for(
'stream',
download=True,
server=item['naddr'],
channel=item['cname'],
bot=item['uname'],
packetId=item['n'],
filename=item['name'],
))), ('Delete File',
actions.background(plugin.url_for('delete_file'
, name=item['name'], all_files=False))),
('Delete All Files',
actions.background(plugin.url_for('delete_file'
, name=item['name'], all_files=True)))],
})
except:
continue
try:
if str(idx) == str(id):
plugin.log.info('SUCCESS')
items[idx]['info'] = labs
items[idx]['thumbnail'] = labs['cover_url']
items[idx]['properties'] = \
{'Fanart_Image': labs['backdrop_url']}
except:
pass
plugin.log.info('IDX INFO %s' % items[idx]['info'])
idx += 1
if page < total_pages:
items.append({'label': 'Next Page >>',
'path': plugin.url_for('search_ix', query=query,
page=str(page + 1))})
return items
@plugin.route('/just_download/<url>/<data>')
def just_download(url, data=None):
if data is None:
data = {}
if str(data) != 'False':
headers['Content-Type'] = 'application/json'
r = requests.put(url, headers=headers, data=json.dumps(data))
else:
r = requests.post(url, headers=headers, data=data)
plugin.log.info('''URL %s
DATA %s
STATUS CODE %s
TEXT %s'''
% (r.url, data, r.status_code, r.text))
@plugin.route('/assign_metadata/<id>/<search_term>/<page>/<name>/<bot>/<from_XG>/<cache>'
)
def assign_metadata(
id,
search_term,
page,
name=False,
bot=False,
from_XG=False,
cache=False,
):
plugin.log.info('NAME %s \n BOT %s CACHE: %s' % (name, bot,
str(cache)))
if cache != 'nope':
meta_cache = plugin.get_storage('meta_cache')
if str(cache) == 'False':
labs = get_meta()
meta_cache = labs
# plugin.log.info('META_CACHE %s' % meta_cache)
elif cache == 'reapply':
labs = table.find_one(latest='latest')
log('META_CACHE: %s' % pp(labs))
elif cache == 'next' or cache == 'prev':
showcache = plugin.get_storage('showcache')
index = showcache['last']['index']
log('CURRENT EP INDEX %s' % index)
index = index + 1 if cache == 'next' else index - 1
episode = SEP(**showcache['last']['data'][1][index])
showcache['last']['index'] = index
try:
labs = transform_ep_object(episode)
except Exception, e:
log(e)
labs['TVShowTitle'] = showcache['last']['title']
labs['backdrop_url'] = showcache['last']['data'][0]['fanart_url']
elif cache != name:
labs = file_meta(cache)
if str(from_XG) == 'HI':
return hi10eps(show=search_term, url=name, labs=labs, id=id)
elif str(from_XG) == 'True':
plugin.log.info('GOING THROUGH XG')
return search(search_term=search_term, page=page, id=id,
labs=labs)
elif str(from_XG) == 'IX':
plugin.log.info('GOING THROUGH IX')
return search_ix(query=search_term, page=page, id=id, labs=labs)
elif str(name) != 'False':
plugin.log.info('GOING THROUGH LIST_PACKLIST')
return list_packlist(
name=name,
search_term=search_term,
bot=bot,
page=page,
labs=labs,
id=id,
cache='nope'
)
# @plugin.route('/enter_custom/')
# def enter_custom():
# server = plugin.keyboard(heading='Enter server (Ex: irc.server.net)')
# channel = plugin.keyboard(heading = 'Enter channel (Ex: #channel)')
# bot = plugin.keyboard(heading = 'Enter bot name')
# packetId = plugin.keyboard(heading = 'Enter Packet Number')
# filename = plugin.keyboard(heading = 'Enter file name (Ex: Movie.mkv)')
# return stream(server=server,channel=channel,bot=bot,packetId=packetId,filename=filename)
@plugin.route('/haruhichan/<key>/<doMeta>/<filename>', name='haru')
@plugin.route('/haruhichan/<key>/')
def haruhichan(key='None', filename='', doMeta='F'):
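    # Scrapes the intel.haruhichan.com packlist search and builds playable items that stream from
    # the #intel channel on irc.rizon.net; when doMeta == 'T' the file matching 'filename' gets the
    # metadata returned by get_meta() attached.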
url = 'http://intel.haruhichan.com/?s='
server = 'irc.rizon.net'
channel = 'intel'
items = []
if key == 'None':
key = plugin.keyboard(heading='Enter Search Term')
if doMeta == 'T':
labs = get_meta()
soup = BS(scraper.get(url + key).text)
results = soup.findAll(attrs={'class': re.compile('noselect')})
for pack in results:
p = pack.findAll('td')
bot = p[0].text
id = p[1].text
name = p[4].string
size = p[3].text
item = {'label': '%s || %s || %s' % (name, size, bot),
'path': plugin.url_for(
'stream',
download=False,
server=server,
channel=channel,
bot=bot,
packetId=id,
filename=name,
), 'context_menu': [('Assign Metadata',
actions.update_view(plugin.url_for('haru'
, doMeta='T', filename=name,
key=key))), ('Just Download',
actions.background(plugin.url_for(
'stream',
download=True,
server=server,
channel=channel,
bot=bot,
packetId=id,
filename=name,
))), ('Delete File',
actions.background(
plugin.url_for(
'delete_file',
name=name,
all_files=False))),
('Delete All Files',
actions.background(plugin.url_for('delete_file',
name=name, all_files=True)))]}
if name == filename:
item['info'] = labs
item['thumbnail'] = labs['cover_url']
item['properties'] = {'Fanart_Image': labs['backdrop_url']}
items.append(item)
return items
@plugin.route('/webpages/xweasel/<query>/<page>')
def xweasel(query='lala', page='1'):
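    # Searches xweasel.org and turns each result row into a playable XDCC item. Results are kept in
    # per-query/per-page plugin storage (ttl=60, in whatever unit get_storage expects) so paging
    # back does not re-hit the site.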
# log('Table %s'% pp(list(table.all())))
# return
global network
lastsearch = plugin.get_storage('lastsearch')
log('PAGE %s QUERY %s' % (page, query))
page = int(page)
if query == 'lala':
query = plugin.keyboard(heading='Search', default=lastsearch['last'])
lastsearch['last'] = query
xrequest = plugin.get_storage('%s_%s' % (query, page), ttl=60)
if len(xrequest.keys()) == 0:
r1 = requests.get('http://www.xweasel.org/Search.php?Description=%s&Page=%s' % (query, page))
log("Request %s" % r1.status_code)
soup = BS(r1.text)
pages = len(soup.findAll('center')[-1].findChildren()) - 2
xrequest['pages'] = pages
results = soup.findAll('tr', attrs={'class': re.compile('row')})
log('RESULTS %s' % len(results))
if len(results) == 0: return
mtitle = (lambda x: re.findall(re.compile(r'(.*?[ .]\d{4})[ .a-zA-Z]*'),
re.sub(r'(\w*)([\\()\\](\b\w*)\S)', '', x))[0])
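        # mtitle: best-effort extraction of a "Title YYYY" prefix from a release filename so it can
        # be fed to movie_meta(); the try/except below falls back to the raw filename when the
        # year pattern is missing.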
items = []
idx = 0
for item in results:
try:
i = list(eval(item['onmouseover'].replace('ShowToolTip', '')))
i = [x for x in i if x != '' and x != ' (Ready)' and x != ' (Full)' and x != ' (0/50)']
i = i[:-1]
filename, network, channel, bot, pack = i
except Exception, e:
log('ERROR: %s %s' % (e, list(eval(item['onmouseover'].replace('ShowToolTip', '')))))
try:
title = mtitle(filename)
title = title.replace('.', ' ')
except:
title = filename
network = 'irc.{}.net'.format(network)
log('NETWORK %s' % network)
log('Movie Item Title: %s' % title)
size = item.findAll('td')[1].text.replace(r' ', ' ')
speed = item.findAll('td')[4].text.replace(r' ', ' ')
log('Item Stats: Speed %s, Size %s' % (speed, size))
realsize = [float(size[:-3]) * x[0] for x in hf.alternative if x[1] == size[-3:]][0]
tsys = copy.copy(hf.traditional)
tsys = [(tsys[-3][0], '')]
mlabs = {}
if title != filename:
mlabs['Size'] = realsize
mlabs['Album'] = speed
mlabs['Artist'] = [bot]
mlabs['Genre'] = str(channel)
# mlabs['plot'] = '\n FILENAME {} \n CHANNEL {} \n BOT {} \n SPEED {} \n SIZE {}'.format(filename,channel,bot,speed,size)
# mlabs['Plot'] = str(filename + ' || Size: ' + size +' || Bot : ' + bot + ' || Speed: '+speed)
c = copy.copy(movie_meta(title))
c['plot'] += '\n {} \n CHANNEL {} \n BOT {} \n SPEED {} \n SIZE {}'.format(filename, channel, bot,
speed, size)
mlabs.update(c)
item = {
'label': str(filename + ' || Size: ' + size + ' || Bot : ' + bot + ' || Speed: ' + speed),
'path': plugin.url_for(
'stream',
download=hf.size(realsize, tsys).replace(' MB', ''),
server=network,
channel=channel,
bot=bot,
packetId=pack,
filename=filename,
),
'is_playable': True,
'context_menu': [('Assign Metadata',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=query,
page=page,
name=filename,
bot=bot,
from_XG=False,
cache=False,
))), ('Reapply Metadata',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=query,
page=page,
name=filename,
bot=bot,
from_XG=False,
cache=True,
))), ('Just Download',
actions.background(plugin.url_for(
'stream',
download=True,
server=network,
channel=channel,
bot=bot,
packetId=pack,
filename=filename,
))), ('Delete File',
actions.background(plugin.url_for('delete_file'
, name=filename,
all_files=False))),
('Delete All Files',
actions.background(plugin.url_for('delete_file'
, name=filename, all_files=True)))],
'info': mlabs if mlabs else '',
'thumbnail': mlabs['thumb'] if mlabs else '',
'properties': {'Fanart_Image': mlabs['backdrop_url']}
}
items.append(item)
try:
if str(idx) == str(id):
items[idx]['info'] = labs
items[idx]['thumbnail'] = labs['cover_url']
items[idx]['properties'] = \
{'Fanart_Image': labs['backdrop_url']}
except:
pass
log('ITEMS %s' % len(items))
idx += 1
xrequest['data'] = items
if page < xrequest['pages']:
xrequest['data'].append({'label': 'Next Page >>',
'path': plugin.url_for('xweasel', query=query,
page=str(page + 1))})
log('ITEMS %s' % len(xrequest['data']))
plugin.finish(items=xrequest['data'], sort_methods=['Size', 'Album', 'Genre', 'Artist'])
# @plugin.cached()
def movie_meta(title):
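    # Resolves movie metadata: first tries the local dataset table (keyed by 'stitle'), then falls
    # back to a trakt.tv search using the title minus its last five characters (normally the
    # trailing " YYYY") plus fanart.tv artwork, and finally upserts the result back into the table.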
# cacheMovie = plugin.get_storage(title)
# if len(cacheMovie.keys()): return cacheMovie['labs']
sqtitle = table.find_one(stitle=title)
if sqtitle:
        log('METADATA CACHE HIT')
return sqtitle
headers = {'Content-Type': 'application/json',
'trakt-api-version': '2',
'trakt-api-key': '05bcd2c0baf2685b8c196162d099e539033c21f7aa9fe1f87b234c2d62c2c1e4'}
results = \
requests.get('https://api-v2launch.trakt.tv/search?query=%s&type=movie'
% title[:-5], headers=headers).json()
yr = title[-4:]
plugin.log.info('Results %s' % pp(results))
if len(results) == 0: return
Movie = results[0]['movie']
img_url = 'http://webservice.fanart.tv/v3/movies/%s?api_key=%s' % (Movie['ids']['imdb'], FA_api)
plugin.log.info('Movie: %s' % pp(Movie))
infoLabels = {}
img_dat = requests.get(img_url).json()
log('IMAGE DATA: %s' % pp(img_dat))
try:
infoLabels['poster'] = img_dat['movieposter'][0]['url']
except:
infoLabels['poster'] = ''
try:
infoLabels['cover_url'] = img_dat['movieposter'][0]['url']
except:
infoLabels['cover_url'] = ''
try:
infoLabels['plot'] = Movie['overview']
except:
infoLabels['plot'] = ''
try:
infoLabels['backdrop_url'] = img_dat['moviebackground'][0]['url']
except:
infoLabels['backdrop_url'] = ''
try:
infoLabels['year'] = Movie['year']
except:
infoLabels['year'] = ''
try:
infoLabels['title'] = Movie['title']
except:
infoLabels['title'] = ''
try:
infoLabels['thumb'] = img_dat['moviethumb'][0]['url']
except:
infoLabels['thumb'] = ''
try:
infoLabels['banner'] = img_dat['moviebanner'][0]['url']
except:
infoLabels['banner'] = ''
try:
infoLabels['fanart'] = img_dat['moviebackground'][0]['url']
except:
infoLabels['fanart'] = ''
try:
infoLabels['clearart'] = img_dat['hdmovieclearart'][0]['url']
except:
infoLabels['clearart'] = ''
try:
infoLabels['clearlogo'] = img_dat['hdmovieclearlogo'][0]['url']
except:
infoLabels['clearlogo'] = ''
# cacheMovie['labs'] = infoLabels
infoLabels['stitle'] = title
table.upsert(infoLabels, ['stitle'])
return infoLabels
@plugin.route('/hi10/', name='cloud10', options={'term': ''})
def hi10(term):
last = plugin.get_storage('lastsearch')
if not term:
term = plugin.keyboard(heading='Search', default=last['last'])
items = []
url = 'http://hi10anime.com/?s=%s' % term
u = requests.get(url)
log(u.status_code)
soup = BS(u.text)
results = soup.findAll(attrs={'class': 'entry-title'})
for r in results:
show = r.parent.find('a').text
link = r.a['href']
title = r.a.text
item = {
'label': title,
'path': plugin.url_for('hi10eps', url=link, show=show),
'info': {'TVShowTitle': show}
}
items.append(item)
return items
# @plugin.cached()
def hi_login(url):
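    # Logs in to hi10anime.com through wp-login.php with the credentials stored in the addon
    # settings, then returns the HTML of the requested page using the same scraper session.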
log_url = 'https://hi10anime.com/wp-login.php'
hiuser = plugin.get_setting('hiusr', str)
hipwd = plugin.get_setting('hipwd', str)
data = {
'log': hiuser,
'pwd': hipwd
}
sess = scraper
s = sess.post(log_url, data=data)
log("Status: %s" % s.status_code)
return sess.get(url).text
@plugin.route('/hi10eps/<show>/<url>')
def hi10eps(show, url, id=None, labs=None):
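    # Builds the episode list for a hi10anime show page. Episode links are read from the
    # 'showLinksTable' tables when present ('column' layout); otherwise every direct .mkv anchor on
    # the page is used ('single' layout). Per-episode metadata comes from gethimeta(), with
    # file_meta() and the bare filename as fallbacks.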
soup = BS(hi_login(url))
bc = soup.findAll(attrs={'class': 'showLinksTable'})#soup.findAll(attrs={'class': 'postMakerTABLE'})
typ = 'column'
try:
eptest = bc[2].findAll(attrs={'class': 'postMakerTR'})[2:]
except Exception, ed:
# log(e.__dict__)
log('ERROR %s LINE: %s' % (ed, sys.exc_info()[2].tb_lineno))
eptest = soup.findAll('a', href=re.compile('mkv'))
typ = 'single'
try:
aid = soup.find('a', attrs={'title': 'AniDB'})
aid = aid['href'].split('aid=')[1]
except Exception, ed:
# log(e.__dict__)
log('ERROR %s LINE: %s' % (ed, sys.exc_info()[2].tb_lineno))
aid = ''
items = []
img = soup.find('p').img['src']
idx = 0
prev_link = ''
for e in eptest:
if typ == 'column':
link = e.find('a')['href']
link = 'https://' + link[link.find('hi10'):]
c = [x for x in e.contents if x != '\n']
episode = c[1].text.split('v')[0]
else:
link = e['href']
link = 'https://' + link[link.find('hi10'):]
episode = e.previous.previous
if link == prev_link:
continue
prev_link = link
try:
episode = int(episode)
info = gethimeta(episode, show, aid)
label = info['title']
except Exception, e:
log('ERROR %s LINE: %s' % (e, sys.exc_info()[2].tb_lineno))
try:
fname = link.rsplit('/')[-1][:-4]
log(fname)
info = file_meta(fname)
label = info['title']
except Exception, f:
log('ERROR %s LINE: %s' % (f, sys.exc_info()[2].tb_lineno))
label = link.rsplit('/')[-1][:-4]
info = {'TVShowTitle': show, 'cover_url': img, 'backdrop_url': img}
try:
if str(idx) == str(id) and labs:
info = labs
except Exception, e:
log('ERROR %s LINE: %s' % (e, sys.exc_info()[2].tb_lineno))
item = {
'label': label,
'path': link,
'info': info,
'thumbnail': info['cover_url'],
'context_menu': [('Assign Metadata',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=show,
page=False,
name=url,
bot=False,
from_XG='HI',
cache=False,
))), ('Reapply Metadata',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=show,
page=False,
name=url,
bot=False,
from_XG='HI',
cache='reapply',
))), ('Next Episode',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=show,
page=False,
name=url,
bot=False,
from_XG='HI',
cache='next',
))), ('Previous Episode',
actions.update_view(plugin.url_for(
'assign_metadata',
id=idx,
search_term=show,
page=False,
name=url,
bot=False,
from_XG='HI',
cache='prev',
)))],
'properties': {'Fanart_Image': info['backdrop_url']},
'info_type': 'video',
'is_playable': True}
idx += 1
log(pp(item))
items.append(item)
for i in items:
log(i['path'])
return items
def gethimeta(episode, show, aid=''):
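    # Looks up episode metadata by cross-referencing AniDB and TVDB: the AniDB XML (cached in
    # plugin storage) gives the air date for the requested episode number, which is then matched
    # against the TVDB episode list within a +/- 2 day window.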
shw = plugin.get_storage(show)
if 'anidb' not in shw.keys() and aid:
log('REQUESTING ANIDB DATA')
r = requests.get(
'http://api.anidb.net:9001/httpapi?request=anime&client=anidbtvdbmeta&clientver=1&protover=1&aid=%s' % aid)
log("Status %s\n" % r.status_code)
anitext = r.text
shw['anidb'] = anitext
else:
anitext = shw['anidb']
soup = BS(anitext)
year = soup.find('startdate').text[:4]
v = [x for x in soup.findAll('epno') if x.text == str(episode)][0]
if 'shownep' not in shw.keys():
title = ' '.join([show, year])
log(title)
id = api.get_matching_shows(show)
log(id)
shw['id'] = id[0][0]
e = api.get_show_and_episodes(shw['id'])
shw['shownep'] = [e[0].__dict__, [i.__dict__ for i in e[1]]]
else:
e = [SEP(**shw['shownep'][0]), [SEP(**i) for i in shw['shownep'][1]]]
airdate = api.convert_date(v.parent.airdate.text)
ep = [i for i in e[1] if
2 >= (lambda x: x.days)(
(airdate - api.convert_date(i.first_aired if i.first_aired else '1963-01-01'))) >= -2][0]
    info = {}
    try:
        info = transform_ep_object(ep)
    except Exception, err:
        log(err)
info['TVShowTitle'] = e[0].name
info['backdrop_url'] = e[0].fanart_url
return info
if __name__ == '__main__':
plugin.run()
| gpl-2.0 | 2,370,805,098,750,441,500 | 39.784108 | 141 | 0.451372 | false |
alkurbatov/squirrel | src/convert.py | 1 | 1073 | # This file is a part of Squirrel project
#
# Copyright (C) 2014, Alexander Kurbatov <[email protected]>
#
# Squirrel is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Squirrel is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
def to_seconds(time):
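    # Parses compact durations such as "1d2h30m" (days/hours/minutes, case-insensitive) into
    # seconds, e.g. to_seconds("1d2h30m") == 95400.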
s = 0
d = re.search(r"(?i)([0-9]+)d", time)
if d:
s += int(d.group(1)) * 24 * 60 * 60
h = re.search(r"(?i)([0-9]+)h", time)
if h:
s += int(h.group(1)) * 60 * 60
m = re.search(r"(?i)([0-9]+)m", time)
if m:
s += int(m.group(1)) * 60
return s
| gpl-3.0 | 9,016,646,689,706,981,000 | 28.805556 | 70 | 0.657036 | false |
dyermd/legos | scripts/QC/merger.py | 1 | 14082 | #! /usr/bin/env python
# Goal: Merge and run coverage analysis on the two Samples generated.
# Output: A mered bam file, and coverage analysis on the merged bam file.
from optparse import OptionParser
import os
import os.path
import sys
import re
import datetime
import json
from QC_Run import QC_Run
from tools import *
class Merger:
# @param bams_to_merge a list of the bam files to merge together
# @param merged_dir the directory in which to place the merged bam file
# @param sample_name the name of the sample. Used for the SM tag
# @param cleanup Flag to delete the temporary files or not. Default: false
def __init__(self, sample_json=None):
if sample_json:
self.sample_json = json.load(open(sample_json))
self.merge_dir = ''
self.bams_to_merge = []
self.runs_to_merge = []
self.QC_Run = QC_Run(self.sample_json)
# merge the following runs
def merge(self):
# this could be just a temporary fix
if os.path.isfile(self.path_to_merged_bam):
print "%s already exists. Not making it again."%self.path_to_merged_bam
else:
print "Sample %s is merging the following runs: %s"%(self.sample_name, self.bams_to_merge)
merge_command = "java -jar /opt/picard/picard-tools-current/MergeSamFiles.jar "
# Add each run's bam file to mergeJob.sh
for bam in self.bams_to_merge:
if not os.path.isfile(bam) or bam[-4:] != ".bam":
print "ERROR: the bam file '%s' does not exist!"%bam
sys.exit(4)
merge_command += "INPUT=%s "%bam
# make sure the merged_dir exists, or make it.
runCommandLine("mkdir -p %s"%self.merged_dir)
#if not os.path.isdir(merged_dir):
#print "ERROR: the output dir '%s' does not exist!"%bam
#sys.exit(4)
# Now set the output file, and then run the merge command
merge_command += " OUTPUT=%s/merged_badHeader.bam "%self.merged_dir
if runCommandLine(merge_command) != 0:
print "ERROR: %s something went wrong with merging!"%self.sample_name
sys.exit(1)
#echo "fixing header for %s/merged_badHeader.bam"
correct_header_command = "samtools view -H %s/merged_badHeader.bam > %s/merged.header.sam "%(self.merged_dir, self.merged_dir)
if runCommandLine(correct_header_command) != 0:
print "ERROR: samtools view -H failed!"
sys.exit(1)
# A better way would be to check to see if the SM tags already match. Then we would be able to use ionstats and such.
			SM_check_command = "grep -Eo 'SM:[a-zA-Z0-9_&/-]*'"
# NEED TO TEST THIS COMMAND. Is there anything that comes before the next : that is important?
# Change the SM: tag so that it matches for every run merged. (There should be one SM tag for each run merged)
# This was the old command. We will keep using this, and then if there are problems, we can manually correct them.
sed_command = 'sed "s/SM:[a-zA-Z0-9_&/-]*/SM:%s/" %s/merged.header.sam > %s/merged.headerCorrected.sam'%(self.sample_name, self.merged_dir, self.merged_dir)
# this updated command will change the SM tag to match everything up to the next : after the SM tag.
# this command deletes the KS: tag!! not good! I don't know why but some headers are tab delimited, and some are not it seems.
#sed_command = 'sed -E "s/SM:[^:]*:/SM:%s:/" %s/merged.header.sam > %s/merged.headerCorrected.sam'%(self.sample_name, self.merged_dir, self.merged_dir)
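			# Illustrative example (hypothetical header line): "@RG\tID:Run_7\tSM:Run_7_tumor" would
			# become "@RG\tID:Run_7\tSM:<sample_name>", so every merged run shares one sample tag.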
if runCommandLine(sed_command) != 0:
print "ERROR: sed command failed!"
sys.exit(1)
# write the new header to merged.bam
reheader_command = "samtools reheader %s/merged.headerCorrected.sam %s/merged_badHeader.bam > %s "%(self.merged_dir, self.merged_dir, self.path_to_merged_bam)
if runCommandLine(reheader_command) != 0:
print "ERROR: sed command failed!"
sys.exit(1)
# set some extra variables for the JSON file.
self.merged_json = "%s/merged.json"%self.merged_dir
# if there is already an index file from a previous merge try, delete it.
if os.path.isfile(self.path_to_merged_bam + ".bai"):
os.remove(self.path_to_merged_bam + ".bai")
# IF specified, cleanup the temporary files
#if self.cleanup:
# Need to cleanup here inorder for TVC to work. there can only be one bam file in the merged dir.
os.remove("%s/merged_badHeader.bam"%self.merged_dir)
os.remove("%s/merged.headerCorrected.sam"%self.merged_dir)
os.remove("%s/merged.header.sam"%self.merged_dir)
print "%s finished merging "%self.merged_dir
# Update the final merged run status
def update_merged_run_status(self, run, merged_perc_avail_bases=0):
pass_fail_merged_status = 'pass'
run_json = json.load(open(run))
if run_json['run_type'] == 'germline':
			merged_perc_avail_bases = run_json['run_data']['amp_cov']
print merged_perc_avail_bases, self.sample_json['analysis']['settings']['cutoffs']['merged_amp_cov']
# check to see if >90% of the bases are shared between the tumor normal comparison
if 'merged_amp_cov' in self.sample_json['analysis']['settings']['cutoffs'] and merged_perc_avail_bases != '':
if merged_perc_avail_bases < self.sample_json['analysis']['settings']['cutoffs']['merged_amp_cov']:
pass_fail_merged_status = 'REQUEUE'
# write the final statuses here
run_json['pass_fail_merged_status'] = pass_fail_merged_status
run_json['merged_perc_avail_bases'] = merged_perc_avail_bases
write_json(run, run_json)
# @param runs the runs of a sample
# @param run_name either '', 'Normal/' or 'Tumor/'
# @param pref the prefix of this type of merge. either 'normal_' 'tumor_' or ''
	# @returns True if a new merge is needed (the passing bam files are stored in self.bams_to_merge and the output directory in self.merged_dir), False otherwise.
def check_merge(self, runs, run_name='', pref=''):
# vars to return
merge = False
self.bams_to_merge = []
self.runs_to_merge = []
# Use this count so that we won't have to write over past merges if there are multiple merges.
if 'merged_%scount'%pref not in self.sample_json:
self.sample_json['merged_%scount'%pref] = 0
# first check to see if all of the runs pass.
# Get all of the passing bam files for this sample.
pending_runs, passing_runs = self.QC_Run.get_runs_status(runs)
if len(pending_runs) != 0:
print "Not merging. After QC_runs, runs should either be 'pass' or 'fail', not 'pending'. Pending runs: ", pending_runs
elif len(passing_runs) < 1:
# if none of the runs are passing, then don't do anything.
pass
elif self.sample_json['sample_status'] != "pending_merge" and self.sample_json['sample_status'] != "merged":
# If any runs of the sample are not ready to be merged either because of 3x3 table error rate questions or other reasons, don't merge this sample.
print "%s the 'sample_status' is '%s'. Needs to be 'pending_merge' to merge the runs."%(self.sample_json['sample_name'], self.sample_json['sample_status'])
elif self.sample_json['sample_status'] == 'pending_merge':
# Merge these runs.
# First get the passing bams from the passing runs.
for run in passing_runs:
run_json = json.load(open(run))
self.bams_to_merge.append("%s/%s"%(run_json['run_folder'], run_json['analysis']['files'][0]))
self.runs_to_merge.append(run_json['run_name'])
# sort the run names
self.runs_to_merge.sort()
# If this sample has already been merged: If the runs to generate the merged bam don't match the current list:
# then delete the last created bam file and merge these runs
# else don't remerge these files
if len(self.bams_to_merge) == 1:
# There is only one run, so don't merge it. Set the "final_%sjson"%pref flag to show what the final run is
self.sample_json["final_%sjson"%pref] = run
# use the 'merged_json' flag rather than the 'final_json' flag because 'final_json' can be set by a single non-merged run.
elif 'merged_%sjson'%pref in self.sample_json and os.path.isfile(self.sample_json['merged_%sjson'%pref]):
merged_json_data = json.load(open(self.sample_json['merged_%sjson'%pref]))
# If the runs used to generate the current merged.bam file dont match the current bams_to_merge, then merge them. Otherwise don't
if merged_json_data['json_type'] == 'merged' and set(self.bams_to_merge) != set(merged_json_data['bams_used_to_merge']):
# in order to manage space, delete the last merged folder that was created.
if self.sample_json['analysis']['settings']['cleanup'] == True:
# IDEA delete the entire folder? Or just the bam file?
merged_bam = "%s/%s"%(merged_json_data['run_folder'], merged_json_data['analysis']['files'][0])
print " Deleting the old merged bam file: %s"%merged_bam
os.remove(merged_bam)
# Add one to the merged_count
self.sample_json['merged_%scount'%pref] += 1
# set new path to the merged_json
self.merged_dir = "%s/%sMerged_%d"%(self.sample_json['sample_folder'], run_name, self.sample_json['merged_%scount'%pref])
merge = True
else:
# Don't merge these runs because they've already been merged.
print "%s the runs: '%s' have already been merged"%(self.sample_json['sample_name'], self.bams_to_merge)
else:
# Merge these runs
self.merged_dir = "%s/%sMerged"%(self.sample_json['sample_folder'], run_name)
# Add one to the merged_count
self.sample_json['merged_%scount'%pref] += 1
merge = True
return merge
# merge the runs of a sample
# @param runs the bam files to merge
# @param merged_dir the ouptut_dir in which to place the merged bam file
# @param pref the prefix (either '', 'normal_', or 'tumor')
# @param run_type either germline, normal, or tumor.
# @param run_name either Merged, Normal_Merged or Tumor_Merged. Used for the titles of the 3x3 tables.
def merge_runs(self, run_type, run_name='', pref=''):
# if the file already exists, then merging must have finished, and don't merge again.
self.merged_json = "%s/merged.json"%self.merged_dir
if os.path.isfile(self.merged_json):
print "%s already exists so not merging the bam files again"%self.merged_json
else:
self.sample_name = self.sample_json['sample_name']
			# get today's date to format the merged.bam file name
curr_date = datetime.date.today()
# the name follows this format: A_227_Tumor_Merged_02152015
run_name = "%s_%sMerged_%02d%02d%s"%(self.sample_name, run_name, curr_date.month, curr_date.day, curr_date.year)
merged_bam = "%s.bam"%(run_name)
self.path_to_merged_bam = "%s/%s"%(self.merged_dir, merged_bam)
self.merge()
# now set the json files
# create the merged_bam's json file here so that the merger.py script can run on its own if necessary.
merged_json = {
'analysis': {
'files': [merged_bam]
},
'bams_used_to_merge':self.bams_to_merge,
'sample_name': self.sample_name,
'merged_bam': self.path_to_merged_bam,
'json_file': self.merged_json,
"json_type": "merged",
"pass_fail_status": "pending",
"project": self.sample_json['project'],
"run_folder": self.merged_dir,
"run_name": run_name,
"run_num": self.sample_json['merged_%scount'%pref],
"run_type": run_type,
"runs_used_to_merge": ', '.join(self.runs_to_merge),
"sample": self.sample_json['sample_name'],
"sample_folder": self.sample_json['sample_folder'],
"sample_json": self.sample_json['json_file']
}
#write new json file
write_json(self.merged_json, merged_json)
# QC the merged run.
self.QC_Run.runTVC_COV(self.merged_json, pref)
self.QC_Run.getRunInfo(self.merged_json, pref)
# Update the merge pass/fail status based on the metrics gathered by QC_getRunInfo.sh
self.QC_Run.update_run_status(self.merged_json, 1)
# Also store the path to this merged bam file in the sample's json file. Not really necessary, but it seems like a good idea.
#if 'merged' not in self.sample_json['analysis']['files']:
# self.sample_json['analysis']['files']['merged'] = {}
#self.sample_json['analysis']['files']['merged']['%sbam'%pref] = merger.path_to_merged_bam
# store the path to this merged bam folder in the sample's json file.
#self.sample_json['merged_%sjson'%pref] = merged_dir
# If the merge_json passes the cutoffs, set it as the final_json
merge_json = json.load(open(self.merged_json))
# add the path to this merge even if it doesn't pass
self.sample_json["merged_%sjson"%pref] = self.merged_json
if merge_json['pass_fail_status'] == 'pass':
# Add a path to the final merged_json
self.sample_json["final_%sjson"%pref] = self.merged_json
# write the modified sample_json file
write_json(self.sample_json['json_file'], self.sample_json)
# If we need this script to run on its own, update it when it is needed
#if __name__ == '__main__':
#
# # set up the option parser
# parser = OptionParser()
#
# # add the options to parse
# parser.add_option('-j', '--json', dest='json', help='The samples json file. Will be used to get the passing bams.')
# parser.add_option('-o', '--merged_dir', dest='output', help='The output file. If no output file is specified, output will be written to the screen')
# parser.add_option('-s', '--sample_name', dest='sample', help='The name of the sample. Will be used to fix the SM tag of the merged BAM file')
# parser.add_option('-b', '--bams', dest='bams', action='append', help='Use a -b for for each bam to include in merging')
# parser.add_option('-c', '--cleanup', dest='cleanup', action='store_true', help='option to cleanup the temporary files used in merging and such.')
#
# (options, args) = parser.parse_args()
#
# if options.json and (not options.output and not options.sample and not options.bams):
# Merger(options.json)
# # I don't have time to implement these other options yet...
# #elif not options.json and (options.output and options.sample and options.bams):
## merger = Merger()
## merger.merge()
## Merger(options.bams, options.output, options.sample)
# else:
# print "USAGE_ERROR: -j or (-o, -s and -b) are required. If the json file is provided, do not provide the other options. If the other options are provided, do not provide a json file."
# print "only -j is implemented so far..."
# parser.print_help()
# sys.exit(1)
#
| gpl-2.0 | 7,565,517,798,160,435,000 | 48.066202 | 186 | 0.684704 | false |
taoistly/dec | dec.py | 1 | 11798 | import shutil
import os
import time
import suffixtree
def scan_kmer(read):
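    # Emits (kmer, (file_offset, signed_position)) pairs for both ends of a read pair: positions
    # are 1-based, positive for the first end and negative for the second, so the sign records
    # which end the k-mer came from. Reads containing non-ACGTN characters and k-mers containing
    # 'N' are skipped.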
global K
offset, seq1, seq2 = read.split("\t")
ret = []
for base in seq1:
if base not in "ACGTN": return ret
for base in seq2:
if base not in "ACGTN": return ret
for idx in range(len(seq1) - K + 1):
ret += [(seq1[idx:idx + K], (offset, idx + 1))]
for idx in range(len(seq2) - K + 1):
ret += [(seq2[idx:idx + K], (offset, -idx - 1))]
return filter(lambda x: "N" not in x[0], ret)
def align_by_anchor(kv):
global ref_file_path, K, MATCH_RATE, JUDGE_THRESHOLD
class anchored_read(object):
__slots__ = ('id', 'kmeridx', 'reversed', 'offset1', 'offset2', 'seq1', 'seq2')
def __init__(self, id, kmeridx, readFile1, readFile2):
self.id = int(id)
self.kmeridx = kmeridx
self.reversed = False
readFile1.seek(self.id)
readFile2.seek(self.id)
self.seq1 = readFile1.readline().strip()
self.seq2 = readFile2.readline().strip()
def reverse(self):
self.reversed = not self.reversed
self.kmeridx = -self.kmeridx
self.seq1, self.seq2 = self.seq2, self.seq1
def match_one_seq(self, seq1, seq2, offset, heuristic):
""" to compute match score for same end with given offset
seq1 and seq2 are one end of reads to compare,
offset==3 means seq1[0] match seq2[3] (r2[offset])
0123---
0123456
"""
if offset < 0:
seq1, seq2 = seq2, seq1
offset = -offset
overlap = min(len(seq1), len(seq2) - offset)
match_count = 0
for i in range(overlap):
match_count += (seq1[i] == seq2[i + offset])
if heuristic and i == 4 and match_count < 4: return
if float(match_count) / overlap < MATCH_RATE or match_count < K: return
return match_count - (overlap - match_count) # * (overlap - match_count)
def match(self, read):
""" "read" is the r1 on top, kmer locates at "a";
"self" is the r2(anchor) on bottom, kmer locates at "b";
"""
x = self.kmeridx - read.kmeridx
anchored_score = self.match_one_seq(read.seq1, self.seq1, x, False)
if not anchored_score: return
best_match = None
for y in range(-len(read.seq2) + 1, len(self.seq2)):
match_result = self.match_one_seq(read.seq2, self.seq2, y, True)
if not match_result: continue
score = anchored_score + match_result # - 0.05 * (x - y) * (x - y)
if not best_match or best_match[0] < score:
best_match = (score, x, y)
return best_match
class anchor_aligment(list):
def __init__(self, read):
super(anchor_aligment, self).__init__()
self.anchor(read, 0, 0)
def anchor(self, read, left_offset, right_offset):
self += [read]
read.offset1, read.offset2 = (left_offset, right_offset)
def match(self, read):
return self[0].match(read)
def report_doubt(self, kmer, col_id):
doubts = []
seq1_left, seq1_right = 0, len(self[0].seq1)
seq2_left, seq2_right = 0, len(self[0].seq2)
for read in self:
seq1_left = min(seq1_left, read.offset1)
seq1_right = max(seq1_right, read.offset1 + len(read.seq1))
seq2_left = min(seq2_left, read.offset2)
seq2_right = max(seq2_right, read.offset2 + len(read.seq2))
for col_idx in range(seq1_left, seq1_right):
basecount = {i: 0 for i in "ACGTN"}
for read in self:
if 0 <= col_idx - read.offset1 < len(read.seq1):
basecount[read.seq1[col_idx - read.offset1]] += 1
if basecount.values().count(0) == 4: continue
for read in self:
if 0 <= col_idx - read.offset1 < len(read.seq1) and basecount[read.seq1[col_idx - read.offset1]] <= JUDGE_THRESHOLD:
doubts += [(kmer + str(col_id),
(read.seq1[col_idx - read.offset1],
(read.id + col_idx - read.offset1) * [1, -1][read.reversed],
)
)]
for i in basecount:
if basecount[i] > JUDGE_THRESHOLD:
doubts += [(kmer + str(col_id), (i.lower(), basecount[i]))]
col_id += 1
for col_idx in range(seq2_left, seq2_right):
basecount = {i: 0 for i in "ACGTN"}
for read in self:
if 0 <= col_idx - read.offset2 < len(read.seq2):
basecount[read.seq2[col_idx - read.offset2]] += 1
if basecount.values().count(0) == 4: continue
for read in self:
if 0 <= col_idx - read.offset2 < len(read.seq2) and basecount[read.seq2[col_idx - read.offset2]] <= JUDGE_THRESHOLD:
doubts += [(kmer + str(col_id),
(read.seq2[col_idx - read.offset2],
(read.id + col_idx - read.offset2) * [-1, 1][read.reversed],
)
)]
for i in basecount:
if basecount[i] > JUDGE_THRESHOLD:
doubts += [(kmer + str(col_id), (i.lower(), basecount[i]))]
col_id += 1
return doubts, col_id
def print_pileup(self):
pileup = ""
seq1_left, seq1_right = 0, len(self[0].seq1)
seq2_left, seq2_right = 0, len(self[0].seq2)
for read in self:
seq1_left = min(seq1_left, read.offset1)
seq1_right = max(seq1_right, read.offset1 + len(read.seq1))
seq2_left = min(seq2_left, read.offset2)
seq2_right = max(seq2_right, read.offset2 + len(read.seq2))
for read in self:
pileup += str(read.id).center(10) + "." * (read.offset1 - seq1_left)
pileup += read.seq1 + "." * (seq1_right - read.offset1 - len(read.seq1))
pileup += "..." + "." * (read.offset2 - seq2_left) + read.seq2
pileup += "." * (seq2_right - read.offset2 - len(read.seq2)) + "\n"
return pileup
# load reads from disk
readFile1 = open(ref_file_path[0])
readFile2 = open(ref_file_path[1])
Ast = suffixtree.SuffixTree()
Ust = suffixtree.SuffixTree()
alignment_group = []
for value in kv[1]:
read = anchored_read(value[0], value[1], readFile1, readFile2)
best, bestidx = None, -1
if read.kmeridx < 0: read.reverse()
Avote = Ast.tid_vote(read.seq1)
Uvote = Ust.tid_vote(read.seq2)
vote = [(tid, Avote[tid], Uvote[tid]) for tid in range(len(alignment_group))]
if vote:
bestidx, Abest, Ubest = max(vote, key=lambda x: x[1][1] + x[2][1])
if read.match_one_seq(read.seq1, alignment_group[bestidx][0].seq1, Abest[0], False) and \
read.match_one_seq(read.seq2, alignment_group[bestidx][0].seq2, Ubest[0], False):
best = (bestidx, Abest[0], Ubest[0], read.reversed)
# for i, alignment in enumerate(alignment_group):
# match_result = alignment.match(read)
# if match_result and (not best or best[0] < match_result[0]):
# best, bestidx = match_result, i
if not best:
Ast.append(read.seq1, len(alignment_group))
Ust.append(read.seq2, len(alignment_group))
alignment_group += [anchor_aligment(read)]
else:
alignment_group[bestidx].anchor(read, best[1], best[2])
report, col = [], 0
log = "===%s===\n" % (kv[0])
for alignment in alignment_group:
doubts, col = alignment.report_doubt(kv[0], col)
report += doubts
log += alignment.print_pileup()
# logfile = open("/temp/refread/log" + kv[0], "w")
# logfile.write(log)
# logfile.close()
return report
def merge_in_partition(kv_iter):
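    # Reduces doubt records within a partition using a union-find over column ids: uppercase bases
    # are single-read observations keyed by (base, signed position), lowercase bases carry the
    # aggregated consensus counts. Positions whose base count stays at or below JUDGE_THRESHOLD
    # are re-emitted for the next round, together with the consensus counts above the threshold.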
ufset = {}
posdict = {}
basecount = {}
def find(k):
if ufset[k] != k: ufset[k] = find(ufset[k])
return ufset[k]
for v, k in kv_iter:
if v not in ufset:
ufset[v] = v
basecount[v] = [0, 0, 0, 0]
else:
v = find(v)
if k[0] == "a": basecount[v][0] += k[1]
elif k[0] == "c": basecount[v][1] += k[1]
elif k[0] == "g": basecount[v][2] += k[1]
elif k[0] == "t": basecount[v][3] += k[1]
elif k not in posdict:
posdict[k] = v
if k[0] == "A": basecount[v][0] += 1
if k[0] == "C": basecount[v][1] += 1
if k[0] == "G": basecount[v][2] += 1
if k[0] == "T": basecount[v][3] += 1
else:
u = find(posdict[k])
ufset[v] = u
basecount[u] = [basecount[u][i] + basecount[v][i] for i in range(4)]
for k in posdict:
u = find(posdict[k])
if k[0] == "A" and basecount[u][0] > JUDGE_THRESHOLD: continue
if k[0] == "C" and basecount[u][1] > JUDGE_THRESHOLD: continue
if k[0] == "G" and basecount[u][2] > JUDGE_THRESHOLD: continue
if k[0] == "T" and basecount[u][3] > JUDGE_THRESHOLD: continue
yield (u, k)
for v in ufset:
if ufset[v] == v:
count = basecount[v]
if count[0] > JUDGE_THRESHOLD: yield (v, ('a', count[0]))
if count[1] > JUDGE_THRESHOLD: yield (v, ('c', count[1]))
if count[2] > JUDGE_THRESHOLD: yield (v, ('g', count[2]))
if count[3] > JUDGE_THRESHOLD: yield (v, ('t', count[3]))
def judge(key_values):
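    # Final vote for one merged column: the most frequent base (direct reads plus lowercase
    # consensus counts) becomes the reference, and every read position carrying a rare
    # (<= JUDGE_THRESHOLD) different base, or an N, is reported as "position<TAB>reference_base".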
values = list(key_values[1])
ACGT = {i: 0 for i in "ACGT"}
for value in values:
if value[0] != "N":
if value[0] in "ACGT": ACGT[value[0]] += 1
if value[0] in "acgt": ACGT[value[0].upper()] += value[1]
ref, refcnt = max(ACGT.items(), key=lambda x: x[1])
if refcnt <= JUDGE_THRESHOLD: return
for value in values:
if value[0] in "acgt" or ACGT[value[0]] > JUDGE_THRESHOLD and value[0] != 'N': continue
yield str(value[1]) + "\t" + ref
def run(sc, inputfile="/home/x/xieluyu/reads/lineno_seq1_seq2.txt", outputdir="/home/x/xieluyu/output"):
global P1, P2, P3, P4
if os.path.exists(outputdir): shutil.rmtree(outputdir)
start = time.time()
reads_file = sc.textFile(inputfile, 64)
P1 = reads_file.flatMap(scan_kmer)
P2 = P1.groupByKey(1024).filter(lambda kv: len(kv[1]) > 1).flatMap(align_by_anchor)
P3 = P2.mapPartitions(merge_in_partition)
num_partitions = P2.getNumPartitions()
while num_partitions != 1:
num_partitions = (num_partitions - 1) / 4 + 1
P3 = P3.partitionBy(num_partitions).mapPartitions(merge_in_partition)
P4 = P3.groupByKey().flatMap(judge)
P4.saveAsTextFile("file://" + outputdir)
end = time.time()
print "elapse:", end - start, "seconds"
# ref_file_path = ["/temp/refread/Phix1.fq", "/temp/refread/Phix2.fq"]
ref_file_path = ["/temp/refread/Ecoli1.fq", "/temp/refread/Ecoli2.fq"]
K = 14
MATCH_RATE = 0.9
JUDGE_THRESHOLD = 2
if __name__ == '__main__':
from pyspark import SparkContext, SparkConf
conf = SparkConf().setAppName("dec") # .setMaster("local[4]")
sc = SparkContext(conf=conf)
run(sc)
| mit | -735,112,777,300,059,300 | 41.746377 | 136 | 0.515765 | false |
guaq/paikkis | model.py | 1 | 3427 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
0 place name
1 language code of the name
2 name of the language
3 place type code
4 place type description
5 kkj/pkj northing
6 kkj/pkj easting
7 kkj/ykj northing
8 kkj/ykj easting
9 etrs/tm35fin northing
10 etrs/tm35fin easting
11 municipality code
12 municipality name
13 sub-region code
14 sub-region name
15 region (maakunta, NUTS3) code
16 region name
17 major region (NUTS2) code
18 major region name
19 province code
20 province name
21 5x5 map sheet identifier
22 rescue service map sheet identifier
23 etrs-tm35 map sheet identifier
24 officiality code of the name's language
25 officiality description of the name's language
26 majority-status code of the name's language
27 majority-status description of the name's language
28 place name source code
29 place name source description
30 place id
31 place name id
Finnish-English glossary:
http://www.google.fi/url?sa=t&rct=j&q=&esrc=s&source=web&cd=18&ved=0CEUQFjAHOAo&url=http%3A%2F%2Fwww.pohjois-karjala.fi%2Fdman%2FDocument.phx%2F~maakuntaliitto%2FJulkiset%2FEUFUND%2FHankesanasto%3FfolderId%3D~maakuntaliitto%252FJulkiset%252FEUFUND%26cmd%3Ddownload&ei=-RKIUISCGMKA4gS9roHYCg&usg=AFQjCNEqVl4XU868FwPn8C-_qlnozH81Vw&cad=rja
"""
from __future__ import print_function
import sys
import codecs
import sqlite3
from coordinates import Translate, COORD_TYPE_WGS84, COORD_TYPE_ETRSTM35FIN
o8 = codecs.getwriter('utf-8')(sys.stdout)
e8 = codecs.getwriter('utf-8')(sys.stderr)
# Input: dictionary with ['type'] is coordinate system type identifier
# ['N'] is coordinate Northing / Lat
# ['E'] in coordinate Easting / Lon
# type identifier of the coordinate system to transform the input
# coordinates to
# Output: dictionary with ['type'] is coordinate system type identifier
# ['N'] is coordinate Northing / Lat
# ['E'] in coordinate Easting / Lon
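# Illustrative call (hypothetical coordinate values), assuming the contract described above:
#   wgs84 = Translate({'type': COORD_TYPE_ETRSTM35FIN, 'N': 6715706.0, 'E': 386256.0},
#                     COORD_TYPE_WGS84)
#   wgs84['N'], wgs84['E']  # -> latitude and longitude in degrees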
class Place(object):
def __init__(self, lst):
self.name = lst[0]
wgs84_coords = Translate({'type': COORD_TYPE_ETRSTM35FIN,
'N': float(lst[9]), 'E': float(lst[10])}, COORD_TYPE_WGS84)
self.lat = wgs84_coords['N']
self.lon = wgs84_coords['E']
self.type_id = lst[3]
self.municipality_id = lst[11]
self.sub_region_id = lst[13]
self.NUTS3_region_id = lst[15]
self.NUTS2_region_id = lst[17]
self.id = lst[30]
def __repr__(self):
return "<Place %s %s>" % (self.id, str(self))
def __str__(self):
return unicode(self).encode('ASCII', 'backslashreplace')
def __unicode__(self):
return u"{0}, {1}; {2}, {3}".format(self.name, self.municipality_id, self.lat, self.lon)
def insert_stmt(self):
return (u"INSERT INTO places (name, municipality_id, id, lat, lon, type_id, sub_region_id, NUTS2_region_id, NUTS3_region_id) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(self.name,
self.municipality_id,
self.id,
self.lat,
self.lon,
self.type_id,
self.sub_region_id,
self.NUTS2_region_id,
self.NUTS3_region_id))
def insert_fts_stmt(self):
return (u"INSERT INTO places_fts (id, name) VALUES (?, ?)",
(self.id,
self.name))
| mit | -5,161,143,840,188,040,000 | 32.821782 | 338 | 0.605679 | false |
yusihao51/Minecraft | views.py | 1 | 24491 | # -*- coding: utf-8 -*-
# Imports, sorted alphabetically.
# Python packages
import os
import socket
import subprocess
import sys
import datetime
from math import sin, pi
# Third-party packages
import pyglet
from pyglet.text import Label
from pyglet.gl import *
# Modules from this project
import globals as G
from gui import frame_image, Rectangle, backdrop, Button, button_image, \
button_highlighted, ToggleButton, TextWidget, ScrollbarWidget, \
button_disabled, resize_button_image
from textures import TexturePackList
from utils import image_sprite, load_image
__all__ = (
'View', 'MainMenuView', 'OptionsView', 'ControlsView', 'TexturesView', 'MultiplayerView'
)
class Layout(object):
def __init__(self, x, y):
self.components = []
self._position = x, y
self.width, self.height = 0, 0
def add(self, component):
self.components.append(component)
def _set_component_position(self, component, x, y):
try:
component.position = x, y
except AttributeError:
try:
component.resize(x, y, component.width, component.height)
except AttributeError:
component.x, component.y = x, y
@property
def position(self):
        return self._position
@position.setter
def position(self, value):
self._position = value
class VerticalLayout(Layout):
def add(self, component):
self.components.append(component)
self.height += component.height + 10
self.width = max(component.width, self.width)
self._put_components()
def _put_components(self):
c_x, c_y = self._position[0], self._position[-1] + self.height
for component in self.components:
self._set_component_position(component, c_x, c_y)
c_y -= component.height + 10
@property
def position(self):
        return self._position
@position.setter
def position(self, value):
self._position = value
self._put_components()
class HorizontalLayout(Layout):
def add(self, component):
self.components.append(component)
self.width += component.width + 10
self.height = max(component.height, self.height)
self._put_components()
def _put_components(self):
c_x, c_y = self._position[0], self._position[-1]
for component in self.components:
self._set_component_position(component, c_x, c_y)
c_x += component.width + 10
@property
def position(self):
        return self._position
@position.setter
def position(self, value):
self._position = value
self._put_components()
class View(pyglet.event.EventDispatcher):
def __init__(self, controller):
super(View, self).__init__()
self.controller = controller
self.batch = pyglet.graphics.Batch()
self.buttons = []
def setup(self):
pass
def add_handlers(self):
self.setup()
self.controller.window.push_handlers(self)
def pop_handlers(self):
self.controller.window.set_mouse_cursor(None)
self.controller.window.pop_handlers()
def update(self, dt):
pass
def clear(self):
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
def on_mouse_press(self, x, y, button, modifiers):
self.dispatch_event('on_mouse_click', x, y, button, modifiers)
def on_mouse_motion(self, x, y, dx, dy):
cursor = None
for button in self.buttons:
if button.enabled:
if button.highlighted:
button.highlighted = False
button.draw()
if button.hit_test(x, y):
button.highlighted = True
button.draw()
cursor = self.controller.window.get_system_mouse_cursor(pyglet.window.Window.CURSOR_HAND)
self.controller.window.set_mouse_cursor(cursor)
def on_draw(self):
self.clear()
glColor3d(1, 1, 1)
self.controller.set_2d()
self.batch.draw()
View.register_event_type('on_mouse_click')
class MenuView(View):
def setup(self):
self.group = pyglet.graphics.OrderedGroup(3)
self.labels_group = pyglet.graphics.OrderedGroup(4)
self.layout = Layout(0, 0)
image = frame_image
self.frame_rect = Rectangle(0, 0, image.width, image.height)
self.background = G.texture_pack_list.selected_texture_pack.load_texture(['gui', 'background.png'])
self.background = self.background.get_texture()
self.background.height = 64
self.background.width = 64
self.frame = Rectangle(0, 0, image.width, image.height)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
def Button(self, x=0, y=0, width=400, height=40, image=button_image, image_highlighted=button_highlighted, caption="Unlabeled", batch=None, group=None, label_group=None, font_name='ChunkFive Roman', on_click=None, enabled=True):
button = Button(self, x=x, y=y, width=width, height=height, image=resize_button_image(image, 400, width), image_highlighted=resize_button_image(image_highlighted, 400, width), caption=caption, batch=(batch or self.batch), group=(group or self.group), label_group=(label_group or self.labels_group), font_name=font_name, enabled=enabled)
if on_click:
button.push_handlers(on_click=on_click)
return button
def ToggleButton(self, x=0, y=0, width=400, height=40, image=button_image, image_highlighted=button_highlighted, caption="Unlabeled", batch=None, group=None, label_group=None, font_name='ChunkFive Roman', on_click=None, on_toggle=None, enabled=True):
button = ToggleButton(self, x=x, y=y, width=width, height=height, image=resize_button_image(image, 400, width), image_highlighted=resize_button_image(image_highlighted, 400, width), caption=caption, batch=(batch or self.batch), group=(group or self.group), label_group=(label_group or self.labels_group), font_name=font_name, enabled=enabled)
if on_click:
button.push_handlers(on_click=on_click)
if on_toggle:
button.push_handlers(on_toggle=on_toggle)
return button
def Scrollbar(self, x=0, y=0, width=400, height=40, sb_width=40, sb_height=40, style=1, background_image=button_disabled, scrollbar_image=button_image, caption="Test", font_size=12, font_name=G.DEFAULT_FONT, batch=None, group=None, label_group=None, pos=0, on_pos_change=None):
sb = ScrollbarWidget(self.controller.window, x=x, y=y, width=width, height=height,
sb_width=sb_width, sb_height=sb_height,
style=style,
background_image=resize_button_image(background_image, 400, width),
scrollbar_image=resize_button_image(scrollbar_image, 400, sb_width),
caption=caption, font_size=font_size, font_name=font_name,
batch=(batch or self.batch), group=(group or self.group), label_group=(label_group or self.labels_group),
pos=pos, on_pos_change=on_pos_change)
return sb
def draw_background(self):
glBindTexture(self.background.target, self.background.id)
glEnable(self.background.target)
glColor4f(0.3, 0.3, 0.3, 1.0)
width = float(self.controller.window.get_size()[0])
height = float(self.controller.window.get_size()[1])
bg_width = self.background.width
bg_height = self.background.height
vert_list = [0.0, 0.0, 0.0, width, 0.0, 0.0, width, height, 0.0, 0.0, height, 0.0]
uv_list = [0.0, 0.0, width / bg_width, 0.0, width / bg_width, height / bg_height, 0.0, height / bg_height]
l = pyglet.graphics.vertex_list(4,
('v3f/static', vert_list),
('t2f/static', uv_list),
)
l.draw(GL_QUADS)
glDisable(self.background.target)
def on_draw(self):
self.clear()
glColor3d(1, 1, 1)
self.draw_background()
self.controller.set_2d()
self.batch.draw()
def on_resize(self, width, height):
self.frame.x, self.frame.y = (width - self.frame.width) / 2, (height - self.frame.height) / 2
self.layout.position = (width - self.layout.width) / 2, self.frame.y
class MainMenuView(MenuView):
def setup(self):
self.group = pyglet.graphics.OrderedGroup(3)
self.labels_group = pyglet.graphics.OrderedGroup(4)
image = frame_image
self.layout = VerticalLayout(0, 0)
# Custom background
self.background = None
self.frame_rect = Rectangle(0, 0, self.controller.window.get_size()[0], image.height)
self.frame = Rectangle(0, 0, self.controller.window.get_size()[0], image.height)
width, height = self.controller.window.width, self.controller.window.height
self.label = Label(G.APP_NAME, font_name='ChunkFive Roman', font_size=50, x=width/2, y=self.frame.y + self.frame.height,
anchor_x='center', anchor_y='top', color=(255, 255, 255, 255), batch=self.batch,
group=self.labels_group)
self.label.height = self.label.content_height
self.layout.add(self.label)
button = self.Button(caption=G._("Singleplayer"),on_click=self.controller.start_singleplayer_game)
self.layout.add(button)
self.buttons.append(button)
button = self.Button(caption=G._("Multiplayer"),on_click=self.controller.multiplayer)
self.layout.add(button)
self.buttons.append(button)
button = self.Button(caption=G._("Options..."),on_click=self.controller.game_options)
self.layout.add(button)
self.buttons.append(button)
button = self.Button(caption=G._("Exit game"),on_click=self.controller.exit_game)
self.layout.add(button)
self.buttons.append(button)
# Splash text
self.splash_text = 'Hello!'
now = datetime.datetime.now()
if now.month == 1 and now.day == 1:
self.splash_text = 'Happy new year!'
self.splash_text_label = Label(self.splash_text, font_name='Arial', font_size=30, x=self.label.x, y=self.label.y,
anchor_x='center', anchor_y='top', color=(255, 255, 0, 255),
group=self.labels_group)
self.on_resize(width, height)
# Panorama
self.panorama = [G.texture_pack_list.selected_texture_pack.load_texture(['title', 'bg', 'panorama' + str(x) + '.png']) for x in range(6)]
self.panorama_timer = 0
pyglet.clock.schedule_interval(self.update_panorama_timer, .05)
self.blur_texture = pyglet.image.Texture.create(256, 256)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
def update_panorama_timer(self, dt):
self.panorama_timer += 1
def draw_panorama(self):
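        # Renders the six title-screen panorama textures as the faces of a cube around the camera
        # and slowly rotates it over time, giving the Minecraft-style animated menu background.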
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
gluPerspective(120.0, 1.0, 0.05, 10.0)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
glColor4f(1.0, 1.0, 1.0, 1.0)
glRotatef(180.0, 1.0, 0.0, 0.0)
glEnable(GL_BLEND)
glDisable(GL_ALPHA_TEST)
glDisable(GL_CULL_FACE)
glDepthMask(False)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glPushMatrix()
glRotatef(sin(float(self.panorama_timer) / 400.0) * 25.0 + 20.0, 1.0, 0.0, 0.0)
glRotatef(-float(self.panorama_timer) * 0.1, 0.0, -1.0, 0.0)
# 6 faces
for i in range(6):
glPushMatrix()
if i == 1:
glRotatef(90.0, 0.0, 1.0, 0.0)
elif i == 2:
glRotatef(180.0, 0.0, 1.0, 0.0)
elif i == 3:
glRotatef(-90.0, 0.0, 1.0, 0.0)
elif i == 4:
glRotatef(90.0, 1.0, 0.0, 0.0)
elif i == 5:
glRotatef(-90.0, 1.0, 0.0, 0.0)
glBindTexture(self.panorama[i].texture.target, self.panorama[i].texture.id)
glEnable(self.panorama[i].texture.target)
vert_list = [-1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0]
uv_list = [0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]
l = pyglet.graphics.vertex_list(4,
('v3f/static', vert_list),
('t2f/static', uv_list),
)
l.draw(GL_QUADS)
glDisable(self.panorama[i].texture.target)
glPopMatrix()
glPopMatrix()
glColorMask(True, True, True, False)
glColorMask(True, True, True, True)
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
glDepthMask(True)
glEnable(GL_CULL_FACE)
glEnable(GL_ALPHA_TEST)
glEnable(GL_DEPTH_TEST)
def render_to_texture(self):
glViewport(0, 0, 256, 256)
self.draw_panorama()
glBindTexture(GL_TEXTURE_2D, self.blur_texture.id)
glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, 0, 0, 256, 256, 0)
glClearColor(0.0, 0.0, 0.5, 0.5)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glViewport(0, 0, self.controller.window.get_size()[0], self.controller.window.get_size()[1])
def draw_blur(self, times=5):
alpha = 0.5
glDisable(GL_TEXTURE_GEN_S)
glDisable(GL_TEXTURE_GEN_T)
glEnable(GL_TEXTURE_2D)
glDisable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE)
glBindTexture(GL_TEXTURE_2D, self.blur_texture.id)
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
glOrtho(0, self.controller.window.get_size()[0] , self.controller.window.get_size()[1] , 0, -1, 1 )
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
alphainc = alpha / float(times)
spost = 0
width = self.controller.window.get_size()[0]
height = self.controller.window.get_size()[1]
glBegin(GL_QUADS)
for _ in range(times):
glColor4f(1.0, 1.0, 1.0, alpha)
glTexCoord2f(0, 1)
glVertex2f(0, 0)
glTexCoord2f(0, 0)
glVertex2f(0, height)
glTexCoord2f(1, 0)
glVertex2f(width, height)
glTexCoord2f(1, 1)
glVertex2f(width, 0)
alpha = alpha - alphainc
if alpha < 0:
alpha = 0
glEnd()
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
glEnable(GL_DEPTH_TEST)
glDisable(GL_TEXTURE_2D)
glDisable(GL_BLEND)
glBindTexture(GL_TEXTURE_2D, 0)
def draw_splash_text(self):
glPushMatrix()
glTranslatef(float(self.controller.window.get_size()[0] / 2 - self.label.content_width / 2), -float(self.controller.window.get_size()[1] / 3), 0.0)
glRotatef(20.0, 0.0, 0.0, 1.0)
self.splash_text_label.draw()
glPopMatrix()
def on_resize(self, width, height):
MenuView.on_resize(self, width, height)
self.label.y = self.frame.y + self.frame.height - 15
self.label.x = width / 2
self.splash_text_label.x = self.label.x
self.splash_text_label.y = self.label.y
def on_draw(self):
self.clear()
glColor3d(1, 1, 1)
#self.render_to_texture()
self.draw_panorama()
#self.draw_blur()
self.controller.set_2d()
self.batch.draw()
self.draw_splash_text()
class OptionsView(MenuView):
def setup(self):
MenuView.setup(self)
width, height = self.controller.window.width, self.controller.window.height
self.layout = VerticalLayout(0, 0)
textures_enabled = len(G.texture_pack_list.available_texture_packs) > 1
self.text_input = TextWidget(self.controller.window, G.USERNAME, 0, 0, width=160, height=20, font_name='Arial', batch=self.batch)
self.controller.window.push_handlers(self.text_input)
self.text_input.focus()
self.text_input.caret.mark = len(self.text_input.document.text) # Don't select the whole text
def text_input_callback(symbol, modifier):
G.USERNAME = self.text_input.text
self.text_input.push_handlers(key_released=text_input_callback)
hl = HorizontalLayout(0, 0)
sb = self.Scrollbar(x=0, y=0, width=300, height=40, sb_width=20, sb_height=40, caption="Music")
hl.add(sb)
def change_sound_volume(pos):
print G.EFFECT_VOLUME
G.EFFECT_VOLUME = float(float(pos) / 100)
sb = self.Scrollbar(x=0, y=0, width=300, height=40, sb_width=20, sb_height=40, caption="Sound", pos=int(G.EFFECT_VOLUME * 100), on_pos_change=change_sound_volume)
hl.add(sb)
self.layout.add(hl)
hl = HorizontalLayout(0, 0)
button = self.Button(width=300, caption=G._("Controls..."), on_click=self.controller.controls)
hl.add(button)
self.buttons.append(button)
button = self.Button(width=300, caption=G._("Textures"), on_click=self.controller.textures, enabled=textures_enabled)
hl.add(button)
self.buttons.append(button)
self.layout.add(hl)
button = self.Button(width=610, caption=G._("Done"), on_click=self.controller.main_menu)
self.layout.add(button)
self.buttons.append(button)
self.label = Label('Options', font_name='ChunkFive Roman', font_size=25, x=width/2, y=self.frame.y + self.frame.height,
anchor_x='center', anchor_y='top', color=(255, 255, 255, 255), batch=self.batch,
group=self.labels_group)
self.on_resize(width, height)
def on_resize(self, width, height):
MenuView.on_resize(self, width, height)
self.text_input.resize(x=self.frame.x + (self.frame.width - self.text_input.width) / 2 + 5, y=self.frame.y + (self.frame.height) / 2 + 75, width=150)
class ControlsView(MenuView):
def setup(self):
MenuView.setup(self)
width, height = self.controller.window.width, self.controller.window.height
self.layout = VerticalLayout(0, 0)
self.key_buttons = []
for identifier in ('move_backward', 'move_forward', 'move_left', 'move_right'):
button = self.ToggleButton(width=200, caption=pyglet.window.key.symbol_string(getattr(G, identifier.upper() + '_KEY')))
button.id = identifier
self.buttons.append(button)
self.key_buttons.append(button)
self.button_return = self.Button(caption=G._("Done"),on_click=self.controller.game_options)
self.buttons.append(self.button_return)
self.on_resize(width, height)
def on_resize(self, width, height):
self.background.scale = 1.0
self.background.scale = max(float(width) / self.background.width, float(height) / self.background.height)
self.background.x, self.background.y = 0, 0
self.frame.x, self.frame.y = (width - self.frame.width) / 2, (height - self.frame.height) / 2
default_button_x = button_x = self.frame.x + 30
button_y = self.frame.y + (self.frame.height) / 2 + 10
i = 0
for button in self.key_buttons:
button.position = button_x, button_y
if i%2 == 0:
button_x += button.width + 20
else:
button_x = default_button_x
button_y -= button.height + 20
i += 1
button_x = self.frame.x + (self.frame.width - self.button_return.width) / 2
self.button_return.position = button_x, button_y
def on_key_press(self, symbol, modifiers):
active_button = None
for button in self.buttons:
if isinstance(button, ToggleButton) and button.toggled:
active_button = button
break
if not active_button:
return
active_button.caption = pyglet.window.key.symbol_string(symbol)
active_button.toggled = False
G.config.set("Controls", active_button.id, pyglet.window.key.symbol_string(symbol))
G.save_config()
class TexturesView(MenuView):
def setup(self):
MenuView.setup(self)
width, height = self.controller.window.width, self.controller.window.height
self.layout = VerticalLayout(0, 0)
self.texture_buttons = []
self.current_toggled = None
texture_packs = G.texture_pack_list.available_texture_packs
for texture_pack in texture_packs:
button = self.ToggleButton(caption=texture_pack.texture_pack_file_name,on_toggle=self.on_button_toggle)
button.id = texture_pack.texture_pack_file_name
button.toggled = G.texture_pack_list.selected_texture_pack == texture_pack
if button.toggled:
self.current_toggled = button
self.buttons.append(button)
self.layout.add(button)
self.texture_buttons.append(button)
self.button_return = self.Button(caption="Done",on_click=self.controller.game_options)
self.buttons.append(self.button_return)
self.layout.add(self.button_return)
self.on_resize(width, height)
def on_button_toggle(self):
for button in self.texture_buttons:
if button != self.current_toggled and button.toggled:
self.current_toggled.toggled = False
self.current_toggled = button
G.config.set("Graphics", "texture_pack", button.id)
G.TEXTURE_PACK = button.id
for block in G.BLOCKS_DIR.values():
block.update_texture() #Reload textures
G.save_config()
def on_resize(self, width, height):
MenuView.on_resize(self, width, height)
self.background.scale = 1.0
self.background.scale = max(float(width) / self.background.width, float(height) / self.background.height)
self.background.x, self.background.y = 0, 0
self.frame.x, self.frame.y = (width - self.frame.width) / 2, (height - self.frame.height) / 2
class MultiplayerView(MenuView):
def setup(self):
MenuView.setup(self)
width, height = self.controller.window.width, self.controller.window.height
self.layout = VerticalLayout(0, 0)
self.text_input = TextWidget(self.controller.window, G.IP_ADDRESS, 0, 0, width=160, height=20, font_name='Arial', batch=self.batch)
self.controller.window.push_handlers(self.text_input)
self.text_input.focus()
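        # Copy the typed address into G.IP_ADDRESS on every key release so it is preserved when this view is left.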
def text_input_callback(symbol, modifier):
G.IP_ADDRESS = self.text_input.text
self.text_input.push_handlers(key_released=text_input_callback)
button = self.Button(caption=G._("Connect to server"), on_click=self.controller.start_multiplayer_game)
self.layout.add(button)
self.buttons.append(button)
        button = self.Button(caption=G._("Launch server"), on_click=self.launch_server)
self.layout.add(button)
self.buttons.append(button)
        button = self.Button(caption=G._("Done"), on_click=self.controller.main_menu)
self.layout.add(button)
self.buttons.append(button)
self.label = Label('Play Multiplayer', font_name='ChunkFive Roman', font_size=25, x=width/2, y=self.frame.y + self.frame.height,
anchor_x='center', anchor_y='top', color=(255, 255, 255, 255), batch=self.batch,
group=self.labels_group)
self.on_resize(width, height)
def launch_server(self):
if os.name == 'nt':
subprocess.Popen([sys.executable, "server.py"], creationflags=subprocess.CREATE_NEW_CONSOLE)
else:
subprocess.Popen([sys.executable, "server.py"])
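        # Show the first non-loopback IPv4 address of this machine so other players know where to connect (raises IndexError if only 127.x addresses exist).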
localip = [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][0]
self.text_input.text = localip
G.IP_ADDRESS = localip
def on_resize(self, width, height):
MenuView.on_resize(self, width, height)
self.text_input.resize(x=self.frame.x + (self.frame.width - self.text_input.width) / 2 + 5, y=self.frame.y + (self.frame.height) / 2 + 75, width=150)
| mit | 5,070,663,446,137,972,000 | 37.507862 | 350 | 0.613695 | false |
danieljabailey/inkscape_experiments | share/extensions/fractalize.py | 1 | 3762 | #!/usr/bin/env python
'''
Copyright (C) 2005 Carsten Goetze [email protected]
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
import random, math, inkex, simplepath
def calculateSubdivision(x1,y1,x2,y2,smoothness):
""" Calculate the vector from (x1,y1) to (x2,y2) """
x3 = x2 - x1
y3 = y2 - y1
""" Calculate the point half-way between the two points """
hx = x1 + x3/2
hy = y1 + y3/2
""" Calculate normalized vector perpendicular to the vector (x3,y3) """
length = math.sqrt(x3*x3 + y3*y3)
if length != 0:
nx = -y3/length
ny = x3/length
else:
nx = 1
ny = 0
""" Scale perpendicular vector by random factor """
r = random.uniform(-length/(1+smoothness),length/(1+smoothness))
nx = nx * r
ny = ny * r
""" add scaled perpendicular vector to the half-way point to get the final
displaced subdivision point """
x = hx + nx
y = hy + ny
return [x, y]
class PathFractalize(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.OptionParser.add_option("-s", "--subdivs",
action="store", type="int",
dest="subdivs", default="6",
help="Number of subdivisons")
self.OptionParser.add_option("-f", "--smooth",
action="store", type="float",
dest="smooth", default="4.0",
help="Smoothness of the subdivision")
def effect(self):
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
d = node.get('d')
p = simplepath.parsePath(d)
a = []
first = 1
for cmd,params in p:
if cmd != 'Z':
if first == 1:
x1 = params[-2]
y1 = params[-1]
a.append(['M',params[-2:]])
first = 2
else :
x2 = params[-2]
y2 = params[-1]
self.fractalize(a,x1,y1,x2,y2,self.options.subdivs,self.options.smooth)
x1 = x2
y1 = y2
a.append(['L',params[-2:]])
node.set('d', simplepath.formatPath(a))
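    # Midpoint-displacement recursion: each call splits the segment at a randomly displaced midpoint, so 'subdivs' levels turn one segment into 2**subdivs smaller ones.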
def fractalize(self,a,x1,y1,x2,y2,s,f):
subdivPoint = calculateSubdivision(x1,y1,x2,y2,f)
if s > 0 :
""" recursively subdivide the segment left of the subdivision point """
self.fractalize(a,x1,y1,subdivPoint[-2],subdivPoint[-1],s-1,f)
a.append(['L',subdivPoint])
""" recursively subdivide the segment right of the subdivision point """
self.fractalize(a,subdivPoint[-2],subdivPoint[-1],x2,y2,s-1,f)
if __name__ == '__main__':
e = PathFractalize()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
| gpl-2.0 | 2,135,665,781,079,177,200 | 37.783505 | 99 | 0.542265 | false |
Octonius/boxv2 | boxv2/session.py | 1 | 18801 | from .request import BoxRestRequest
from .upload import MultipartUploadWrapper
from .exceptions import BoxError, BoxHttpResponseError
class BoxSession(object):
"""Manage files and folder from Box.
When you instanciate this class you have to provide at least the Refresh Token (found with :class:`BoxAuthenticateFlow`). If the Access Token is not provided a request will be made to Box to get a new one (and a new Refresh Token will be generated).
The Access Token expires every hour. When you use this class with an Access Token expired, a new one will be requested automatically.
Use the "tokens_changed" callback to backup the Access Token and the Refresh Token each time they change. If you do not backup them, you will have to follow the authenticate flow again (with :class:`BoxAuthenticateFlow`).
Usage:
>>> def tokens_changed(refresh_token, access_token):
... save_to_file(refresh_token, access_token)
...
>>> box = BoxSession('my_id', 'my_secret', refresh_token, access_token, tokens_changed)
>>> print box.get_folder_info(0)
"""
def __init__(self, client_id, client_secret,
last_refresh_token,
last_access_token=None,
tokens_changed=None,
timeout=None):
"""Constructor
Args:
client_id (str): Client ID provided by Box.
client_secret (str): Client Secret provided by Box.
last_refresh_token (str): Refresh Token found with the class :class:`BoxAuthenticateFlow`.
            last_access_token (str): Access Token found with the class :class:`BoxAuthenticateFlow`. If None, a new Access Token will be requested from Box.
            tokens_changed (func): Function called each time the Refresh Token and the Access Token are refreshed (because of expiration). Use this to back up your Refresh Token and the Access Token in order to reuse this class without using the :class:`BoxAuthenticateFlow` class for getting tokens.
timeout (float): Stop waiting for a response after a given number of seconds with the timeout parameter. If None, waiting forever. http://www.python-requests.org/en/latest/user/quickstart/#timeouts
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
self.box_request = BoxRestRequest(client_id, client_secret, timeout)
self.client_id = client_id
self.client_secret = client_secret
self.refresh_token = last_refresh_token
self.access_token = last_access_token
self.box_request.access_token = last_access_token
self.tokens_changed = tokens_changed
        if self.access_token is None:
self.__refresh_access_token()
def __check_response(self, response, stream=False):
if stream:
log_debug('Response from box.com: %s. {Streamed content}' % (response,))
else:
log_debug('Response from box.com: %s. %s' %(response, response.text))
try:
if stream:
att = response
elif response.text is not None and len(response.text) > 0:
att = response.json()
else:
att = {}
except Exception, ex:
raise BoxHttpResponseError(ex)
if response.status_code >= 400:
raise BoxError(response.status_code, att)
else:
return att
def __refresh_access_token(self):
log_debug('Access token expired, refreshing it from refresh token')
resp = self.box_request.refresh_access_token(self.refresh_token)
self.__log_debug_request(resp)
att = self.__check_response(resp)
self.access_token = att['access_token']
self.refresh_token = att['refresh_token']
self.box_request.access_token = self.access_token
if self.tokens_changed:
self.tokens_changed(self.refresh_token, self.access_token)
def __request(self, method, command, data=None,
querystring=None, files=None, headers=None,
stream=None,
json_data=True,
raise_if_token_expired=False):
resp = self.box_request.request(method, command,
data, querystring,
files, headers, stream, json_data)
self.__log_debug_request(resp)
try:
att = self.__check_response(resp, stream)
except BoxError, ex:
if ex.status != 401:
raise
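            # A 401 means the Access Token has expired: refresh it and replay the request once.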
self.__refresh_access_token()
if raise_if_token_expired:
raise
resp = self.box_request.request(method, command,
data, querystring,
files, headers, stream, json_data)
self.__log_debug_request(resp)
att = self.__check_response(resp, stream)
return att
def __log_debug_request(self, resp):
if hasattr(resp.request, 'data'):
data_req = resp.request.data
else:
data_req = ''
log_debug('Request made to box.com: %s %s\nHEADERS:\n%s\nDATA:\n%s\nBODY:\n%s' %
(resp.request.method,
resp.request.url,
resp.request.headers,
data_req,
resp.request.body))
def find_id_in_folder(self, name, parent_folder_id=0):
"""Find a folder or a file ID from its name, inside a given folder.
Args:
name (str): Name of the folder or the file to find.
parent_folder_id (int): ID of the folder where to search.
Returns:
int. ID of the file or folder found. None if not found.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
if name is None or len(name) == 0:
return parent_folder_id
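        # Page through the folder 1000 items at a time until the name is found or the listing is exhausted.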
offset = 0
resp = self.get_folder_items(parent_folder_id,
limit=1000, offset=offset,
fields_list=['name'])
total = int(resp['total_count'])
while offset < total:
found = self.__find_name(resp, name)
if found is not None:
return found
offset += int(len(resp['entries']))
resp = self.get_folder_items(parent_folder_id,
limit=1000, offset=offset,
fields_list=['name'])
return None
def __find_name(self, response, name_to_find):
for entry in response['entries']:
if entry['name'] == name_to_find:
return int(entry['id'])
return None
def get_folder_info(self, folder_id):
"""Get info on a folder
Args:
folder_id (int): ID of the folder.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
return self.__request("GET", "folders/%s" % (folder_id, ))
def create_folder(self, name, parent_folder_id=0):
"""Create a folder
If the folder exists, a BoxError will be raised.
Args:
            name (str): Name of the folder.
parent_folder_id (int): ID of the folder where to create the new one.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
return self.__request("POST", "folders",
data={ "name": name,
"parent": {"id": unicode(parent_folder_id)} })
def delete_folder(self, folder_id, recursive=True):
"""Delete an existing folder
Args:
folder_id (int): ID of the folder to delete.
recursive (bool): Delete all subfolder if True.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
return self.__request("DELETE", "folders/%s" % (folder_id, ),
querystring={'recursive': unicode(recursive).lower()})
def get_folder_items(self, folder_id,
limit=100, offset=0, fields_list=None):
"""Get files and folders inside a given folder
Args:
folder_id (int): Where to get files and folders info.
limit (int): The number of items to return.
offset (int): The item at which to begin the response.
fields_list (list): List of attributes to get. All attributes if None.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
qs = { "limit": limit,
"offset": offset }
if fields_list:
qs['fields'] = ','.join(fields_list)
return self.__request("GET", "folders/%s/items" % (folder_id, ),
querystring=qs)
def upload_file(self, name, folder_id, file_path):
"""Upload a file into a folder.
        Use this function for small files; for big files use the chunk_upload_file() function instead.
        Args:
name (str): Name of the file on your Box storage.
folder_id (int): ID of the folder where to upload the file.
file_path (str): Local path of the file to upload.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
try:
return self.__do_upload_file(name, folder_id, file_path)
except BoxError, ex:
if ex.status != 401:
raise
#tokens had been refreshed, so we start again the upload
return self.__do_upload_file(name, folder_id, file_path)
def __do_upload_file(self, name, folder_id, file_path):
file_obj = open(file_path, 'rb')
try:
return self.__request("POST", "files/content",
files = {'filename': (name, file_obj)},
data = {'parent_id': unicode(folder_id)},
json_data = False,
raise_if_token_expired=True)
finally:
file_obj.close()
def chunk_upload_file(self, name, folder_id, file_path,
progress_callback=None,
chunk_size=1024*1024*1):
"""Upload a file chunk by chunk.
The whole file is never loaded in memory.
        Use this function for big files.
        The callback(transferred, total) lets you know the upload progress.
        Upload can be cancelled if the callback raises an Exception.
>>> def progress_callback(transferred, total):
... print 'Uploaded %i bytes of %i' % (transferred, total, )
... if user_request_cancel:
... raise MyCustomCancelException()
Args:
name (str): Name of the file on your Box storage.
folder_id (int): ID of the folder where to upload the file.
file_path (str): Local path of the file to upload.
progress_callback (func): Function called each time a chunk is uploaded.
chunk_size (int): Size of chunks.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
try:
return self.__do_chunk_upload_file(name, folder_id, file_path,
progress_callback,
chunk_size)
except BoxError, ex:
if ex.status != 401:
raise
#tokens had been refreshed, so we start again the upload
return self.__do_chunk_upload_file(name, folder_id, file_path,
progress_callback,
chunk_size)
def __do_chunk_upload_file(self, name, folder_id, file_path,
progress_callback,
chunk_size):
file_obj = open(file_path, 'rb')
try:
muw = MultipartUploadWrapper({'parent_id': unicode(folder_id),
'filename': (name, file_obj)},
progress_callback=progress_callback,
chunk_size=chunk_size)
return self.__request("POST", "files/content",
data = muw,
headers = muw.content_type_header,
json_data = False,
raise_if_token_expired=True)
finally:
file_obj.close()
def get_file_info(self, file_id):
"""Get info on a file
Args:
            file_id (int): ID of the file.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
return self.__request("GET", "files/%s" % (file_id, ))
def download_file(self, file_id, dest_file_path,
progress_callback=None,
chunk_size=1024*1024*1):
"""Download a file.
The whole file is never loaded in memory.
        The callback(transferred, total) lets you know the download progress.
        Download can be cancelled if the callback raises an Exception.
>>> def progress_callback(transferred, total):
... print 'Downloaded %i bytes of %i' % (transferred, total, )
... if user_request_cancel:
... raise MyCustomCancelException()
Args:
file_id (int): ID of the file to download.
            dest_file_path (str): Local path where to store the downloaded file.
progress_callback (func): Function called each time a chunk is downloaded.
chunk_size (int): Size of chunks.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
with open(dest_file_path, 'wb') as fp:
req = self.__request("GET", "files/%s/content" % (file_id, ),
stream=True,
json_data=False)
total = -1
if hasattr(req, 'headers'):
lower_headers = {k.lower():v for k,v in req.headers.items()}
if 'content-length' in lower_headers:
total = lower_headers['content-length']
transferred = 0
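            # Stream the body chunk by chunk so the whole file is never held in memory, reporting progress after each written chunk.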
for chunk in req.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
if progress_callback:
progress_callback(transferred, total)
fp.write(chunk)
fp.flush()
transferred += len(chunk)
if progress_callback:
progress_callback(transferred, total)
def delete_file(self, file_id):
"""Delete an existing file
Args:
file_id (int): ID of the file to delete.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
return self.__request("DELETE", "files/%s" % (file_id, ))
def search(self, **kwargs):
"""Searches for files/folders
Args:
kwargs (dict): A dictionary containing necessary parameters (check
https://developers.box.com/docs/#search
for list of parameters)
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
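        Example (parameter names follow the Box search API documented above; shown only as an illustration):
            >>> box.search(query='my contract', limit=20)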
"""
query_string = {}
for key, value in kwargs.iteritems():
query_string[key] = value
return self.__request("GET","search",querystring=query_string)
def get_user_info(self):
"""Gets the user's information
Args:
Returns:
dict. Response from box
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
return self.__request("GET","users/me")
show_debug_messages = False
def log_debug(message):
    if not show_debug_messages:
return
print '------------------------'
print message
| mit | -432,496,499,673,475,100 | 35.225434 | 294 | 0.55013 | false |
phenoxim/nova | nova/tests/unit/api/openstack/compute/test_agents.py | 1 | 19278 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob.exc
from nova.api.openstack.compute import agents as agents_v21
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'http://example.com/path/to/resource2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
'url': 'http://example.com/path/to/resource3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'id': 4},
]
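# Stand-ins for the nova.db agent_build_* calls: they serve or echo the canned data above so the tests never touch a real database.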
def fake_agent_build_get_all(context, hypervisor):
agent_build_all = []
for agent in fake_agents_list:
if hypervisor and hypervisor != agent['hypervisor']:
continue
agent_build_ref = models.AgentBuild()
agent_build_ref.update(agent)
agent_build_all.append(agent_build_ref)
return agent_build_all
def fake_agent_build_update(context, agent_build_id, values):
pass
def fake_agent_build_destroy(context, agent_update_id):
pass
def fake_agent_build_create(context, values):
values['id'] = 1
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
return agent_build_ref
class AgentsTestV21(test.NoDBTestCase):
controller = agents_v21.AgentController()
validation_error = exception.ValidationError
def setUp(self):
super(AgentsTestV21, self).setUp()
self.stub_out("nova.db.agent_build_get_all", fake_agent_build_get_all)
self.stub_out("nova.db.agent_build_update", fake_agent_build_update)
self.stub_out("nova.db.agent_build_destroy", fake_agent_build_destroy)
self.stub_out("nova.db.agent_build_create", fake_agent_build_create)
self.req = self._get_http_request()
def _get_http_request(self):
return fakes.HTTPRequest.blank('')
def test_agents_create(self):
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1}}
res_dict = self.controller.create(self.req, body=body)
self.assertEqual(res_dict, response)
def _test_agents_create_key_error(self, key):
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['agent'].pop(key)
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_agents_create_without_hypervisor(self):
self._test_agents_create_key_error('hypervisor')
def test_agents_create_without_os(self):
self._test_agents_create_key_error('os')
def test_agents_create_without_architecture(self):
self._test_agents_create_key_error('architecture')
def test_agents_create_without_version(self):
self._test_agents_create_key_error('version')
def test_agents_create_without_url(self):
self._test_agents_create_key_error('url')
def test_agents_create_without_md5hash(self):
self._test_agents_create_key_error('md5hash')
def test_agents_create_with_wrong_type(self):
body = {'agent': None}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_agents_create_with_empty_type(self):
body = {}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_agents_create_with_existed_agent(self):
def fake_agent_build_create_with_exited_agent(context, values):
raise exception.AgentBuildExists(**values)
self.stub_out('nova.db.agent_build_create',
fake_agent_build_create_with_exited_agent)
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
self.req, body=body)
def _test_agents_create_with_invalid_length(self, key):
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['agent'][key] = 'x' * 256
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_agents_create_with_invalid_length_hypervisor(self):
self._test_agents_create_with_invalid_length('hypervisor')
def test_agents_create_with_invalid_length_os(self):
self._test_agents_create_with_invalid_length('os')
def test_agents_create_with_invalid_length_architecture(self):
self._test_agents_create_with_invalid_length('architecture')
def test_agents_create_with_invalid_length_version(self):
self._test_agents_create_with_invalid_length('version')
def test_agents_create_with_invalid_length_url(self):
self._test_agents_create_with_invalid_length('url')
def test_agents_create_with_invalid_length_md5hash(self):
self._test_agents_create_with_invalid_length('md5hash')
def test_agents_delete(self):
self.controller.delete(self.req, 1)
def test_agents_delete_with_id_not_found(self):
with mock.patch.object(db, 'agent_build_destroy',
side_effect=exception.AgentBuildNotFound(id=1)):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, self.req, 1)
def test_agents_delete_string_id(self):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete, self.req, 'string_id')
def _test_agents_list(self, query_string=None):
req = fakes.HTTPRequest.blank('', use_admin_context=True,
query_string=query_string)
res_dict = self.controller.index(req)
agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'http://example.com/path/to/resource2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'agent_id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
'url': 'http://example.com/path/to/resource3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'agent_id': 4},
]
self.assertEqual(res_dict, {'agents': agents_list})
def test_agents_list(self):
self._test_agents_list()
def test_agents_list_with_hypervisor(self):
req = fakes.HTTPRequest.blank('', use_admin_context=True,
query_string='hypervisor=kvm')
res_dict = self.controller.index(req)
response = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
]
self.assertEqual(res_dict, {'agents': response})
def test_agents_list_with_multi_hypervisor_filter(self):
query_string = 'hypervisor=xen&hypervisor=kvm'
req = fakes.HTTPRequest.blank('', use_admin_context=True,
query_string=query_string)
res_dict = self.controller.index(req)
response = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
]
self.assertEqual(res_dict, {'agents': response})
def test_agents_list_query_allow_negative_int_as_string(self):
req = fakes.HTTPRequest.blank('', use_admin_context=True,
query_string='hypervisor=-1')
res_dict = self.controller.index(req)
self.assertEqual(res_dict, {'agents': []})
def test_agents_list_query_allow_int_as_string(self):
req = fakes.HTTPRequest.blank('', use_admin_context=True,
query_string='hypervisor=1')
res_dict = self.controller.index(req)
self.assertEqual(res_dict, {'agents': []})
def test_agents_list_with_unknown_filter(self):
query_string = 'unknown_filter=abc'
self._test_agents_list(query_string=query_string)
def test_agents_list_with_hypervisor_and_additional_filter(self):
req = fakes.HTTPRequest.blank(
'', use_admin_context=True,
query_string='hypervisor=kvm&additional_filter=abc')
res_dict = self.controller.index(req)
response = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
]
self.assertEqual(res_dict, {'agents': response})
def test_agents_update(self):
body = {'para': {'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'agent_id': 1,
'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
res_dict = self.controller.update(self.req, 1, body=body)
self.assertEqual(res_dict, response)
def _test_agents_update_key_error(self, key):
body = {'para': {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['para'].pop(key)
self.assertRaises(self.validation_error,
self.controller.update, self.req, 1, body=body)
def test_agents_update_without_version(self):
self._test_agents_update_key_error('version')
def test_agents_update_without_url(self):
self._test_agents_update_key_error('url')
def test_agents_update_without_md5hash(self):
self._test_agents_update_key_error('md5hash')
def test_agents_update_with_wrong_type(self):
body = {'agent': None}
self.assertRaises(self.validation_error,
self.controller.update, self.req, 1, body=body)
def test_agents_update_with_empty(self):
body = {}
self.assertRaises(self.validation_error,
self.controller.update, self.req, 1, body=body)
def test_agents_update_value_error(self):
body = {'para': {'version': '7.0',
'url': 1111,
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(self.validation_error,
self.controller.update, self.req, 1, body=body)
def test_agents_update_with_string_id(self):
body = {'para': {'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, self.req,
'string_id', body=body)
def _test_agents_update_with_invalid_length(self, key):
body = {'para': {'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['para'][key] = 'x' * 256
self.assertRaises(self.validation_error,
self.controller.update, self.req, 1, body=body)
def test_agents_update_with_invalid_length_version(self):
self._test_agents_update_with_invalid_length('version')
def test_agents_update_with_invalid_length_url(self):
self._test_agents_update_with_invalid_length('url')
def test_agents_update_with_invalid_length_md5hash(self):
self._test_agents_update_with_invalid_length('md5hash')
def test_agents_update_with_id_not_found(self):
with mock.patch.object(db, 'agent_build_update',
side_effect=exception.AgentBuildNotFound(id=1)):
body = {'para': {'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, self.req, 1, body=body)
class AgentsPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(AgentsPolicyEnforcementV21, self).setUp()
self.controller = agents_v21.AgentController()
self.req = fakes.HTTPRequest.blank('')
def test_create_policy_failed(self):
rule_name = "os_compute_api:os-agents"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.create, self.req,
body={'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
rule_name = "os_compute_api:os-agents"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_policy_failed(self):
rule_name = "os_compute_api:os-agents"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_update_policy_failed(self):
rule_name = "os_compute_api:os-agents"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.update, self.req, fakes.FAKE_UUID,
body={'para': {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
| apache-2.0 | -3,498,550,663,633,195,500 | 41.84 | 78 | 0.549746 | false |
GrognardsFromHell/TemplePlus | tpdatasrc/co8infra/scr/Spell307 - Meteor Swarm.py | 1 | 3572 | from toee import *
from utilities import *
def OnBeginSpellCast( spell ):
print "Meteor Swarm OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-evocation-conjure", spell.caster )
def OnSpellEffect( spell ):
print "Meteor Swarm OnSpellEffect"
def OnBeginProjectile( spell, projectile, index_of_target ):
print "Meteor Swarm OnBeginProjectile"
projectiles = 4
if index_of_target < projectiles:
projectile.obj_set_int( obj_f_projectile_part_sys_id, game.particles( 'sp-Spheres of Fire-proj', projectile ) )
def OnEndProjectile( spell, projectile, index_of_target ):
print "Meteor Swarm OnEndProjectile"
dam = dice_new( '2d6' )
dam2 = dice_new( '6d6' )
projectiles = 4
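	# Only the first four targets take a direct meteor hit (2d6 bludgeoning + 6d6 fire); everything near an impact point is handled by the splash loop further down.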
if index_of_target < projectiles:
spell.duration = 0
game.particles_end( projectile.obj_get_int( obj_f_projectile_part_sys_id ) )
target_item = spell.target_list[ index_of_target ]
return_val = spell.caster.perform_touch_attack( target_item.obj )
xx,yy = location_to_axis(target_item.obj.location)
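		# Inside this rectangle on map 5067 (presumably a dead-magic area) the meteor just fizzles into gas particles and deals no damage.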
if target_item.obj.map == 5067 and ( xx >= 521 and xx <= 555 ) and ( yy >= 560 and yy <= 610):
target_item.obj.float_mesfile_line( 'mes\\skill_ui.mes', 2000 , 1)
game.particles( 'swirled gas', target_item.obj )
game.sound(7581,1)
game.sound(7581,1)
else:
if (return_val & D20CAF_HIT):
# hit target
if index_of_target > 0:
return_val |= D20CAF_NO_PRECISION_DAMAGE
game.particles( 'sp-Spheres of Fire-hit', target_item.obj )
target_item.obj.spell_damage_weaponlike( spell.caster, D20DT_BLUDGEONING, dam, D20DAP_UNSPECIFIED, 100, D20A_CAST_SPELL, spell.id, return_val, index_of_target )
target_item.obj.spell_damage_weaponlike( spell.caster, D20DT_FIRE, dam2, D20DAP_UNSPECIFIED, 100, D20A_CAST_SPELL, spell.id, return_val, index_of_target )
else:
# miss target
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30007 )
game.particles( 'Fizzle', target_item.obj )
if target_item.obj.reflex_save_and_damage( spell.caster, spell.dc, D20_Save_Reduction_Half, D20STD_F_NONE, dam2, D20DT_FIRE, D20DAP_UNSPECIFIED, D20A_CAST_SPELL, spell.id ):
# saving throw successful
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30001 )
else:
# saving throw unsuccessful
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30002 )
game.particles( 'sp-Fireball-Hit', target_item.obj )
for critter in game.obj_list_cone( target_item.obj, OLC_CRITTERS, 40, -180, 360 ):
if (critter != target_item.obj) and (critter.d20_query(Q_Dead) == 0):
xx,yy = location_to_axis(critter.location)
if critter.map == 5067 and ( xx >= 521 and xx <= 555 ) and ( yy >= 560 and yy <= 610):
critter.float_mesfile_line( 'mes\\skill_ui.mes', 2000 , 1)
game.particles( 'swirled gas', critter )
game.sound(7581,1)
game.sound(7581,1)
else:
game.particles( 'hit-FIRE-burst', critter )
if critter.reflex_save_and_damage( spell.caster, spell.dc, D20_Save_Reduction_Half, D20STD_F_NONE, dam2, D20DT_FIRE, D20DAP_UNSPECIFIED, D20A_CAST_SPELL, spell.id ):
# saving throw successful
critter.float_mesfile_line( 'mes\\spell.mes', 30001 )
else:
# saving throw unsuccessful
critter.float_mesfile_line( 'mes\\spell.mes', 30002 )
spell.num_of_projectiles = spell.num_of_projectiles - 1
if ( spell.num_of_projectiles <= 0 ):
spell.spell_end( spell.id, 1 )
def OnEndSpellCast( spell ):
print "Meteor Swarm OnEndSpellCast" | mit | -3,126,905,346,155,246,600 | 40.068966 | 177 | 0.69037 | false |